HADOOP-15079. ITestS3AFileOperationCost#testFakeDirectoryDeletion failing
after OutputCommitter patch. Contributed by Steve Loughran
This commit is contained in:
parent 2e1e9017aa
commit a0c71dcc33
@@ -108,6 +108,7 @@ public class Listing {
    * @return the iterator
    * @throws IOException IO Problems
    */
+  @Retries.RetryRaw
   FileStatusListingIterator createFileStatusListingIterator(
       Path listPath,
       S3ListRequest request,
@@ -330,6 +331,7 @@ public class Listing {
      * items that are not listed from source.
      * @throws IOException IO Problems
      */
+    @Retries.RetryTranslated
     FileStatusListingIterator(ObjectListingIterator source,
         PathFilter filter,
         FileStatusAcceptor acceptor,
@@ -361,10 +363,12 @@ public class Listing {
      * @throws IOException
      */
     @Override
+    @Retries.RetryTranslated
     public boolean hasNext() throws IOException {
       return sourceHasNext() || providedStatusIterator.hasNext();
     }
 
+    @Retries.RetryTranslated
     private boolean sourceHasNext() throws IOException {
       if (statusBatchIterator.hasNext() || requestNextBatch()) {
         return true;
@@ -379,6 +383,7 @@ public class Listing {
     }
 
     @Override
+    @Retries.RetryTranslated
     public FileStatus next() throws IOException {
       final FileStatus status;
       if (sourceHasNext()) {
@@ -408,6 +413,7 @@ public class Listing {
      * @return true if a new batch was created.
      * @throws IOException IO problems
      */
+    @Retries.RetryTranslated
     private boolean requestNextBatch() throws IOException {
       // look for more object listing batches being available
       while (source.hasNext()) {
@@ -543,6 +549,7 @@ public class Listing {
      * @param request initial request to make
      * @throws IOException if listObjects raises one.
      */
+    @Retries.RetryRaw
     ObjectListingIterator(
         Path listPath,
         S3ListRequest request) throws IOException {
@@ -89,4 +89,25 @@ public class Retries {
     String value() default "";
   }
 
+
+  /**
+   * Retried, Exceptions are swallowed.
+   */
+  @Documented
+  @Retention(RetentionPolicy.SOURCE)
+  public @interface RetryExceptionsSwallowed {
+
+    String value() default "";
+  }
+
+  /**
+   * One attempt, Exceptions are swallowed.
+   */
+  @Documented
+  @Retention(RetentionPolicy.SOURCE)
+  public @interface OnceExceptionsSwallowed {
+
+    String value() default "";
+  }
+
 }
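
The two new markers complete the Retries taxonomy: they are source-retention annotations, so they add no runtime behaviour and exist purely to document a method's retry policy for reviewers. A usage sketch; the method below is invented for illustration and is not part of the patch, assuming some class holding a FileSystem fs and an SLF4J LOG:

    // Illustration only: a best-effort cleanup documented as
    // "one attempt, exceptions swallowed". RetentionPolicy.SOURCE
    // means the annotation disappears after compilation.
    @Retries.OnceExceptionsSwallowed("best-effort cleanup")
    private void quietlyDelete(Path path) {
      try {
        fs.delete(path, false);   // single attempt, no retry wrapper
      } catch (IOException e) {
        LOG.debug("ignoring failure to delete {}", path, e);
      }
    }
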
@@ -74,7 +74,6 @@ import com.amazonaws.services.s3.transfer.TransferManagerConfiguration;
 import com.amazonaws.services.s3.transfer.Upload;
 import com.amazonaws.services.s3.transfer.model.UploadResult;
 import com.amazonaws.event.ProgressListener;
-import com.amazonaws.event.ProgressEvent;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ListeningExecutorService;
@@ -865,7 +864,10 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * The inner rename operation. See {@link #rename(Path, Path)} for
    * the description of the operation.
    * This operation throws an exception on any failure which needs to be
-   * reported and downgraded to a failure. That is: if a rename
+   * reported and downgraded to a failure.
+   * Retries: retry translated, assuming all operations it calls do
+   * so. For safety, consider catching and handling AmazonClientException
+   * because this is such a complex method there's a risk it could surface.
    * @param source path to be renamed
    * @param dest new path after rename
    * @throws RenameFailedException if some criteria for a state changing
@@ -876,6 +878,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * @throws IOException on IO failure.
    * @throws AmazonClientException on failures inside the AWS SDK
    */
+  @Retries.RetryMixed
   private boolean innerRename(Path source, Path dest)
       throws RenameFailedException, FileNotFoundException, IOException,
       AmazonClientException {
@@ -969,10 +972,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
       LOG.debug("rename: renaming file {} to {}", src, dst);
       long length = srcStatus.getLen();
       if (dstStatus != null && dstStatus.isDirectory()) {
-        String newDstKey = dstKey;
-        if (!newDstKey.endsWith("/")) {
-          newDstKey = newDstKey + "/";
-        }
+        String newDstKey = maybeAddTrailingSlash(dstKey);
         String filename =
             srcKey.substring(pathToKey(src.getParent()).length()+1);
         newDstKey = newDstKey + filename;
@@ -990,13 +990,8 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
       LOG.debug("rename: renaming directory {} to {}", src, dst);
 
       // This is a directory to directory copy
-      if (!dstKey.endsWith("/")) {
-        dstKey = dstKey + "/";
-      }
-
-      if (!srcKey.endsWith("/")) {
-        srcKey = srcKey + "/";
-      }
+      dstKey = maybeAddTrailingSlash(dstKey);
+      srcKey = maybeAddTrailingSlash(srcKey);
 
       //Verify dest is not a child of the source directory
       if (dstKey.startsWith(srcKey)) {
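
Both rename branches now delegate the trailing-slash normalisation to maybeAddTrailingSlash(), removing two copies of the same three-line idiom. The helper is an existing private method of S3AFileSystem; a minimal sketch of its presumed behaviour, where the empty-key handling is an assumption:

    private static String maybeAddTrailingSlash(String key) {
      if (!key.isEmpty() && !key.endsWith("/")) {
        return key + "/";
      }
      return key;
    }
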
@@ -1065,7 +1060,8 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
 
     metadataStore.move(srcPaths, dstMetas);
 
-    if (src.getParent() != dst.getParent()) {
+    if (!src.getParent().equals(dst.getParent())) {
+      LOG.debug("source & dest parents are different; fix up dir markers");
       deleteUnnecessaryFakeDirectories(dst.getParent());
       maybeCreateFakeParentDirectory(src);
     }
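
This small change is the heart of the fix: Path.getParent() constructs a new Path object on every call, so the old reference comparison src.getParent() != dst.getParent() was effectively always true, and the directory-marker fix-up, with its extra delete requests, ran even when source and destination shared a parent. Hadoop's org.apache.hadoop.fs.Path overrides equals() to compare the underlying URIs. An illustrative snippet, not part of the patch:

    Path a = new Path("s3a://bucket/dir/file1");
    Path b = new Path("s3a://bucket/dir/file2");
    // getParent() builds a fresh Path each time, so != tests object identity:
    assert a.getParent() != b.getParent();      // distinct instances
    assert a.getParent().equals(b.getParent()); // same logical path
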
@@ -1321,6 +1317,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * <i>does not</i> update the metastore.
    * Increments the {@code OBJECT_DELETE_REQUESTS} and write
    * operation statistics.
+   * This call does <i>not</i> create any mock parent entries.
    *
    * Retry policy: retry untranslated; delete considered idempotent.
    * @param key key to blob to delete.
@@ -1515,7 +1512,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * @return the upload initiated
    * @throws AmazonClientException on problems
    */
-  @Retries.OnceRaw
+  @Retries.OnceRaw("For PUT; post-PUT actions are RetryExceptionsSwallowed")
   PutObjectResult putObjectDirect(PutObjectRequest putObjectRequest)
       throws AmazonClientException {
     long len = getPutRequestLength(putObjectRequest);
@@ -1685,7 +1682,11 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   public boolean delete(Path f, boolean recursive) throws IOException {
     try {
       entryPoint(INVOCATION_DELETE);
-      return innerDelete(innerGetFileStatus(f, true), recursive);
+      boolean outcome = innerDelete(innerGetFileStatus(f, true), recursive);
+      if (outcome) {
+        maybeCreateFakeParentDirectory(f);
+      }
+      return outcome;
     } catch (FileNotFoundException e) {
       LOG.debug("Couldn't delete {} - does not exist", f);
       instrumentation.errorIgnored();
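
Recreating the fake parent marker now happens once, at the public delete() entry point, instead of inside innerDelete(); the hunk two below removes the inner call. A hedged sketch of what maybeCreateFakeParentDirectory() presumably does; the s3Exists() probe and the exact structure are assumptions, not quoted from the patch:

    void maybeCreateFakeParentDirectory(Path path)
        throws IOException, AmazonClientException {
      Path parent = path.getParent();
      String key = parent == null ? "" : pathToKey(parent);
      // the root needs no marker; only add one when nothing else
      // keeps the parent directory visible
      if (!key.isEmpty() && !s3Exists(parent)) {
        createFakeDirectory(key);
      }
    }
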
@@ -1697,7 +1698,8 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
 
   /**
    * Delete an object. See {@link #delete(Path, boolean)}.
-   *
+   * This call does not create any fake parent directory; that is
+   * left to the caller.
    * @param status fileStatus object
    * @param recursive if path is a directory and set to
    * true, the directory is deleted else throws an exception. In
|
@ -1771,7 +1773,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
|
||||||
deleteObjectAtPath(f, key, true);
|
deleteObjectAtPath(f, key, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
maybeCreateFakeParentDirectory(f);
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -2049,11 +2050,9 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
         fPart = fPart.getParent();
       }
       String key = pathToKey(f);
+      // this will create the marker file, delete the parent entries
+      // and update S3Guard
       createFakeDirectory(key);
-      S3Guard.makeDirsOrdered(metadataStore, metadataStoreDirs, username, true);
-      // this is complicated because getParent(a/b/c/) returns a/b/c, but
-      // we want a/b. See HADOOP-14428 for more details.
-      deleteUnnecessaryFakeDirectories(new Path(f.toString()).getParent());
       return true;
     }
   }
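
The deleted block is not simply lost: per the new comment, createFakeDirectory() now takes care of creating the marker object, deleting redundant parent entries and updating S3Guard, which is the work previously done here by S3Guard.makeDirsOrdered() and the explicit deleteUnnecessaryFakeDirectories() call with its HADOOP-14428 getParent() workaround. That is also why makeDirsOrdered() is deprecated in the S3Guard hunks further down.
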
@@ -2389,7 +2388,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * @return the upload result
    * @throws InterruptedIOException if the blocking was interrupted.
    */
-  @Retries.OnceRaw
+  @Retries.OnceRaw("For PUT; post-PUT actions are RetryExceptionsSwallowed")
   UploadResult executePut(PutObjectRequest putObjectRequest,
       Progressable progress)
       throws InterruptedIOException {
@@ -2483,6 +2482,8 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
 
   /**
    * Copy a single object in the bucket via a COPY operation.
+   * There's no update of metadata, directory markers, etc.
+   * Callers must implement these operations themselves.
    * @param srcKey source object path
    * @param dstKey destination object path
    * @param size object size
@@ -2490,46 +2491,42 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * @throws InterruptedIOException the operation was interrupted
    * @throws IOException Other IO problems
    */
+  @Retries.RetryMixed
   private void copyFile(String srcKey, String dstKey, long size)
-      throws IOException, InterruptedIOException, AmazonClientException {
+      throws IOException, InterruptedIOException {
     LOG.debug("copyFile {} -> {} ", srcKey, dstKey);
 
-    try {
-      ObjectMetadata srcom = getObjectMetadata(srcKey);
-      ObjectMetadata dstom = cloneObjectMetadata(srcom);
-      setOptionalObjectMetadata(dstom);
-      CopyObjectRequest copyObjectRequest =
-          new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
-      setOptionalCopyObjectRequestParameters(copyObjectRequest);
-      copyObjectRequest.setCannedAccessControlList(cannedACL);
-      copyObjectRequest.setNewObjectMetadata(dstom);
-
-      ProgressListener progressListener = new ProgressListener() {
-        public void progressChanged(ProgressEvent progressEvent) {
-          switch (progressEvent.getEventType()) {
-          case TRANSFER_PART_COMPLETED_EVENT:
-            incrementWriteOperations();
-            break;
-          default:
-            break;
-          }
-        }
-      };
-
-      Copy copy = transfers.copy(copyObjectRequest);
-      copy.addProgressListener(progressListener);
-      try {
-        copy.waitForCopyResult();
-        incrementWriteOperations();
-        instrumentation.filesCopied(1, size);
-      } catch (InterruptedException e) {
-        throw new InterruptedIOException("Interrupted copying " + srcKey
-            + " to " + dstKey + ", cancelling");
-      }
-    } catch (AmazonClientException e) {
-      throw translateException("copyFile("+ srcKey+ ", " + dstKey + ")",
-          srcKey, e);
-    }
+    ProgressListener progressListener = progressEvent -> {
+      switch (progressEvent.getEventType()) {
+      case TRANSFER_PART_COMPLETED_EVENT:
+        incrementWriteOperations();
+        break;
+      default:
+        break;
+      }
+    };
+
+    once("copyFile(" + srcKey + ", " + dstKey + ")", srcKey,
+        () -> {
+          ObjectMetadata srcom = getObjectMetadata(srcKey);
+          ObjectMetadata dstom = cloneObjectMetadata(srcom);
+          setOptionalObjectMetadata(dstom);
+          CopyObjectRequest copyObjectRequest =
+              new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
+          setOptionalCopyObjectRequestParameters(copyObjectRequest);
+          copyObjectRequest.setCannedAccessControlList(cannedACL);
+          copyObjectRequest.setNewObjectMetadata(dstom);
+          Copy copy = transfers.copy(copyObjectRequest);
+          copy.addProgressListener(progressListener);
+          try {
+            copy.waitForCopyResult();
+            incrementWriteOperations();
+            instrumentation.filesCopied(1, size);
+          } catch (InterruptedException e) {
+            throw new InterruptedIOException("Interrupted copying " + srcKey
+                + " to " + dstKey + ", cancelling");
+          }
+        });
   }
 
   protected void setOptionalMultipartUploadRequestParameters(
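
The rewrite replaces the hand-rolled try/catch-and-translate wrapper with the once() helper (statically imported from org.apache.hadoop.fs.s3a.Invoker), keeping copyFile()'s body focused on the copy itself. A hedged sketch of the contract; the functional-interface name and exact signature are assumptions, the translation step is the key idea:

    // Runs the operation exactly once; no retries. Any AmazonClientException
    // is translated into an IOException carrying the action and path context.
    public static <T> T once(String action, String path, Operation<T> operation)
        throws IOException {
      try {
        return operation.execute();
      } catch (AmazonClientException e) {
        throw S3AUtils.translateException(action, path, e);
      }
    }

The same pattern appears again in the listLocatedStatus() hunk further down.
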
@@ -2626,9 +2623,12 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
 
   /**
    * Perform post-write actions.
+   * Calls {@link #deleteUnnecessaryFakeDirectories(Path)} and then
+   * {@link S3Guard#addAncestors(MetadataStore, Path, String)}.
    * This operation MUST be called after any PUT/multipart PUT completes
    * successfully.
-   * This includes
+   *
+   * The operation's actions include
    * <ol>
    * <li>Calling {@link #deleteUnnecessaryFakeDirectories(Path)}</li>
    * <li>Updating any metadata store with details on the newly created
@@ -2638,12 +2638,12 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * @param length total length of file written
    */
   @InterfaceAudience.Private
-  @Retries.RetryTranslated("Exceptions are swallowed")
+  @Retries.RetryExceptionsSwallowed
   void finishedWrite(String key, long length) {
     LOG.debug("Finished write to {}, len {}", key, length);
     Path p = keyToQualifiedPath(key);
-    deleteUnnecessaryFakeDirectories(p.getParent());
     Preconditions.checkArgument(length >= 0, "content length is negative");
+    deleteUnnecessaryFakeDirectories(p.getParent());
 
     // See note about failure semantics in S3Guard documentation
     try {
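
Swapping those two lines means the length precondition now fails fast, before any fake-directory deletions are issued for a write that is going to be rejected anyway.
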
@@ -2666,7 +2666,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * Retry policy: retrying; exceptions swallowed.
    * @param path path
    */
-  @Retries.RetryRaw("Exceptions are swallowed")
+  @Retries.RetryExceptionsSwallowed
   private void deleteUnnecessaryFakeDirectories(Path path) {
     List<DeleteObjectsRequest.KeyVersion> keysToRemove = new ArrayList<>();
     while (!path.isRoot()) {
@@ -2960,7 +2960,8 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * @throws IOException IO failure
    * @see <a href="http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html">Common Response Headers</a>
    */
   @Override
+  @Retries.RetryTranslated
   public EtagChecksum getFileChecksum(Path f, final long length)
       throws IOException {
     Preconditions.checkArgument(length >= 0);
@@ -3002,18 +3003,21 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * @throws IOException if any I/O error occurred
    */
   @Override
+  @Retries.OnceTranslated
   public RemoteIterator<LocatedFileStatus> listFiles(Path f,
       boolean recursive) throws FileNotFoundException, IOException {
     return innerListFiles(f, recursive,
         new Listing.AcceptFilesOnly(qualify(f)));
   }
 
+  @Retries.OnceTranslated
   public RemoteIterator<LocatedFileStatus> listFilesAndEmptyDirectories(Path f,
       boolean recursive) throws IOException {
     return innerListFiles(f, recursive,
         new Listing.AcceptAllButS3nDirs());
   }
 
+  @Retries.OnceTranslated
   private RemoteIterator<LocatedFileStatus> innerListFiles(Path f, boolean
       recursive, Listing.FileStatusAcceptor acceptor) throws IOException {
     entryPoint(INVOCATION_LIST_FILES);
@@ -3097,42 +3101,43 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * @throws IOException if any I/O error occurred
    */
   @Override
-  @Retries.OnceTranslated
+  @Retries.OnceTranslated("s3guard not retrying")
   public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f,
       final PathFilter filter)
       throws FileNotFoundException, IOException {
     entryPoint(INVOCATION_LIST_LOCATED_STATUS);
     Path path = qualify(f);
     LOG.debug("listLocatedStatus({}, {}", path, filter);
-    try {
-      // lookup dir triggers existence check
-      final FileStatus fileStatus = getFileStatus(path);
-      if (fileStatus.isFile()) {
-        // simple case: File
-        LOG.debug("Path is a file");
-        return new Listing.SingleStatusRemoteIterator(
-            filter.accept(path) ? toLocatedFileStatus(fileStatus) : null);
-      } else {
-        // directory: trigger a lookup
-        final String key = maybeAddTrailingSlash(pathToKey(path));
-        final Listing.FileStatusAcceptor acceptor =
-            new Listing.AcceptAllButSelfAndS3nDirs(path);
-        DirListingMetadata meta = metadataStore.listChildren(path);
-        final RemoteIterator<FileStatus> cachedFileStatusIterator =
-            listing.createProvidedFileStatusIterator(
-                S3Guard.dirMetaToStatuses(meta), filter, acceptor);
-        return (allowAuthoritative && meta != null && meta.isAuthoritative())
-            ? listing.createLocatedFileStatusIterator(cachedFileStatusIterator)
-            : listing.createLocatedFileStatusIterator(
-                listing.createFileStatusListingIterator(path,
-                    createListObjectsRequest(key, "/"),
-                    filter,
-                    acceptor,
-                    cachedFileStatusIterator));
-      }
-    } catch (AmazonClientException e) {
-      throw translateException("listLocatedStatus", path, e);
-    }
+    return once("listLocatedStatus", path.toString(),
+        () -> {
+          // lookup dir triggers existence check
+          final FileStatus fileStatus = getFileStatus(path);
+          if (fileStatus.isFile()) {
+            // simple case: File
+            LOG.debug("Path is a file");
+            return new Listing.SingleStatusRemoteIterator(
+                filter.accept(path) ? toLocatedFileStatus(fileStatus) : null);
+          } else {
+            // directory: trigger a lookup
+            final String key = maybeAddTrailingSlash(pathToKey(path));
+            final Listing.FileStatusAcceptor acceptor =
+                new Listing.AcceptAllButSelfAndS3nDirs(path);
+            DirListingMetadata meta = metadataStore.listChildren(path);
+            final RemoteIterator<FileStatus> cachedFileStatusIterator =
+                listing.createProvidedFileStatusIterator(
+                    S3Guard.dirMetaToStatuses(meta), filter, acceptor);
+            return (allowAuthoritative && meta != null
+                && meta.isAuthoritative())
+                ? listing.createLocatedFileStatusIterator(
+                    cachedFileStatusIterator)
+                : listing.createLocatedFileStatusIterator(
+                    listing.createFileStatusListingIterator(path,
+                        createListObjectsRequest(key, "/"),
+                        filter,
+                        acceptor,
+                        cachedFileStatusIterator));
+          }
+        });
   }
 
   /**
@@ -3159,6 +3164,8 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * @return Iterator over multipart uploads.
    * @throws IOException on failure
    */
+  @InterfaceAudience.Private
+  @Retries.RetryTranslated
   public MultipartUtils.UploadIterator listUploads(@Nullable String prefix)
       throws IOException {
     return MultipartUtils.listMultipartUploads(s3, invoker, bucket, maxKeys,
@@ -257,7 +257,15 @@ public final class S3Guard {
    * Update MetadataStore to reflect creation of the given directories.
    *
    * If an IOException is raised while trying to update the entry, this
-   * operation catches the exception and returns.
+   * operation catches the exception, swallows it and returns.
+   *
+   * @deprecated this is no longer called by {@code S3AFileSystem.innerMkdirs}.
+   * See: HADOOP-15079 (January 2018).
+   * It is currently retained because of its discussion in the method on
+   * atomicity and in case we need to reinstate it or adapt the current
+   * process of directory marker creation.
+   * But it is not being tested and so may age with time... consider
+   * deleting it in future if it's clear there's no need for it.
    * @param ms MetadataStore to update.
    * @param dirs null, or an ordered list of directories from leaf to root.
    *             E.g. if /a/ exists, and mkdirs(/a/b/c/d) is called, this
@@ -267,6 +275,8 @@ public final class S3Guard {
    * @param owner Hadoop user name.
    * @param authoritative Whether to mark new directories as authoritative.
    */
+  @Deprecated
+  @Retries.OnceExceptionsSwallowed
   public static void makeDirsOrdered(MetadataStore ms, List<Path> dirs,
       String owner, boolean authoritative) {
     if (dirs == null) {
@@ -29,12 +29,14 @@ import org.slf4j.LoggerFactory;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.net.URI;
+import java.util.UUID;
+import java.util.concurrent.Callable;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
 import static org.apache.hadoop.fs.s3a.Statistic.*;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
 import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
-import static org.junit.Assume.assumeFalse;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 
 /**
  * Use metrics to assert about the cost of file status queries.
@@ -54,6 +56,7 @@ public class ITestS3AFileOperationCost extends AbstractS3ATestBase {
     S3AFileSystem fs = getFileSystem();
     metadataRequests = new MetricDiff(fs, OBJECT_METADATA_REQUESTS);
     listRequests = new MetricDiff(fs, OBJECT_LIST_REQUESTS);
+    skipDuringFaultInjection(fs);
   }
 
   @Test
@@ -83,8 +86,8 @@ public class ITestS3AFileOperationCost extends AbstractS3ATestBase {
     fs.mkdirs(dir);
     resetMetricDiffs();
     S3AFileStatus status = fs.innerGetFileStatus(dir, true);
-    assertTrue("not empty: " + status,
-        status.isEmptyDirectory() == Tristate.TRUE);
+    assertSame("not empty: " + status, status.isEmptyDirectory(),
+        Tristate.TRUE);
 
     if (!fs.hasMetadataStore()) {
       metadataRequests.assertDiffEquals(2);
@@ -98,12 +101,8 @@ public class ITestS3AFileOperationCost extends AbstractS3ATestBase {
     S3AFileSystem fs = getFileSystem();
     Path path = path("missing");
     resetMetricDiffs();
-    try {
-      FileStatus status = fs.getFileStatus(path);
-      fail("Got a status back from a missing file path " + status);
-    } catch (FileNotFoundException expected) {
-      // expected
-    }
+    intercept(FileNotFoundException.class,
+        () -> fs.getFileStatus(path));
     metadataRequests.assertDiffEquals(2);
     listRequests.assertDiffEquals(1);
   }
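
Both this test and the next now use LambdaTestUtils.intercept() instead of try/fail/catch boilerplate: it fails the test when no exception is thrown, returns the exception when it matches, and rethrows anything unexpected. A simplified sketch of that contract; the real helper has many more overloads and richer diagnostics:

    public static <E extends Throwable, T> E intercept(
        Class<E> clazz, Callable<T> eval) throws Exception {
      T result;
      try {
        result = eval.call();
      } catch (Throwable t) {
        if (clazz.isInstance(t)) {
          return clazz.cast(t);  // expected: return for further assertions
        }
        if (t instanceof Exception) {
          throw (Exception) t;   // unexpected: fail the test with it
        }
        throw new AssertionError(t);
      }
      throw new AssertionError(
          "Expected " + clazz.getName() + " but got result: " + result);
    }
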
@@ -114,12 +113,8 @@ public class ITestS3AFileOperationCost extends AbstractS3ATestBase {
     S3AFileSystem fs = getFileSystem();
     Path path = path("missingdir/missingpath");
     resetMetricDiffs();
-    try {
-      FileStatus status = fs.getFileStatus(path);
-      fail("Got a status back from a missing file path " + status);
-    } catch (FileNotFoundException expected) {
-      // expected
-    }
+    intercept(FileNotFoundException.class,
+        () -> fs.getFileStatus(path));
     metadataRequests.assertDiffEquals(2);
     listRequests.assertDiffEquals(1);
   }
@@ -184,10 +179,11 @@ public class ITestS3AFileOperationCost extends AbstractS3ATestBase {
     }
   }
 
-  private void reset(MetricDiff... diffs) {
+  private boolean reset(MetricDiff... diffs) {
     for (MetricDiff diff : diffs) {
       diff.reset();
     }
+    return true;
   }
 
   @Test
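
reset() gains a boolean return purely so the rewritten test below can wrap it in a Callable<Boolean> and invoke it as reset.call() between phases:

    Callable<Boolean> reset = () ->
        reset(deleteRequests, directoriesCreated, directoriesDeleted,
            fakeDirectoriesDeleted);

A lambda assigned to Callable must return a value, hence the return true.
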
@@ -201,9 +197,7 @@ public class ITestS3AFileOperationCost extends AbstractS3ATestBase {
     // operations, it depends on side effects happening internally. With
     // metadata store enabled, it is brittle to change. We disable this test
     // before the internal behavior w/ or w/o metadata store.
-    assumeFalse(fs.hasMetadataStore());
+    // assumeFalse(fs.hasMetadataStore());
 
-    skipDuringFaultInjection(fs);
-
     Path srcBaseDir = path("src");
     mkdirs(srcBaseDir);
@@ -216,62 +210,171 @@ public class ITestS3AFileOperationCost extends AbstractS3ATestBase {
     MetricDiff directoriesCreated =
         new MetricDiff(fs, Statistic.DIRECTORIES_CREATED);
 
+    // when you call toString() on this, you get the stats
+    // so it gets auto-evaluated in log calls.
+    Object summary = new Object() {
+      @Override
+      public String toString() {
+        return String.format("[%s, %s, %s, %s]",
+            directoriesCreated, directoriesDeleted,
+            deleteRequests, fakeDirectoriesDeleted);
+      }
+    };
+
+    // reset operation to invoke
+    Callable<Boolean> reset = () ->
+        reset(deleteRequests, directoriesCreated, directoriesDeleted,
+            fakeDirectoriesDeleted);
+
     Path srcDir = new Path(srcBaseDir, "1/2/3/4/5/6");
     int srcDirDepth = directoriesInPath(srcDir);
     // one dir created, one removed
     mkdirs(srcDir);
-    String state = "after mkdir(srcDir)";
+    String state = "after mkdir(srcDir) " + summary;
     directoriesCreated.assertDiffEquals(state, 1);
     deleteRequests.assertDiffEquals(state, 1);
     directoriesDeleted.assertDiffEquals(state, 0);
     // HADOOP-14255 deletes unnecessary fake directory objects in mkdirs()
     fakeDirectoriesDeleted.assertDiffEquals(state, srcDirDepth - 1);
-    reset(deleteRequests, directoriesCreated, directoriesDeleted,
-        fakeDirectoriesDeleted);
+    reset.call();
 
     // creating a file should trigger demise of the src dir
     final Path srcFilePath = new Path(srcDir, "source.txt");
     touch(fs, srcFilePath);
-    state = "after touch(fs, srcFilePath)";
+    state = "after touch(fs, srcFilePath) " + summary;
     deleteRequests.assertDiffEquals(state, 1);
     directoriesCreated.assertDiffEquals(state, 0);
     directoriesDeleted.assertDiffEquals(state, 0);
     fakeDirectoriesDeleted.assertDiffEquals(state, srcDirDepth);
 
-    reset(deleteRequests, directoriesCreated, directoriesDeleted,
-        fakeDirectoriesDeleted);
+    reset.call();
 
+    // create a directory tree, expect the dir to be created and
+    // a request to delete all parent directories made.
     Path destBaseDir = path("dest");
     Path destDir = new Path(destBaseDir, "1/2/3/4/5/6");
     Path destFilePath = new Path(destDir, "dest.txt");
     mkdirs(destDir);
-    state = "after mkdir(destDir)";
+    state = "after mkdir(destDir) " + summary;
 
     int destDirDepth = directoriesInPath(destDir);
     directoriesCreated.assertDiffEquals(state, 1);
     deleteRequests.assertDiffEquals(state, 1);
     directoriesDeleted.assertDiffEquals(state, 0);
     fakeDirectoriesDeleted.assertDiffEquals(state, destDirDepth - 1);
-    reset(deleteRequests, directoriesCreated, directoriesDeleted,
-        fakeDirectoriesDeleted);
 
+    // create a new source file.
+    // Explicitly use a new path object to guarantee that the parent paths
+    // are different object instances
+    final Path srcFile2 = new Path(srcDir.toUri() + "/source2.txt");
+    touch(fs, srcFile2);
+
+    reset.call();
+
+    // rename the source file to the destination file.
+    // this tests the file rename path, not the dir rename path
+    // as srcFile2 exists, the parent dir of srcFilePath must not be created.
     fs.rename(srcFilePath, destFilePath);
-    state = "after rename(srcFilePath, destFilePath)";
-    directoriesCreated.assertDiffEquals(state, 1);
-    // one for the renamed file, one for the parent
+    state = String.format("after rename(srcFilePath, destFilePath)"
+        + " %s dest dir depth=%d",
+        summary,
+        destDirDepth);
+
+    directoriesCreated.assertDiffEquals(state, 0);
+    // one for the renamed file, one for the parent of the dest dir
     deleteRequests.assertDiffEquals(state, 2);
     directoriesDeleted.assertDiffEquals(state, 0);
     fakeDirectoriesDeleted.assertDiffEquals(state, destDirDepth);
 
-    reset(deleteRequests, directoriesCreated, directoriesDeleted,
-        fakeDirectoriesDeleted);
-
+    // these asserts come after the checks on iop counts, so they don't
+    // interfere
    assertIsFile(destFilePath);
     assertIsDirectory(srcDir);
+    assertPathDoesNotExist("should have gone in the rename", srcFilePath);
+    reset.call();
+
+    // rename the source file2 to the (no longer existing) srcFilePath.
+    // this tests the file rename path, not the dir rename path
+    // as srcFile2 exists, the parent dir of srcFilePath must not be created.
+    fs.rename(srcFile2, srcFilePath);
+    state = String.format("after rename(%s, %s) %s dest dir depth=%d",
+        srcFile2, srcFilePath,
+        summary,
+        destDirDepth);
+
+    // here we expect there to be no fake directories
+    directoriesCreated.assertDiffEquals(state, 0);
+    // one for the renamed file only
+    deleteRequests.assertDiffEquals(state, 1);
+    directoriesDeleted.assertDiffEquals(state, 0);
+    fakeDirectoriesDeleted.assertDiffEquals(state, 0);
   }
 
   private int directoriesInPath(Path path) {
     return path.isRoot() ? 0 : 1 + directoriesInPath(path.getParent());
   }
 
+  @Test
+  public void testCostOfRootRename() throws Throwable {
+    describe("assert that a root directory rename doesn't"
+        + " do much in terms of parent dir operations");
+    S3AFileSystem fs = getFileSystem();
+
+    // unique name, so that even when run in parallel tests, there's no conflict
+    String uuid = UUID.randomUUID().toString();
+    Path src = new Path("/src-" + uuid);
+    Path dest = new Path("/dest-" + uuid);
+
+    try {
+      MetricDiff deleteRequests =
+          new MetricDiff(fs, Statistic.OBJECT_DELETE_REQUESTS);
+      MetricDiff directoriesDeleted =
+          new MetricDiff(fs, Statistic.DIRECTORIES_DELETED);
+      MetricDiff fakeDirectoriesDeleted =
+          new MetricDiff(fs, Statistic.FAKE_DIRECTORIES_DELETED);
+      MetricDiff directoriesCreated =
+          new MetricDiff(fs, Statistic.DIRECTORIES_CREATED);
+      touch(fs, src);
+      fs.rename(src, dest);
+      Object summary = new Object() {
+        @Override
+        public String toString() {
+          return String.format("[%s, %s, %s, %s]",
+              directoriesCreated, directoriesDeleted,
+              deleteRequests, fakeDirectoriesDeleted);
+        }
+      };
+
+      String state = String.format("after touch(%s) %s",
+          src, summary);
+      touch(fs, src);
+      fs.rename(src, dest);
+      directoriesCreated.assertDiffEquals(state, 0);
+
+
+      state = String.format("after rename(%s, %s) %s",
+          src, dest, summary);
+      // here we expect there to be no fake directories
+      directoriesCreated.assertDiffEquals(state, 0);
+      // one for the renamed file only
+      deleteRequests.assertDiffEquals(state, 1);
+      directoriesDeleted.assertDiffEquals(state, 0);
+      fakeDirectoriesDeleted.assertDiffEquals(state, 0);
+
+      // delete that destination file, assert only the file delete was issued
+      reset(deleteRequests, directoriesCreated, directoriesDeleted,
+          fakeDirectoriesDeleted);
+
+      fs.delete(dest, false);
+      // here we expect there to be no fake directories
+      directoriesCreated.assertDiffEquals(state, 0);
+      // one for the deleted file
+      deleteRequests.assertDiffEquals(state, 1);
+      directoriesDeleted.assertDiffEquals(state, 0);
+      fakeDirectoriesDeleted.assertDiffEquals(state, 0);
+    } finally {
+      fs.delete(src, false);
+      fs.delete(dest, false);
+    }
+  }
 }