HADOOP-16383. Pass ITtlTimeProvider instance in initialize method in MetadataStore interface. Contributed by Gabor Bota. (#1009)
This commit is contained in:
parent 256fcc160e
commit c58e11bf52
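In short, this change moves the TTL time source out of the per-operation MetadataStore signatures and into initialization. A minimal sketch of the resulting call pattern, assuming the package layout used in the diff below; the class choice (LocalMetadataStore), bucket name and path are illustrative only, not part of this commit:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider;
import org.apache.hadoop.fs.s3a.s3guard.LocalMetadataStore;
import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
import org.apache.hadoop.fs.s3a.s3guard.S3Guard;

public class TtlInitSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // The time provider is now supplied once, when the store is initialized.
    ITtlTimeProvider ttlTimeProvider = new S3Guard.TtlTimeProvider(conf);
    MetadataStore ms = new LocalMetadataStore();
    ms.initialize(conf, ttlTimeProvider);
    // Mutating calls no longer carry the provider as an argument.
    // Before this commit: ms.delete(path, ttlTimeProvider);
    ms.delete(new Path("s3a://example-bucket/dir/file"));
    ms.close();
  }
}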
@@ -396,7 +396,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
 DEFAULT_METADATASTORE_METADATA_TTL, TimeUnit.MILLISECONDS);
 ttlTimeProvider = new S3Guard.TtlTimeProvider(authDirTtl);

-setMetadataStore(S3Guard.getMetadataStore(this));
+setMetadataStore(S3Guard.getMetadataStore(this, ttlTimeProvider));
 allowAuthoritativeMetadataStore = conf.getBoolean(METADATASTORE_AUTHORITATIVE,
 DEFAULT_METADATASTORE_AUTHORITATIVE);
 allowAuthoritativePaths = S3Guard.getAuthoritativePaths(this);
@@ -1767,7 +1767,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
 instrumentation.directoryDeleted();
 }
 deleteObject(key);
-metadataStore.delete(f, ttlTimeProvider);
+metadataStore.delete(f);
 }

 /**
@@ -2293,7 +2293,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
 }
 try(DurationInfo ignored =
 new DurationInfo(LOG, false, "Delete metastore")) {
-metadataStore.deleteSubtree(f, ttlTimeProvider);
+metadataStore.deleteSubtree(f);
 }
 } else {
 LOG.debug("delete: Path is a file: {}", key);
@@ -4066,6 +4066,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
 @VisibleForTesting
 protected void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) {
 this.ttlTimeProvider = ttlTimeProvider;
+metadataStore.setTtlTimeProvider(ttlTimeProvider);
 }

 /**
@@ -178,7 +178,7 @@ public final class MultiObjectDeleteSupport extends AbstractStoreOperation {
 // metastore entries
 deleted.forEach(path -> {
 try {
-metadataStore.delete(path, getStoreContext().getTimeProvider());
+metadataStore.delete(path);
 } catch (IOException e) {
 // trouble: we failed to delete the far end entry
 // try with the next one.
@@ -135,9 +135,7 @@ public class DelayedUpdateRenameTracker extends RenameTracker {

 @Override
 public void completeRename() throws IOException {
-metadataStore.move(sourcePaths, destMetas,
-getStoreContext().getTimeProvider(),
-getOperationState());
+metadataStore.move(sourcePaths, destMetas, getOperationState());
 super.completeRename();
 }

@@ -147,12 +145,10 @@ public class DelayedUpdateRenameTracker extends RenameTracker {
 try (DurationInfo ignored = new DurationInfo(LOG,
 "Cleaning up deleted paths")) {
 // the destination paths are updated; the source is left alone.
-metadataStore.move(new ArrayList<>(0), destMetas,
-getStoreContext().getTimeProvider(),
-getOperationState());
+metadataStore.move(new ArrayList<>(0), destMetas, getOperationState());
 for (Path deletedPath : deletedPaths) {
 // this is not ideal in that it may leave parent stuff around.
-metadataStore.delete(deletedPath, getStoreContext().getTimeProvider());
+metadataStore.delete(deletedPath);
 }
 deleteParentPaths();
 } catch (IOException | SdkBaseException e) {
@@ -185,7 +181,7 @@ public class DelayedUpdateRenameTracker extends RenameTracker {
 PathMetadata md = metadataStore.get(parent, true);
 if (md != null && md.isEmptyDirectory() == Tristate.TRUE) {
 // if were confident that this is empty: delete it.
-metadataStore.delete(parent, getStoreContext().getTimeProvider());
+metadataStore.delete(parent);
 }
 }
 }
@@ -200,8 +200,8 @@ import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.*;
 * sub-tree.
 *
 * Some mutating operations, notably
-* {@link MetadataStore#deleteSubtree(Path, ITtlTimeProvider)} and
-* {@link MetadataStore#move(Collection, Collection, ITtlTimeProvider, BulkOperationState)}
+* {@link MetadataStore#deleteSubtree(Path)} and
+* {@link MetadataStore#move(Collection, Collection, BulkOperationState)}
 * are less efficient with this schema.
 * They require mutating multiple items in the DynamoDB table.
 *
@@ -356,7 +356,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
 * Time source. This is used during writes when parent
 * entries need to be created.
 */
-private ITtlTimeProvider timeProvider;
+private ITtlTimeProvider ttlTimeProvider;

 /**
 * A utility function to create DynamoDB instance.
@@ -391,11 +391,13 @@ public class DynamoDBMetadataStore implements MetadataStore,
 * FS via {@link S3AFileSystem#shareCredentials(String)}; this will
 * increment the reference counter of these credentials.
 * @param fs {@code S3AFileSystem} associated with the MetadataStore
+* @param ttlTp the time provider to use for metadata expiry
 * @throws IOException on a failure
 */
 @Override
 @Retries.OnceRaw
-public void initialize(FileSystem fs) throws IOException {
+public void initialize(FileSystem fs, ITtlTimeProvider ttlTp)
+throws IOException {
 Preconditions.checkNotNull(fs, "Null filesystem");
 Preconditions.checkArgument(fs instanceof S3AFileSystem,
 "DynamoDBMetadataStore only supports S3A filesystem.");
@@ -433,7 +435,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
 this::retryEvent
 );

-timeProvider = new S3Guard.TtlTimeProvider(conf);
+this.ttlTimeProvider = ttlTp;
 initTable();

 instrumentation.initialized();
@@ -453,7 +455,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
 instrumentation = context.getInstrumentation().getS3GuardInstrumentation();
 username = context.getUsername();
 executor = context.createThrottledExecutor();
-timeProvider = Preconditions.checkNotNull(
+ttlTimeProvider = Preconditions.checkNotNull(
 context.getTimeProvider(),
 "ttlTimeProvider must not be null");
 }
@@ -468,7 +470,8 @@ public class DynamoDBMetadataStore implements MetadataStore,
 *
 * This is used to operate the metadata store directly beyond the scope of the
 * S3AFileSystem integration, e.g. command line tools.
-* Generally, callers should use {@link #initialize(FileSystem)}
+* Generally, callers should use
+* {@link MetadataStore#initialize(FileSystem, ITtlTimeProvider)}
 * with an initialized {@code S3AFileSystem} instance.
 *
 * Without a filesystem to act as a reference point, the configuration itself
@@ -479,13 +482,14 @@ public class DynamoDBMetadataStore implements MetadataStore,
 * using the base fs.s3a.* options, as there is no bucket to infer per-bucket
 * settings from.
 *
-* @see #initialize(FileSystem)
+* @see MetadataStore#initialize(FileSystem, ITtlTimeProvider)
 * @throws IOException if there is an error
 * @throws IllegalArgumentException if the configuration is incomplete
 */
 @Override
 @Retries.OnceRaw
-public void initialize(Configuration config) throws IOException {
+public void initialize(Configuration config,
+ITtlTimeProvider ttlTp) throws IOException {
 conf = config;
 // use the bucket as the DynamoDB table name if not specified in config
 tableName = conf.getTrimmed(S3GUARD_DDB_TABLE_NAME_KEY);
@@ -512,7 +516,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
 TimeUnit.SECONDS,
 "s3a-ddb-" + tableName);
 initDataAccessRetries(conf);
-timeProvider = new S3Guard.TtlTimeProvider(conf);
+this.ttlTimeProvider = ttlTp;
 initTable();
 }

@@ -540,16 +544,16 @@ public class DynamoDBMetadataStore implements MetadataStore,

 @Override
 @Retries.RetryTranslated
-public void delete(Path path, ITtlTimeProvider ttlTimeProvider)
+public void delete(Path path)
 throws IOException {
-innerDelete(path, true, ttlTimeProvider, null);
+innerDelete(path, true, null);
 }

 @Override
 @Retries.RetryTranslated
 public void forgetMetadata(Path path) throws IOException {
 LOG.debug("Forget metadata for {}", path);
-innerDelete(path, false, null, null);
+innerDelete(path, false, null);
 }

 /**
@@ -558,15 +562,12 @@ public class DynamoDBMetadataStore implements MetadataStore,
 * There is no check as to whether the entry exists in the table first.
 * @param path path to delete
 * @param tombstone flag to create a tombstone marker
-* @param ttlTimeProvider The time provider to set last_updated. Must not
-* be null if tombstone is true.
 * @param ancestorState ancestor state for logging
 * @throws IOException I/O error.
 */
 @Retries.RetryTranslated
 private void innerDelete(final Path path,
 final boolean tombstone,
-final ITtlTimeProvider ttlTimeProvider,
 final AncestorState ancestorState)
 throws IOException {
 checkPath(path);
@@ -615,7 +616,7 @@ public class DynamoDBMetadataStore implements MetadataStore,

 @Override
 @Retries.RetryTranslated
-public void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider)
+public void deleteSubtree(Path path)
 throws IOException {
 checkPath(path);
 LOG.debug("Deleting subtree from table {} in region {}: {}",
@@ -639,7 +640,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
 desc.hasNext();) {
 final Path pathToDelete = desc.next().getPath();
 futures.add(submit(executor, () -> {
-innerDelete(pathToDelete, true, ttlTimeProvider, state);
+innerDelete(pathToDelete, true, state);
 return null;
 }));
 if (futures.size() > S3GUARD_DDB_SUBMITTED_TASK_LIMIT) {
@@ -823,13 +824,11 @@ public class DynamoDBMetadataStore implements MetadataStore,
 * Callers are required to synchronize on ancestorState.
 * @param pathsToCreate paths to create
 * @param ancestorState ongoing ancestor state.
-* @param ttlTimeProvider Must not be null
 * @return the full ancestry paths
 */
 private Collection<DDBPathMetadata> completeAncestry(
 final Collection<DDBPathMetadata> pathsToCreate,
-final AncestorState ancestorState,
-final ITtlTimeProvider ttlTimeProvider) throws PathIOException {
+final AncestorState ancestorState) throws PathIOException {
 // Key on path to allow fast lookup
 Map<Path, DDBPathMetadata> ancestry = new HashMap<>();
 LOG.debug("Completing ancestry for {} paths", pathsToCreate.size());
@@ -913,9 +912,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
 @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
 @Override
 @Retries.RetryTranslated
-public void addAncestors(
-final Path qualifiedPath,
-final ITtlTimeProvider ttlTimeProvider,
+public void addAncestors(final Path qualifiedPath,
 @Nullable final BulkOperationState operationState) throws IOException {

 Collection<DDBPathMetadata> newDirs = new ArrayList<>();
@@ -1000,10 +997,8 @@ public class DynamoDBMetadataStore implements MetadataStore,
 */
 @Override
 @Retries.RetryTranslated
-public void move(
-@Nullable Collection<Path> pathsToDelete,
+public void move(@Nullable Collection<Path> pathsToDelete,
 @Nullable Collection<PathMetadata> pathsToCreate,
-final ITtlTimeProvider ttlTimeProvider,
 @Nullable final BulkOperationState operationState) throws IOException {
 if (pathsToDelete == null && pathsToCreate == null) {
 return;
@@ -1032,8 +1027,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
 newItems.addAll(
 completeAncestry(
 pathMetaToDDBPathMeta(pathsToCreate),
-ancestorState,
-extractTimeProvider(ttlTimeProvider)));
+ancestorState));
 }
 }
 // sort all the new items topmost first.
@@ -1222,7 +1216,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
 public void put(
 final Collection<? extends PathMetadata> metas,
 @Nullable final BulkOperationState operationState) throws IOException {
-innerPut(pathMetaToDDBPathMeta(metas), operationState, timeProvider);
+innerPut(pathMetaToDDBPathMeta(metas), operationState, ttlTimeProvider);
 }

 /**
@@ -1236,7 +1230,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
 * create entries in the table without parents.
 * @param metas metadata entries to write.
 * @param operationState (nullable) operational state for a bulk update
-* @param ttlTimeProvider
+* @param ttlTp The time provider for metadata expiry
 * @throws IOException failure.
 */
 @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@@ -1244,7 +1238,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
 private void innerPut(
 final Collection<DDBPathMetadata> metas,
 @Nullable final BulkOperationState operationState,
-final ITtlTimeProvider ttlTimeProvider) throws IOException {
+final ITtlTimeProvider ttlTp) throws IOException {
 if (metas.isEmpty()) {
 // Happens when someone calls put() with an empty list.
 LOG.debug("Ignoring empty list of entries to put");
@@ -1258,7 +1252,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
 Item[] items;
 synchronized (ancestorState) {
 items = pathMetadataToItem(
-completeAncestry(metas, ancestorState, ttlTimeProvider));
+completeAncestry(metas, ancestorState));
 }
 LOG.debug("Saving batch of {} items to table {}, region {}", items.length,
 tableName, region);
@@ -1644,7 +1638,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
 try {
 LOG.debug("innerPut on metas: {}", metas);
 if (!metas.isEmpty()) {
-innerPut(metas, state, timeProvider);
+innerPut(metas, state, ttlTimeProvider);
 }
 } catch (IOException e) {
 String msg = String.format("IOException while setting false "
@@ -2320,15 +2314,20 @@ public class DynamoDBMetadataStore implements MetadataStore,
 return new AncestorState(this, operation, dest);
 }

+@Override
+public void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) {
+this.ttlTimeProvider = ttlTimeProvider;
+}
+
 /**
 * Extract a time provider from the argument or fall back to the
 * one in the constructor.
-* @param ttlTimeProvider nullable time source passed in as an argument.
+* @param ttlTp nullable time source passed in as an argument.
 * @return a non-null time source.
 */
 private ITtlTimeProvider extractTimeProvider(
-@Nullable ITtlTimeProvider ttlTimeProvider) {
-return ttlTimeProvider != null ? ttlTimeProvider : timeProvider;
+@Nullable ITtlTimeProvider ttlTp) {
+return ttlTp != null ? ttlTp : this.ttlTimeProvider;
 }

 /**
@@ -79,8 +79,11 @@ public class LocalMetadataStore implements MetadataStore {

 private String username;

+private ITtlTimeProvider ttlTimeProvider;
+
 @Override
-public void initialize(FileSystem fileSystem) throws IOException {
+public void initialize(FileSystem fileSystem,
+ITtlTimeProvider ttlTp) throws IOException {
 Preconditions.checkNotNull(fileSystem);
 fs = fileSystem;
 URI fsURI = fs.getUri();
@@ -89,11 +92,12 @@ public class LocalMetadataStore implements MetadataStore {
 uriHost = null;
 }

-initialize(fs.getConf());
+initialize(fs.getConf(), ttlTp);
 }

 @Override
-public void initialize(Configuration conf) throws IOException {
+public void initialize(Configuration conf, ITtlTimeProvider ttlTp)
+throws IOException {
 Preconditions.checkNotNull(conf);
 int maxRecords = conf.getInt(S3GUARD_METASTORE_LOCAL_MAX_RECORDS,
 DEFAULT_S3GUARD_METASTORE_LOCAL_MAX_RECORDS);
@@ -110,6 +114,7 @@ public class LocalMetadataStore implements MetadataStore {

 localCache = builder.build();
 username = UserGroupInformation.getCurrentUser().getShortUserName();
+this.ttlTimeProvider = ttlTp;
 }

 @Override
@@ -122,7 +127,7 @@ public class LocalMetadataStore implements MetadataStore {
 }

 @Override
-public void delete(Path p, ITtlTimeProvider ttlTimeProvider)
+public void delete(Path p)
 throws IOException {
 doDelete(p, false, true, ttlTimeProvider);
 }
@@ -133,23 +138,23 @@ public class LocalMetadataStore implements MetadataStore {
 }

 @Override
-public void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider)
+public void deleteSubtree(Path path)
 throws IOException {
 doDelete(path, true, true, ttlTimeProvider);
 }

 private synchronized void doDelete(Path p, boolean recursive,
-boolean tombstone, ITtlTimeProvider ttlTimeProvider) {
+boolean tombstone, ITtlTimeProvider ttlTp) {

 Path path = standardize(p);

 // Delete entry from file cache, then from cached parent directory, if any

-deleteCacheEntries(path, tombstone, ttlTimeProvider);
+deleteCacheEntries(path, tombstone, ttlTp);

 if (recursive) {
 // Remove all entries that have this dir as path prefix.
-deleteEntryByAncestor(path, localCache, tombstone, ttlTimeProvider);
+deleteEntryByAncestor(path, localCache, tombstone, ttlTp);
 }
 }

@@ -202,10 +207,8 @@ public class LocalMetadataStore implements MetadataStore {
 }

 @Override
-public void move(
-@Nullable Collection<Path> pathsToDelete,
+public void move(@Nullable Collection<Path> pathsToDelete,
 @Nullable Collection<PathMetadata> pathsToCreate,
-ITtlTimeProvider ttlTimeProvider,
 @Nullable final BulkOperationState operationState) throws IOException {
 LOG.info("Move {} to {}", pathsToDelete, pathsToCreate);

@@ -222,7 +225,7 @@ public class LocalMetadataStore implements MetadataStore {
 // 1. Delete pathsToDelete
 for (Path meta : pathsToDelete) {
 LOG.debug("move: deleting metadata {}", meta);
-delete(meta, ttlTimeProvider);
+delete(meta);
 }

 // 2. Create new destination path metadata
@@ -487,7 +490,7 @@ public class LocalMetadataStore implements MetadataStore {
 * lock held.
 */
 private void deleteCacheEntries(Path path, boolean tombstone,
-ITtlTimeProvider ttlTimeProvider) {
+ITtlTimeProvider ttlTp) {
 LocalMetadataEntry entry = localCache.getIfPresent(path);
 // If there's no entry, delete should silently succeed
 // (based on MetadataStoreTestBase#testDeleteNonExisting)
@@ -501,7 +504,7 @@ public class LocalMetadataStore implements MetadataStore {
 if(entry.hasPathMeta()){
 if (tombstone) {
 PathMetadata pmd = PathMetadata.tombstone(path);
-pmd.setLastUpdated(ttlTimeProvider.getNow());
+pmd.setLastUpdated(ttlTp.getNow());
 entry.setPathMetadata(pmd);
 } else {
 entry.setPathMetadata(null);
@@ -528,7 +531,7 @@ public class LocalMetadataStore implements MetadataStore {
 LOG.debug("removing parent's entry for {} ", path);
 if (tombstone) {
 dir.markDeleted(path);
-dir.setLastUpdated(ttlTimeProvider.getNow());
+dir.setLastUpdated(ttlTp.getNow());
 } else {
 dir.remove(path);
 }
@@ -594,9 +597,13 @@ public class LocalMetadataStore implements MetadataStore {
 null);
 }

+@Override
+public void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) {
+this.ttlTimeProvider = ttlTimeProvider;
+}
+
 @Override
 public void addAncestors(final Path qualifiedPath,
-ITtlTimeProvider ttlTimeProvider,
 @Nullable final BulkOperationState operationState) throws IOException {

 Collection<PathMetadata> newDirs = new ArrayList<>();
@@ -50,17 +50,21 @@ public interface MetadataStore extends Closeable {
 * Performs one-time initialization of the metadata store.
 *
 * @param fs {@code FileSystem} associated with the MetadataStore
+* @param ttlTimeProvider the time provider to use for metadata expiry
 * @throws IOException if there is an error
 */
-void initialize(FileSystem fs) throws IOException;
+void initialize(FileSystem fs, ITtlTimeProvider ttlTimeProvider)
+throws IOException;

 /**
 * Performs one-time initialization of the metadata store via configuration.
-* @see #initialize(FileSystem)
+* @see #initialize(FileSystem, ITtlTimeProvider)
 * @param conf Configuration.
+* @param ttlTimeProvider the time provider to use for metadata expiry
 * @throws IOException if there is an error
 */
-void initialize(Configuration conf) throws IOException;
+void initialize(Configuration conf,
+ITtlTimeProvider ttlTimeProvider) throws IOException;

 /**
 * Deletes exactly one path, leaving a tombstone to prevent lingering,
@@ -71,16 +75,14 @@ public interface MetadataStore extends Closeable {
 * the lastUpdated field of the record has to be updated to <pre>now</pre>.
 *
 * @param path the path to delete
-* @param ttlTimeProvider the time provider to set last_updated. Must not
-* be null.
 * @throws IOException if there is an error
 */
-void delete(Path path, ITtlTimeProvider ttlTimeProvider)
+void delete(Path path)
 throws IOException;

 /**
 * Removes the record of exactly one path. Does not leave a tombstone (see
-* {@link MetadataStore#delete(Path, ITtlTimeProvider)}. It is currently
+* {@link MetadataStore#delete(Path)}. It is currently
 * intended for testing only, and a need to use it as part of normal
 * FileSystem usage is not anticipated.
 *
@@ -103,11 +105,9 @@ public interface MetadataStore extends Closeable {
 * the lastUpdated field of all records have to be updated to <pre>now</pre>.
 *
 * @param path the root of the sub-tree to delete
-* @param ttlTimeProvider the time provider to set last_updated. Must not
-* be null.
 * @throws IOException if there is an error
 */
-void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider)
+void deleteSubtree(Path path)
 throws IOException;

 /**
@@ -152,14 +152,11 @@ public interface MetadataStore extends Closeable {
 * must have their last updated timestamps set through
 * {@link S3Guard#patchLastUpdated(Collection, ITtlTimeProvider)}.
 * @param qualifiedPath path to update
-* @param timeProvider time provider for timestamps
 * @param operationState (nullable) operational state for a bulk update
 * @throws IOException failure
 */
 @RetryTranslated
-void addAncestors(
-Path qualifiedPath,
-@Nullable ITtlTimeProvider timeProvider,
+void addAncestors(Path qualifiedPath,
 @Nullable BulkOperationState operationState) throws IOException;

 /**
@@ -184,16 +181,12 @@ public interface MetadataStore extends Closeable {
 * source directory tree of the move.
 * @param pathsToCreate Collection of all PathMetadata for the new paths
 * that were created at the destination of the rename().
-* @param ttlTimeProvider the time provider to set last_updated. Must not
-* be null.
 * @param operationState Any ongoing state supplied to the rename tracker
 * which is to be passed in with each move operation.
 * @throws IOException if there is an error
 */
-void move(
-@Nullable Collection<Path> pathsToDelete,
+void move(@Nullable Collection<Path> pathsToDelete,
 @Nullable Collection<PathMetadata> pathsToCreate,
-ITtlTimeProvider ttlTimeProvider,
 @Nullable BulkOperationState operationState) throws IOException;

 /**
@@ -378,4 +371,13 @@ public interface MetadataStore extends Closeable {
 return null;
 }

+/**
+* The TtlTimeProvider has to be set during the initialization for the
+* metadatastore, but this method can be used for testing, and change the
+* instance during runtime.
+*
+* @param ttlTimeProvider
+*/
+void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider);
+
 }
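The new MetadataStore#setTtlTimeProvider hook above is documented as a test-time override. One way a test might use it is with a controllable clock; the sketch below is illustrative only: the class name is invented, getNow() appears elsewhere in this diff, and getMetadataTtl() is assumed to be the interface's other accessor.

import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider;

/** Test-only clock whose "now" can be moved forward on demand. */
public class ManualTtlTimeProvider implements ITtlTimeProvider {

  private long now;
  private final long metadataTtl;

  public ManualTtlTimeProvider(long startMillis, long metadataTtlMillis) {
    this.now = startMillis;
    this.metadataTtl = metadataTtlMillis;
  }

  /** Advance the fake clock, e.g. past the metadata TTL. */
  public void advance(long millis) {
    now += millis;
  }

  @Override
  public long getNow() {
    return now;
  }

  @Override
  public long getMetadataTtl() {
    return metadataTtl;
  }
}

A test could then call ms.setTtlTimeProvider(new ManualTtlTimeProvider(0, 1000)) on an already-initialized store and advance the clock to push entries past their TTL without sleeping.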
@@ -40,11 +40,13 @@ import java.util.Map;
 public class NullMetadataStore implements MetadataStore {

 @Override
-public void initialize(FileSystem fs) throws IOException {
+public void initialize(FileSystem fs, ITtlTimeProvider ttlTimeProvider)
+throws IOException {
 }

 @Override
-public void initialize(Configuration conf) throws IOException {
+public void initialize(Configuration conf, ITtlTimeProvider ttlTimeProvider)
+throws IOException {
 }

 @Override
@@ -52,7 +54,7 @@ public class NullMetadataStore implements MetadataStore {
 }

 @Override
-public void delete(Path path, ITtlTimeProvider ttlTimeProvider)
+public void delete(Path path)
 throws IOException {
 }

@@ -61,7 +63,7 @@ public class NullMetadataStore implements MetadataStore {
 }

 @Override
-public void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider)
+public void deleteSubtree(Path path)
 throws IOException {
 }

@@ -84,7 +86,6 @@ public class NullMetadataStore implements MetadataStore {
 @Override
 public void move(Collection<Path> pathsToDelete,
 Collection<PathMetadata> pathsToCreate,
-ITtlTimeProvider ttlTimeProvider,
 final BulkOperationState operationState) throws IOException {
 }

@@ -146,9 +147,12 @@ public class NullMetadataStore implements MetadataStore {
 return new NullRenameTracker(storeContext, source, dest, this);
 }

+@Override
+public void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) {
+}
+
 @Override
 public void addAncestors(final Path qualifiedPath,
-final ITtlTimeProvider timeProvider,
 @Nullable final BulkOperationState operationState) throws IOException {
 }

@@ -155,9 +155,7 @@ public class ProgressiveRenameTracker extends RenameTracker {
 // no entries are deleted at this point.
 try (DurationInfo ignored = new DurationInfo(LOG, false,
 "Adding new metastore entries")) {
-store.move(null, entriesToAdd,
-getStoreContext().getTimeProvider(),
-getOperationState());
+store.move(null, entriesToAdd, getOperationState());
 }
 }

@@ -199,9 +197,7 @@ public class ProgressiveRenameTracker extends RenameTracker {
 // ...so update the store.
 try (DurationInfo ignored = new DurationInfo(LOG, false,
 "adding %s metastore entries", entriesToAdd.size())) {
-store.move(null, entriesToAdd,
-getStoreContext().getTimeProvider(),
-getOperationState());
+store.move(null, entriesToAdd, getOperationState());
 }
 }

@@ -218,9 +214,7 @@ public class ProgressiveRenameTracker extends RenameTracker {
 getSourceRoot(),
 getDest(),
 getOwner());
-getMetadataStore().move(toDelete, toAdd,
-getStoreContext().getTimeProvider(),
-getOperationState());
+getMetadataStore().move(toDelete, toAdd, getOperationState());
 }
 }

@@ -236,9 +230,7 @@ public class ProgressiveRenameTracker extends RenameTracker {
 // delete the paths from the metastore
 try (DurationInfo ignored = new DurationInfo(LOG, false,
 "delete %s metastore entries", paths.size())) {
-getMetadataStore().move(paths, null,
-getStoreContext().getTimeProvider(),
-getOperationState());
+getMetadataStore().move(paths, null, getOperationState());
 }
 }

@@ -83,17 +83,19 @@ public final class S3Guard {
 /**
 * Create a new instance of the configured MetadataStore.
 * The returned MetadataStore will have been initialized via
-* {@link MetadataStore#initialize(FileSystem)} by this function before
-* returning it. Callers must clean up by calling
+* {@link MetadataStore#initialize(FileSystem, ITtlTimeProvider)}
+* by this function before returning it. Callers must clean up by calling
 * {@link MetadataStore#close()} when done using the MetadataStore.
 *
 * @param fs FileSystem whose Configuration specifies which
 * implementation to use.
+* @param ttlTimeProvider
 * @return Reference to new MetadataStore.
 * @throws IOException if the metadata store cannot be instantiated
 */
 @Retries.OnceTranslated
-public static MetadataStore getMetadataStore(FileSystem fs)
+public static MetadataStore getMetadataStore(FileSystem fs,
+ITtlTimeProvider ttlTimeProvider)
 throws IOException {
 Preconditions.checkNotNull(fs);
 Configuration conf = fs.getConf();
@@ -104,7 +106,7 @@ public final class S3Guard {
 msInstance = ReflectionUtils.newInstance(msClass, conf);
 LOG.debug("Using {} metadata store for {} filesystem",
 msClass.getSimpleName(), fs.getScheme());
-msInstance.initialize(fs);
+msInstance.initialize(fs, ttlTimeProvider);
 return msInstance;
 } catch (FileNotFoundException e) {
 // Don't log this exception as it means the table doesn't exist yet;
@@ -521,7 +523,7 @@ public final class S3Guard {
 /**
 * This adds all new ancestors of a path as directories.
 * This forwards to
-* {@link MetadataStore#addAncestors(Path, ITtlTimeProvider, BulkOperationState)}.
+* {@link MetadataStore#addAncestors(Path, BulkOperationState)}.
 * <p>
 * Originally it implemented the logic to probe for an add ancestors,
 * but with the addition of a store-specific bulk operation state
@@ -538,7 +540,7 @@ public final class S3Guard {
 final Path qualifiedPath,
 final ITtlTimeProvider timeProvider,
 @Nullable final BulkOperationState operationState) throws IOException {
-metadataStore.addAncestors(qualifiedPath, timeProvider, operationState);
+metadataStore.addAncestors(qualifiedPath, operationState);
 }

 /**
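With the factory change above, callers that previously relied on S3Guard.getMetadataStore(fs) now have to supply the time provider themselves. A minimal sketch of that wiring, assuming a caller outside S3AFileSystem; the helper class and method names here are invented for illustration:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider;
import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
import org.apache.hadoop.fs.s3a.s3guard.S3Guard;

public final class MetadataStoreWiring {
  private MetadataStoreWiring() {
  }

  /** Create and initialize the configured store for a filesystem. */
  public static MetadataStore bind(FileSystem fs) throws IOException {
    // Build the provider from the filesystem's configuration, mirroring
    // what S3AFileSystem.initialize() does earlier in this commit.
    ITtlTimeProvider ttlTimeProvider = new S3Guard.TtlTimeProvider(fs.getConf());
    // The factory now forwards the provider into MetadataStore#initialize.
    return S3Guard.getMetadataStore(fs, ttlTimeProvider);
  }
}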
@@ -314,9 +314,9 @@ public abstract class S3GuardTool extends Configured implements Tool {
 }

 if (filesystem == null) {
-getStore().initialize(conf);
+getStore().initialize(conf, new S3Guard.TtlTimeProvider(conf));
 } else {
-getStore().initialize(filesystem);
+getStore().initialize(filesystem, new S3Guard.TtlTimeProvider(conf));
 }
 LOG.info("Metadata store {} is initialized.", getStore());
 return getStore();
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;

+import org.apache.hadoop.fs.s3a.s3guard.S3Guard;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -88,7 +89,8 @@ public class ITestS3AMetadataPersistenceException extends AbstractS3ATestBase {
 ioException = new IOException();
 IOExceptionMetadataStore metadataStore =
 new IOExceptionMetadataStore(ioException);
-metadataStore.initialize(getConfiguration());
+metadataStore.initialize(getConfiguration(),
+new S3Guard.TtlTimeProvider(getConfiguration()));
 fs.setMetadataStore(metadataStore);
 }

@@ -261,11 +261,13 @@ public class TestPartialDeleteFailures {
 private final List<Path> created = new ArrayList<>();

 @Override
-public void initialize(final FileSystem fs) {
+public void initialize(final FileSystem fs,
+ITtlTimeProvider ttlTimeProvider) {
 }

 @Override
-public void initialize(final Configuration conf) {
+public void initialize(final Configuration conf,
+ITtlTimeProvider ttlTimeProvider) {
 }

 @Override
@@ -316,21 +318,18 @@ public class TestPartialDeleteFailures {
 }

 @Override
-public void delete(final Path path,
-final ITtlTimeProvider ttlTimeProvider) {
+public void delete(final Path path) {
 deleted.add(path);
 }

 @Override
-public void deleteSubtree(final Path path,
-final ITtlTimeProvider ttlTimeProvider) {
+public void deleteSubtree(final Path path) {

 }

 @Override
 public void move(@Nullable final Collection<Path> pathsToDelete,
 @Nullable final Collection<PathMetadata> pathsToCreate,
-final ITtlTimeProvider ttlTimeProvider,
 @Nullable final BulkOperationState operationState) {
 }

@@ -352,6 +351,10 @@ public class TestPartialDeleteFailures {
 return null;
 }

+@Override
+public void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) {
+}
+
 @Override
 public Map<String, String> getDiagnostics() {
 return null;
@@ -384,7 +387,6 @@ public class TestPartialDeleteFailures {

 @Override
 public void addAncestors(final Path qualifiedPath,
-final ITtlTimeProvider timeProvider,
 @Nullable final BulkOperationState operationState) {

 }
@@ -210,7 +210,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
 enableOnDemand(conf);

 ddbmsStatic = new DynamoDBMetadataStore();
-ddbmsStatic.initialize(conf);
+ddbmsStatic.initialize(conf, new S3Guard.TtlTimeProvider(conf));
 }

 @AfterClass
@@ -416,7 +416,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
 conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
 DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore();
 try {
-ddbms.initialize(s3afs);
+ddbms.initialize(s3afs, new S3Guard.TtlTimeProvider(conf));
 verifyTableInitialized(tableName, ddbms.getDynamoDB());
 assertNotNull(ddbms.getTable());
 assertEquals(tableName, ddbms.getTable().getTableName());
@@ -445,14 +445,14 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
 getFileSystem().getBucketLocation());
 conf.unset(S3GUARD_DDB_REGION_KEY);
 try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
-ddbms.initialize(conf);
+ddbms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
 fail("Should have failed because the table name is not set!");
 } catch (IllegalArgumentException ignored) {
 }
 // config table name
 conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
 try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
-ddbms.initialize(conf);
+ddbms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
 fail("Should have failed because as the region is not set!");
 } catch (IllegalArgumentException ignored) {
 }
@@ -460,7 +460,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
 conf.set(S3GUARD_DDB_REGION_KEY, savedRegion);
 DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore();
 try {
-ddbms.initialize(conf);
+ddbms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
 verifyTableInitialized(tableName, ddbms.getDynamoDB());
 assertNotNull(ddbms.getTable());
 assertEquals(tableName, ddbms.getTable().getTableName());
@@ -590,7 +590,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
 ThrottleTracker throttleTracker = new ThrottleTracker(ms);
 try(DurationInfo ignored = new DurationInfo(LOG, true,
 "Move")) {
-ms.move(pathsToDelete, newMetas, getTtlTimeProvider(), state);
+ms.move(pathsToDelete, newMetas, state);
 }
 LOG.info("Throttle status {}", throttleTracker);
 assertEquals("Number of children in source directory",
@@ -662,7 +662,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
 tagConfiguration(conf);
 DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore();
 try {
-ddbms.initialize(conf);
+ddbms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
 Table table = verifyTableInitialized(tableName, ddbms.getDynamoDB());
 // check the tagging too
 verifyStoreTags(createTagMap(), ddbms);
@@ -718,7 +718,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
 clearBucketOption(conf, b, S3GUARD_DDB_TABLE_NAME_KEY);
 conf.unset(S3GUARD_DDB_TABLE_CREATE_KEY);
 try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
-ddbms.initialize(s3afs);
+ddbms.initialize(s3afs, new S3Guard.TtlTimeProvider(conf));
 // if an exception was not raised, a table was created.
 // So destroy it before failing.
 ddbms.destroy();
@@ -820,8 +820,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
 1024, false))
 );

-ddbms.move(fullSourcePaths, pathsToCreate, getTtlTimeProvider(),
-bulkWrite);
+ddbms.move(fullSourcePaths, pathsToCreate, bulkWrite);
 bulkWrite.close();
 // assert that all the ancestors should have been populated automatically
 List<String> paths = Lists.newArrayList(
@@ -923,7 +922,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
 enableOnDemand(conf);
 DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore();
 try {
-ddbms.initialize(s3afs);
+ddbms.initialize(s3afs, new S3Guard.TtlTimeProvider(conf));
 // we can list the empty table
 ddbms.listChildren(testPath);
 DynamoDB dynamoDB = ddbms.getDynamoDB();
@@ -148,7 +148,7 @@ public class ITestDynamoDBMetadataStoreScale
 conf.set(S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY, "5ms");

 DynamoDBMetadataStore ms = new DynamoDBMetadataStore();
-ms.initialize(conf);
+ms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
 // wire up the owner FS so that we can make assertions about throttle
 // events
 ms.bindToOwnerFilesystem(fs);
@@ -337,7 +337,7 @@ public class ITestDynamoDBMetadataStoreScale
 private void retryingDelete(final Path path) {
 try {
 ddbms.getInvoker().retry("Delete ", path.toString(), true,
-() -> ddbms.delete(path, new S3Guard.TtlTimeProvider(getConf())));
+() -> ddbms.delete(path));
 } catch (IOException e) {
 LOG.warn("Failed to delete {}: ", path, e);
 }
@@ -432,7 +432,7 @@ public class ITestDynamoDBMetadataStoreScale
 OPERATIONS_PER_THREAD,
 expectThrottling(),
 () -> {
-ddbms.delete(path, time);
+ddbms.delete(path);
 });
 }
 }
@@ -97,7 +97,7 @@ public class ITestS3GuardConcurrentOps extends AbstractS3ATestBase {

 //now init the store; this should increment the ref count.
 DynamoDBMetadataStore ms = new DynamoDBMetadataStore();
-ms.initialize(fs);
+ms.initialize(fs, new S3Guard.TtlTimeProvider(conf));

 // the ref count should have gone up
 assertEquals("Credential Ref count unchanged after initializing metastore "
@@ -145,7 +145,7 @@ public class ITestS3GuardConcurrentOps extends AbstractS3ATestBase {

 Exception result = null;
 try (DynamoDBMetadataStore store = new DynamoDBMetadataStore()) {
-store.initialize(conf);
+store.initialize(conf, new S3Guard.TtlTimeProvider(conf));
 } catch (Exception e) {
 LOG.error(e.getClass() + ": " + e.getMessage());
 result = e;
@@ -151,7 +151,7 @@ public class ITestS3GuardToolDynamoDB extends AbstractS3GuardToolTestBase {

 // Check. Should create new metadatastore with the table name set.
 try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
-ddbms.initialize(conf);
+ddbms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
 ListTagsOfResourceRequest listTagsOfResourceRequest = new ListTagsOfResourceRequest()
 .withResourceArn(ddbms.getTable().getDescription().getTableArn());
 List<Tag> tags = ddbms.getAmazonDynamoDB().listTagsOfResource(listTagsOfResourceRequest).getTags();
@@ -126,7 +126,8 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
 ms = contract.getMetadataStore();
 assertNotNull("null MetadataStore", ms);
 assertNotNull("null FileSystem", contract.getFileSystem());
-ms.initialize(contract.getFileSystem());
+ms.initialize(contract.getFileSystem(),
+new S3Guard.TtlTimeProvider(contract.getFileSystem().getConf()));
 ttlTimeProvider =
 new S3Guard.TtlTimeProvider(contract.getFileSystem().getConf());
 }
@@ -333,7 +334,7 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
 public void testDelete() throws Exception {
 setUpDeleteTest();

-ms.delete(strToPath("/ADirectory1/db1/file2"), ttlTimeProvider);
+ms.delete(strToPath("/ADirectory1/db1/file2"));

 /* Ensure delete happened. */
 assertDirectorySize("/ADirectory1/db1", 1);
@@ -362,7 +363,7 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
 if (!allowMissing()) {
 assertCached(p + "/ADirectory1/db1");
 }
-ms.deleteSubtree(strToPath(p + "/ADirectory1/db1/"), ttlTimeProvider);
+ms.deleteSubtree(strToPath(p + "/ADirectory1/db1/"));

 assertEmptyDirectory(p + "/ADirectory1");
 assertDeleted(p + "/ADirectory1/db1");
@@ -382,7 +383,7 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
 public void testDeleteRecursiveRoot() throws Exception {
 setUpDeleteTest();

-ms.deleteSubtree(strToPath("/"), ttlTimeProvider);
+ms.deleteSubtree(strToPath("/"));
 assertDeleted("/ADirectory1");
 assertDeleted("/ADirectory2");
 assertDeleted("/ADirectory2/db1");
@@ -393,10 +394,10 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
 @Test
 public void testDeleteNonExisting() throws Exception {
 // Path doesn't exist, but should silently succeed
-ms.delete(strToPath("/bobs/your/uncle"), ttlTimeProvider);
+ms.delete(strToPath("/bobs/your/uncle"));

 // Ditto.
-ms.deleteSubtree(strToPath("/internets"), ttlTimeProvider);
+ms.deleteSubtree(strToPath("/internets"));
 }


@@ -434,7 +435,7 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
 }

 if (!(ms instanceof NullMetadataStore)) {
-ms.delete(strToPath(filePath), ttlTimeProvider);
+ms.delete(strToPath(filePath));
 meta = ms.get(strToPath(filePath));
 assertTrue("Tombstone not left for deleted file", meta.isDeleted());
 }
@@ -612,7 +613,7 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
 destMetas.add(new PathMetadata(makeDirStatus("/b1")));
 destMetas.add(new PathMetadata(makeFileStatus("/b1/file1", 100)));
 destMetas.add(new PathMetadata(makeFileStatus("/b1/file2", 100)));
-ms.move(srcPaths, destMetas, ttlTimeProvider, null);
+ms.move(srcPaths, destMetas, null);

 // Assert src is no longer there
 dirMeta = ms.listChildren(strToPath("/a1"));
@@ -662,11 +663,11 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {

 // Make sure delete is correct as well
 if (!allowMissing()) {
-ms.delete(new Path(p2), ttlTimeProvider);
+ms.delete(new Path(p2));
 meta = ms.get(new Path(p1));
 assertNotNull("Path should not have been deleted", meta);
 }
-ms.delete(new Path(p1), ttlTimeProvider);
+ms.delete(new Path(p1));
 }

 @Test
@@ -144,15 +144,15 @@ public abstract class AbstractITestS3AMetadataStoreScale extends
 toDelete = movedPaths;
 toCreate = origMetas;
 }
-ms.move(toDelete, toCreate, ttlTimeProvider, null);
+ms.move(toDelete, toCreate, null);
 }
 moveTimer.end();
 printTiming(LOG, "move", moveTimer, operations);
 } finally {
 // Cleanup
 clearMetadataStore(ms, count);
-ms.move(origPaths, null, ttlTimeProvider, null);
-ms.move(movedPaths, null, ttlTimeProvider, null);
+ms.move(origPaths, null, null);
+ms.move(movedPaths, null, null);
 }
 }
 }
@@ -215,7 +215,7 @@ public abstract class AbstractITestS3AMetadataStoreScale extends
 throws IOException {
 describe("Recursive deletion");
 NanoTimer deleteTimer = new NanoTimer();
-ms.deleteSubtree(BUCKET_ROOT, ttlTimeProvider);
+ms.deleteSubtree(BUCKET_ROOT);
 deleteTimer.end();
 printTiming(LOG, "delete", deleteTimer, count);
 }
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.s3a.scale;

 import org.apache.hadoop.fs.s3a.s3guard.LocalMetadataStore;
 import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
+import org.apache.hadoop.fs.s3a.s3guard.S3Guard;

 import java.io.IOException;

@@ -31,7 +32,7 @@ public class ITestLocalMetadataStoreScale
 @Override
 public MetadataStore createMetadataStore() throws IOException {
 MetadataStore ms = new LocalMetadataStore();
-ms.initialize(getFileSystem());
+ms.initialize(getFileSystem(), new S3Guard.TtlTimeProvider(getConf()));
 return ms;
 }
 }