HADOOP-16383. Pass ITtlTimeProvider instance in initialize method in MetadataStore interface. Contributed by Gabor Bota. (#1009)

Gabor Bota 2019-07-17 16:24:39 +02:00 committed by GitHub
parent 256fcc160e
commit c58e11bf52
19 changed files with 159 additions and 151 deletions

View File

@@ -396,7 +396,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
         DEFAULT_METADATASTORE_METADATA_TTL, TimeUnit.MILLISECONDS);
     ttlTimeProvider = new S3Guard.TtlTimeProvider(authDirTtl);
-    setMetadataStore(S3Guard.getMetadataStore(this));
+    setMetadataStore(S3Guard.getMetadataStore(this, ttlTimeProvider));
     allowAuthoritativeMetadataStore = conf.getBoolean(METADATASTORE_AUTHORITATIVE,
         DEFAULT_METADATASTORE_AUTHORITATIVE);
     allowAuthoritativePaths = S3Guard.getAuthoritativePaths(this);
@@ -1767,7 +1767,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
       instrumentation.directoryDeleted();
     }
     deleteObject(key);
-    metadataStore.delete(f, ttlTimeProvider);
+    metadataStore.delete(f);
   }

   /**
@@ -2293,7 +2293,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
       }
       try(DurationInfo ignored =
           new DurationInfo(LOG, false, "Delete metastore")) {
-        metadataStore.deleteSubtree(f, ttlTimeProvider);
+        metadataStore.deleteSubtree(f);
       }
     } else {
       LOG.debug("delete: Path is a file: {}", key);
@@ -4066,6 +4066,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
   @VisibleForTesting
   protected void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) {
     this.ttlTimeProvider = ttlTimeProvider;
+    metadataStore.setTtlTimeProvider(ttlTimeProvider);
   }

   /**

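The wiring above can be summarised with a short sketch. This is illustrative only, not the actual S3AFileSystem code; the imports, package layout, the fs handle and the authDirTtlMillis value are assumptions. The point it shows is grounded in the hunks: the TTL provider is now created before the store, handed to S3Guard.getMetadataStore() (which forwards it into initialize), and any later swap of the provider has to be pushed down to the store, which is what the extra line in setTtlTimeProvider() does.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider;
    import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
    import org.apache.hadoop.fs.s3a.s3guard.S3Guard;

    /** Illustrative helper only; names and import paths are assumptions. */
    public final class TtlWiringSketch {

      private TtlWiringSketch() {
      }

      /** Build a store whose time source is fixed when it is constructed. */
      public static MetadataStore openStore(FileSystem fs, long authDirTtlMillis)
          throws IOException {
        // Provider first, store second: getMetadataStore() now forwards the
        // provider into MetadataStore#initialize(FileSystem, ITtlTimeProvider).
        ITtlTimeProvider ttlTimeProvider =
            new S3Guard.TtlTimeProvider(authDirTtlMillis);
        return S3Guard.getMetadataStore(fs, ttlTimeProvider);
      }

      /** A later swap (e.g. a fake clock in tests) must reach the store too. */
      public static void swapTimeProvider(MetadataStore store,
          ITtlTimeProvider replacement) {
        store.setTtlTimeProvider(replacement);
      }
    }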
View File

@@ -178,7 +178,7 @@ public final class MultiObjectDeleteSupport extends AbstractStoreOperation {
     // metastore entries
     deleted.forEach(path -> {
       try {
-        metadataStore.delete(path, getStoreContext().getTimeProvider());
+        metadataStore.delete(path);
       } catch (IOException e) {
         // trouble: we failed to delete the far end entry
         // try with the next one.

View File

@@ -135,9 +135,7 @@ public class DelayedUpdateRenameTracker extends RenameTracker {
   @Override
   public void completeRename() throws IOException {
-    metadataStore.move(sourcePaths, destMetas,
-        getStoreContext().getTimeProvider(),
-        getOperationState());
+    metadataStore.move(sourcePaths, destMetas, getOperationState());
     super.completeRename();
   }
@@ -147,12 +145,10 @@ public class DelayedUpdateRenameTracker extends RenameTracker {
     try (DurationInfo ignored = new DurationInfo(LOG,
         "Cleaning up deleted paths")) {
       // the destination paths are updated; the source is left alone.
-      metadataStore.move(new ArrayList<>(0), destMetas,
-          getStoreContext().getTimeProvider(),
-          getOperationState());
+      metadataStore.move(new ArrayList<>(0), destMetas, getOperationState());
       for (Path deletedPath : deletedPaths) {
         // this is not ideal in that it may leave parent stuff around.
-        metadataStore.delete(deletedPath, getStoreContext().getTimeProvider());
+        metadataStore.delete(deletedPath);
       }
       deleteParentPaths();
     } catch (IOException | SdkBaseException e) {
@@ -185,7 +181,7 @@ public class DelayedUpdateRenameTracker extends RenameTracker {
       PathMetadata md = metadataStore.get(parent, true);
       if (md != null && md.isEmptyDirectory() == Tristate.TRUE) {
         // if were confident that this is empty: delete it.
-        metadataStore.delete(parent, getStoreContext().getTimeProvider());
+        metadataStore.delete(parent);
       }
     }
   }

View File

@@ -200,8 +200,8 @@ import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.*;
  * sub-tree.
  *
  * Some mutating operations, notably
- * {@link MetadataStore#deleteSubtree(Path, ITtlTimeProvider)} and
- * {@link MetadataStore#move(Collection, Collection, ITtlTimeProvider, BulkOperationState)}
+ * {@link MetadataStore#deleteSubtree(Path)} and
+ * {@link MetadataStore#move(Collection, Collection, BulkOperationState)}
  * are less efficient with this schema.
  * They require mutating multiple items in the DynamoDB table.
  *
@@ -356,7 +356,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
    * Time source. This is used during writes when parent
    * entries need to be created.
    */
-  private ITtlTimeProvider timeProvider;
+  private ITtlTimeProvider ttlTimeProvider;

   /**
    * A utility function to create DynamoDB instance.
@@ -391,11 +391,13 @@ public class DynamoDBMetadataStore implements MetadataStore,
    * FS via {@link S3AFileSystem#shareCredentials(String)}; this will
    * increment the reference counter of these credentials.
    * @param fs {@code S3AFileSystem} associated with the MetadataStore
+   * @param ttlTp the time provider to use for metadata expiry
    * @throws IOException on a failure
    */
   @Override
   @Retries.OnceRaw
-  public void initialize(FileSystem fs) throws IOException {
+  public void initialize(FileSystem fs, ITtlTimeProvider ttlTp)
+      throws IOException {
     Preconditions.checkNotNull(fs, "Null filesystem");
     Preconditions.checkArgument(fs instanceof S3AFileSystem,
         "DynamoDBMetadataStore only supports S3A filesystem.");
@@ -433,7 +435,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
         this::retryEvent
     );
-    timeProvider = new S3Guard.TtlTimeProvider(conf);
+    this.ttlTimeProvider = ttlTp;
     initTable();

     instrumentation.initialized();
@@ -453,7 +455,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
     instrumentation = context.getInstrumentation().getS3GuardInstrumentation();
     username = context.getUsername();
     executor = context.createThrottledExecutor();
-    timeProvider = Preconditions.checkNotNull(
+    ttlTimeProvider = Preconditions.checkNotNull(
         context.getTimeProvider(),
         "ttlTimeProvider must not be null");
   }
@@ -468,7 +470,8 @@ public class DynamoDBMetadataStore implements MetadataStore,
    *
    * This is used to operate the metadata store directly beyond the scope of the
    * S3AFileSystem integration, e.g. command line tools.
-   * Generally, callers should use {@link #initialize(FileSystem)}
+   * Generally, callers should use
+   * {@link MetadataStore#initialize(FileSystem, ITtlTimeProvider)}
    * with an initialized {@code S3AFileSystem} instance.
    *
    * Without a filesystem to act as a reference point, the configuration itself
@@ -479,13 +482,14 @@ public class DynamoDBMetadataStore implements MetadataStore,
    * using the base fs.s3a.* options, as there is no bucket to infer per-bucket
    * settings from.
    *
-   * @see #initialize(FileSystem)
+   * @see MetadataStore#initialize(FileSystem, ITtlTimeProvider)
    * @throws IOException if there is an error
    * @throws IllegalArgumentException if the configuration is incomplete
    */
   @Override
   @Retries.OnceRaw
-  public void initialize(Configuration config) throws IOException {
+  public void initialize(Configuration config,
+      ITtlTimeProvider ttlTp) throws IOException {
     conf = config;
     // use the bucket as the DynamoDB table name if not specified in config
     tableName = conf.getTrimmed(S3GUARD_DDB_TABLE_NAME_KEY);
@@ -512,7 +516,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
         TimeUnit.SECONDS,
         "s3a-ddb-" + tableName);
     initDataAccessRetries(conf);
-    timeProvider = new S3Guard.TtlTimeProvider(conf);
+    this.ttlTimeProvider = ttlTp;
     initTable();
   }
@@ -540,16 +544,16 @@ public class DynamoDBMetadataStore implements MetadataStore,
   @Override
   @Retries.RetryTranslated
-  public void delete(Path path, ITtlTimeProvider ttlTimeProvider)
+  public void delete(Path path)
       throws IOException {
-    innerDelete(path, true, ttlTimeProvider, null);
+    innerDelete(path, true, null);
   }

   @Override
   @Retries.RetryTranslated
   public void forgetMetadata(Path path) throws IOException {
     LOG.debug("Forget metadata for {}", path);
-    innerDelete(path, false, null, null);
+    innerDelete(path, false, null);
   }

   /**
@@ -558,15 +562,12 @@ public class DynamoDBMetadataStore implements MetadataStore,
    * There is no check as to whether the entry exists in the table first.
    * @param path path to delete
    * @param tombstone flag to create a tombstone marker
-   * @param ttlTimeProvider The time provider to set last_updated. Must not
-   *                        be null if tombstone is true.
    * @param ancestorState ancestor state for logging
    * @throws IOException I/O error.
    */
   @Retries.RetryTranslated
   private void innerDelete(final Path path,
       final boolean tombstone,
-      final ITtlTimeProvider ttlTimeProvider,
       final AncestorState ancestorState)
       throws IOException {
     checkPath(path);
@@ -615,7 +616,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
   @Override
   @Retries.RetryTranslated
-  public void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider)
+  public void deleteSubtree(Path path)
       throws IOException {
     checkPath(path);
     LOG.debug("Deleting subtree from table {} in region {}: {}",
@@ -639,7 +640,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
         desc.hasNext();) {
       final Path pathToDelete = desc.next().getPath();
       futures.add(submit(executor, () -> {
-        innerDelete(pathToDelete, true, ttlTimeProvider, state);
+        innerDelete(pathToDelete, true, state);
         return null;
       }));
       if (futures.size() > S3GUARD_DDB_SUBMITTED_TASK_LIMIT) {
@@ -823,13 +824,11 @@ public class DynamoDBMetadataStore implements MetadataStore,
    * Callers are required to synchronize on ancestorState.
    * @param pathsToCreate paths to create
    * @param ancestorState ongoing ancestor state.
-   * @param ttlTimeProvider Must not be null
    * @return the full ancestry paths
    */
   private Collection<DDBPathMetadata> completeAncestry(
       final Collection<DDBPathMetadata> pathsToCreate,
-      final AncestorState ancestorState,
-      final ITtlTimeProvider ttlTimeProvider) throws PathIOException {
+      final AncestorState ancestorState) throws PathIOException {
     // Key on path to allow fast lookup
     Map<Path, DDBPathMetadata> ancestry = new HashMap<>();
     LOG.debug("Completing ancestry for {} paths", pathsToCreate.size());
@@ -913,9 +912,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
   @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
   @Override
   @Retries.RetryTranslated
-  public void addAncestors(
-      final Path qualifiedPath,
-      final ITtlTimeProvider ttlTimeProvider,
+  public void addAncestors(final Path qualifiedPath,
       @Nullable final BulkOperationState operationState) throws IOException {

     Collection<DDBPathMetadata> newDirs = new ArrayList<>();
@@ -1000,10 +997,8 @@ public class DynamoDBMetadataStore implements MetadataStore,
    */
   @Override
   @Retries.RetryTranslated
-  public void move(
-      @Nullable Collection<Path> pathsToDelete,
+  public void move(@Nullable Collection<Path> pathsToDelete,
       @Nullable Collection<PathMetadata> pathsToCreate,
-      final ITtlTimeProvider ttlTimeProvider,
       @Nullable final BulkOperationState operationState) throws IOException {
     if (pathsToDelete == null && pathsToCreate == null) {
       return;
@@ -1032,8 +1027,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
         newItems.addAll(
             completeAncestry(
                 pathMetaToDDBPathMeta(pathsToCreate),
-                ancestorState,
-                extractTimeProvider(ttlTimeProvider)));
+                ancestorState));
       }
     }
     // sort all the new items topmost first.
@@ -1222,7 +1216,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
   public void put(
       final Collection<? extends PathMetadata> metas,
       @Nullable final BulkOperationState operationState) throws IOException {
-    innerPut(pathMetaToDDBPathMeta(metas), operationState, timeProvider);
+    innerPut(pathMetaToDDBPathMeta(metas), operationState, ttlTimeProvider);
   }

   /**
@@ -1236,7 +1230,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
    * create entries in the table without parents.
    * @param metas metadata entries to write.
    * @param operationState (nullable) operational state for a bulk update
-   * @param ttlTimeProvider
+   * @param ttlTp The time provider for metadata expiry
    * @throws IOException failure.
    */
   @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@@ -1244,7 +1238,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
   private void innerPut(
       final Collection<DDBPathMetadata> metas,
       @Nullable final BulkOperationState operationState,
-      final ITtlTimeProvider ttlTimeProvider) throws IOException {
+      final ITtlTimeProvider ttlTp) throws IOException {
     if (metas.isEmpty()) {
       // Happens when someone calls put() with an empty list.
       LOG.debug("Ignoring empty list of entries to put");
@@ -1258,7 +1252,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
     Item[] items;
     synchronized (ancestorState) {
       items = pathMetadataToItem(
-          completeAncestry(metas, ancestorState, ttlTimeProvider));
+          completeAncestry(metas, ancestorState));
     }
     LOG.debug("Saving batch of {} items to table {}, region {}", items.length,
         tableName, region);
@@ -1644,7 +1638,7 @@ public class DynamoDBMetadataStore implements MetadataStore,
     try {
       LOG.debug("innerPut on metas: {}", metas);
       if (!metas.isEmpty()) {
-        innerPut(metas, state, timeProvider);
+        innerPut(metas, state, ttlTimeProvider);
       }
     } catch (IOException e) {
       String msg = String.format("IOException while setting false "
@@ -2320,15 +2314,20 @@ public class DynamoDBMetadataStore implements MetadataStore,
     return new AncestorState(this, operation, dest);
   }

+  @Override
+  public void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) {
+    this.ttlTimeProvider = ttlTimeProvider;
+  }
+
   /**
    * Extract a time provider from the argument or fall back to the
    * one in the constructor.
-   * @param ttlTimeProvider nullable time source passed in as an argument.
+   * @param ttlTp nullable time source passed in as an argument.
    * @return a non-null time source.
    */
   private ITtlTimeProvider extractTimeProvider(
-      @Nullable ITtlTimeProvider ttlTimeProvider) {
-    return ttlTimeProvider != null ? ttlTimeProvider : timeProvider;
+      @Nullable ITtlTimeProvider ttlTp) {
+    return ttlTp != null ? ttlTp : this.ttlTimeProvider;
   }

   /**

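For completeness, a hedged sketch of the standalone initialization path that the javadoc above describes, i.e. driving DynamoDBMetadataStore from a bare Configuration the way command-line tools do. Imports and the helper name are assumptions; the table name and region settings referenced by S3GUARD_DDB_TABLE_NAME_KEY and S3GUARD_DDB_REGION_KEY must already be present in the configuration, otherwise initialize() rejects it with IllegalArgumentException.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore;
    import org.apache.hadoop.fs.s3a.s3guard.S3Guard;

    /** Illustrative only: the CLI-style, filesystem-free initialization path. */
    public final class StandaloneDdbStoreSketch {

      private StandaloneDdbStoreSketch() {
      }

      public static DynamoDBMetadataStore open(Configuration conf)
          throws IOException {
        DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore();
        // The time provider is now a required argument of initialize(); the
        // store no longer builds its own S3Guard.TtlTimeProvider(conf) inside.
        ddbms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
        return ddbms;
      }
    }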
View File

@@ -79,8 +79,11 @@ public class LocalMetadataStore implements MetadataStore {
   private String username;

+  private ITtlTimeProvider ttlTimeProvider;
+
   @Override
-  public void initialize(FileSystem fileSystem) throws IOException {
+  public void initialize(FileSystem fileSystem,
+      ITtlTimeProvider ttlTp) throws IOException {
     Preconditions.checkNotNull(fileSystem);
     fs = fileSystem;
     URI fsURI = fs.getUri();
@@ -89,11 +92,12 @@ public class LocalMetadataStore implements MetadataStore {
       uriHost = null;
     }

-    initialize(fs.getConf());
+    initialize(fs.getConf(), ttlTp);
   }

   @Override
-  public void initialize(Configuration conf) throws IOException {
+  public void initialize(Configuration conf, ITtlTimeProvider ttlTp)
+      throws IOException {
     Preconditions.checkNotNull(conf);
     int maxRecords = conf.getInt(S3GUARD_METASTORE_LOCAL_MAX_RECORDS,
         DEFAULT_S3GUARD_METASTORE_LOCAL_MAX_RECORDS);
@@ -110,6 +114,7 @@ public class LocalMetadataStore implements MetadataStore {
     localCache = builder.build();
     username = UserGroupInformation.getCurrentUser().getShortUserName();
+    this.ttlTimeProvider = ttlTp;
   }

   @Override
@@ -122,7 +127,7 @@ public class LocalMetadataStore implements MetadataStore {
   }

   @Override
-  public void delete(Path p, ITtlTimeProvider ttlTimeProvider)
+  public void delete(Path p)
       throws IOException {
     doDelete(p, false, true, ttlTimeProvider);
   }
@@ -133,23 +138,23 @@ public class LocalMetadataStore implements MetadataStore {
   }

   @Override
-  public void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider)
+  public void deleteSubtree(Path path)
       throws IOException {
     doDelete(path, true, true, ttlTimeProvider);
   }

   private synchronized void doDelete(Path p, boolean recursive,
-      boolean tombstone, ITtlTimeProvider ttlTimeProvider) {
+      boolean tombstone, ITtlTimeProvider ttlTp) {

     Path path = standardize(p);

     // Delete entry from file cache, then from cached parent directory, if any
-    deleteCacheEntries(path, tombstone, ttlTimeProvider);
+    deleteCacheEntries(path, tombstone, ttlTp);

     if (recursive) {
       // Remove all entries that have this dir as path prefix.
-      deleteEntryByAncestor(path, localCache, tombstone, ttlTimeProvider);
+      deleteEntryByAncestor(path, localCache, tombstone, ttlTp);
     }
   }
@@ -202,10 +207,8 @@ public class LocalMetadataStore implements MetadataStore {
   }

   @Override
-  public void move(
-      @Nullable Collection<Path> pathsToDelete,
+  public void move(@Nullable Collection<Path> pathsToDelete,
       @Nullable Collection<PathMetadata> pathsToCreate,
-      ITtlTimeProvider ttlTimeProvider,
       @Nullable final BulkOperationState operationState) throws IOException {
     LOG.info("Move {} to {}", pathsToDelete, pathsToCreate);
@@ -222,7 +225,7 @@ public class LocalMetadataStore implements MetadataStore {
     // 1. Delete pathsToDelete
     for (Path meta : pathsToDelete) {
       LOG.debug("move: deleting metadata {}", meta);
-      delete(meta, ttlTimeProvider);
+      delete(meta);
     }

     // 2. Create new destination path metadata
@@ -487,7 +490,7 @@ public class LocalMetadataStore implements MetadataStore {
    * lock held.
    */
   private void deleteCacheEntries(Path path, boolean tombstone,
-      ITtlTimeProvider ttlTimeProvider) {
+      ITtlTimeProvider ttlTp) {
     LocalMetadataEntry entry = localCache.getIfPresent(path);
     // If there's no entry, delete should silently succeed
     // (based on MetadataStoreTestBase#testDeleteNonExisting)
@@ -501,7 +504,7 @@ public class LocalMetadataStore implements MetadataStore {
     if(entry.hasPathMeta()){
       if (tombstone) {
         PathMetadata pmd = PathMetadata.tombstone(path);
-        pmd.setLastUpdated(ttlTimeProvider.getNow());
+        pmd.setLastUpdated(ttlTp.getNow());
         entry.setPathMetadata(pmd);
       } else {
         entry.setPathMetadata(null);
@@ -528,7 +531,7 @@ public class LocalMetadataStore implements MetadataStore {
       LOG.debug("removing parent's entry for {} ", path);
       if (tombstone) {
         dir.markDeleted(path);
-        dir.setLastUpdated(ttlTimeProvider.getNow());
+        dir.setLastUpdated(ttlTp.getNow());
       } else {
         dir.remove(path);
       }
@@ -594,9 +597,13 @@ public class LocalMetadataStore implements MetadataStore {
         null);
   }

+  @Override
+  public void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) {
+    this.ttlTimeProvider = ttlTimeProvider;
+  }
+
   @Override
   public void addAncestors(final Path qualifiedPath,
-      ITtlTimeProvider ttlTimeProvider,
       @Nullable final BulkOperationState operationState) throws IOException {

     Collection<PathMetadata> newDirs = new ArrayList<>();

View File

@@ -50,17 +50,21 @@ public interface MetadataStore extends Closeable {
    * Performs one-time initialization of the metadata store.
    *
    * @param fs {@code FileSystem} associated with the MetadataStore
+   * @param ttlTimeProvider the time provider to use for metadata expiry
    * @throws IOException if there is an error
    */
-  void initialize(FileSystem fs) throws IOException;
+  void initialize(FileSystem fs, ITtlTimeProvider ttlTimeProvider)
+      throws IOException;

   /**
    * Performs one-time initialization of the metadata store via configuration.
-   * @see #initialize(FileSystem)
+   * @see #initialize(FileSystem, ITtlTimeProvider)
    * @param conf Configuration.
+   * @param ttlTimeProvider the time provider to use for metadata expiry
    * @throws IOException if there is an error
    */
-  void initialize(Configuration conf) throws IOException;
+  void initialize(Configuration conf,
+      ITtlTimeProvider ttlTimeProvider) throws IOException;

   /**
    * Deletes exactly one path, leaving a tombstone to prevent lingering,
@@ -71,16 +75,14 @@ public interface MetadataStore extends Closeable {
    * the lastUpdated field of the record has to be updated to <pre>now</pre>.
    *
    * @param path the path to delete
-   * @param ttlTimeProvider the time provider to set last_updated. Must not
-   *                        be null.
    * @throws IOException if there is an error
    */
-  void delete(Path path, ITtlTimeProvider ttlTimeProvider)
+  void delete(Path path)
      throws IOException;

   /**
    * Removes the record of exactly one path. Does not leave a tombstone (see
-   * {@link MetadataStore#delete(Path, ITtlTimeProvider)}. It is currently
+   * {@link MetadataStore#delete(Path)}. It is currently
    * intended for testing only, and a need to use it as part of normal
    * FileSystem usage is not anticipated.
    *
@@ -103,11 +105,9 @@ public interface MetadataStore extends Closeable {
    * the lastUpdated field of all records have to be updated to <pre>now</pre>.
    *
    * @param path the root of the sub-tree to delete
-   * @param ttlTimeProvider the time provider to set last_updated. Must not
-   *                        be null.
    * @throws IOException if there is an error
    */
-  void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider)
+  void deleteSubtree(Path path)
      throws IOException;

   /**
@@ -152,14 +152,11 @@ public interface MetadataStore extends Closeable {
    * must have their last updated timestamps set through
    * {@link S3Guard#patchLastUpdated(Collection, ITtlTimeProvider)}.
    * @param qualifiedPath path to update
-   * @param timeProvider time provider for timestamps
    * @param operationState (nullable) operational state for a bulk update
    * @throws IOException failure
    */
   @RetryTranslated
-  void addAncestors(
-      Path qualifiedPath,
-      @Nullable ITtlTimeProvider timeProvider,
+  void addAncestors(Path qualifiedPath,
      @Nullable BulkOperationState operationState) throws IOException;

   /**
@@ -184,16 +181,12 @@ public interface MetadataStore extends Closeable {
    *                      source directory tree of the move.
    * @param pathsToCreate Collection of all PathMetadata for the new paths
    *                      that were created at the destination of the rename().
-   * @param ttlTimeProvider the time provider to set last_updated. Must not
-   *                        be null.
    * @param operationState Any ongoing state supplied to the rename tracker
    *                       which is to be passed in with each move operation.
    * @throws IOException if there is an error
    */
-  void move(
-      @Nullable Collection<Path> pathsToDelete,
+  void move(@Nullable Collection<Path> pathsToDelete,
      @Nullable Collection<PathMetadata> pathsToCreate,
-      ITtlTimeProvider ttlTimeProvider,
      @Nullable BulkOperationState operationState) throws IOException;

   /**
@@ -378,4 +371,13 @@ public interface MetadataStore extends Closeable {
     return null;
   }

+  /**
+   * The TtlTimeProvider has to be set during the initialization for the
+   * metadatastore, but this method can be used for testing, and change the
+   * instance during runtime.
+   *
+   * @param ttlTimeProvider
+   */
+  void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider);
+
 }

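A small usage sketch of the revised contract follows. The imports, the bucket name and the paths are illustrative assumptions, and LocalMetadataStore is used only as a convenient implementation; the shape of the calls comes straight from the interface change above: the provider travels through initialize(), the mutating calls no longer take one per invocation, and setTtlTimeProvider() stays available for tests that need to change the clock afterwards.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider;
    import org.apache.hadoop.fs.s3a.s3guard.LocalMetadataStore;
    import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
    import org.apache.hadoop.fs.s3a.s3guard.S3Guard;

    /** Illustrative caller of the updated MetadataStore contract. */
    public final class MetadataStoreContractSketch {

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        ITtlTimeProvider ttlTimeProvider = new S3Guard.TtlTimeProvider(conf);

        try (MetadataStore ms = new LocalMetadataStore()) {
          // One-time initialization now carries the time provider.
          ms.initialize(conf, ttlTimeProvider);

          // Tombstoning operations pick the provider up from the store itself.
          ms.delete(new Path("s3a://example-bucket/tmp/file"));
          ms.deleteSubtree(new Path("s3a://example-bucket/tmp"));

          // Tests may still replace the time source at runtime.
          ms.setTtlTimeProvider(new S3Guard.TtlTimeProvider(conf));
        }
      }
    }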
View File

@@ -40,11 +40,13 @@ import java.util.Map;
 public class NullMetadataStore implements MetadataStore {

   @Override
-  public void initialize(FileSystem fs) throws IOException {
+  public void initialize(FileSystem fs, ITtlTimeProvider ttlTimeProvider)
+      throws IOException {
   }

   @Override
-  public void initialize(Configuration conf) throws IOException {
+  public void initialize(Configuration conf, ITtlTimeProvider ttlTimeProvider)
+      throws IOException {
   }

   @Override
@@ -52,7 +54,7 @@ public class NullMetadataStore implements MetadataStore {
   }

   @Override
-  public void delete(Path path, ITtlTimeProvider ttlTimeProvider)
+  public void delete(Path path)
       throws IOException {
   }
@@ -61,7 +63,7 @@ public class NullMetadataStore implements MetadataStore {
   }

   @Override
-  public void deleteSubtree(Path path, ITtlTimeProvider ttlTimeProvider)
+  public void deleteSubtree(Path path)
       throws IOException {
   }
@@ -84,7 +86,6 @@ public class NullMetadataStore implements MetadataStore {
   @Override
   public void move(Collection<Path> pathsToDelete,
       Collection<PathMetadata> pathsToCreate,
-      ITtlTimeProvider ttlTimeProvider,
       final BulkOperationState operationState) throws IOException {
   }
@@ -146,9 +147,12 @@ public class NullMetadataStore implements MetadataStore {
     return new NullRenameTracker(storeContext, source, dest, this);
   }

+  @Override
+  public void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) {
+  }
+
   @Override
   public void addAncestors(final Path qualifiedPath,
-      final ITtlTimeProvider timeProvider,
       @Nullable final BulkOperationState operationState) throws IOException {
   }

View File

@@ -155,9 +155,7 @@ public class ProgressiveRenameTracker extends RenameTracker {
     // no entries are deleted at this point.
     try (DurationInfo ignored = new DurationInfo(LOG, false,
         "Adding new metastore entries")) {
-      store.move(null, entriesToAdd,
-          getStoreContext().getTimeProvider(),
-          getOperationState());
+      store.move(null, entriesToAdd, getOperationState());
     }
   }
@@ -199,9 +197,7 @@ public class ProgressiveRenameTracker extends RenameTracker {
     // ...so update the store.
     try (DurationInfo ignored = new DurationInfo(LOG, false,
         "adding %s metastore entries", entriesToAdd.size())) {
-      store.move(null, entriesToAdd,
-          getStoreContext().getTimeProvider(),
-          getOperationState());
+      store.move(null, entriesToAdd, getOperationState());
     }
   }
@@ -218,9 +214,7 @@ public class ProgressiveRenameTracker extends RenameTracker {
           getSourceRoot(),
           getDest(),
           getOwner());
-      getMetadataStore().move(toDelete, toAdd,
-          getStoreContext().getTimeProvider(),
-          getOperationState());
+      getMetadataStore().move(toDelete, toAdd, getOperationState());
     }
   }
@@ -236,9 +230,7 @@ public class ProgressiveRenameTracker extends RenameTracker {
     // delete the paths from the metastore
     try (DurationInfo ignored = new DurationInfo(LOG, false,
         "delete %s metastore entries", paths.size())) {
-      getMetadataStore().move(paths, null,
-          getStoreContext().getTimeProvider(),
-          getOperationState());
+      getMetadataStore().move(paths, null, getOperationState());
     }
   }

View File

@@ -83,17 +83,19 @@ public final class S3Guard {
   /**
    * Create a new instance of the configured MetadataStore.
    * The returned MetadataStore will have been initialized via
-   * {@link MetadataStore#initialize(FileSystem)} by this function before
-   * returning it. Callers must clean up by calling
+   * {@link MetadataStore#initialize(FileSystem, ITtlTimeProvider)}
+   * by this function before returning it. Callers must clean up by calling
    * {@link MetadataStore#close()} when done using the MetadataStore.
    *
    * @param fs FileSystem whose Configuration specifies which
    *           implementation to use.
+   * @param ttlTimeProvider
    * @return Reference to new MetadataStore.
    * @throws IOException if the metadata store cannot be instantiated
    */
   @Retries.OnceTranslated
-  public static MetadataStore getMetadataStore(FileSystem fs)
+  public static MetadataStore getMetadataStore(FileSystem fs,
+      ITtlTimeProvider ttlTimeProvider)
       throws IOException {
     Preconditions.checkNotNull(fs);
     Configuration conf = fs.getConf();
@@ -104,7 +106,7 @@ public final class S3Guard {
       msInstance = ReflectionUtils.newInstance(msClass, conf);
       LOG.debug("Using {} metadata store for {} filesystem",
           msClass.getSimpleName(), fs.getScheme());
-      msInstance.initialize(fs);
+      msInstance.initialize(fs, ttlTimeProvider);
       return msInstance;
     } catch (FileNotFoundException e) {
       // Don't log this exception as it means the table doesn't exist yet;
@@ -521,7 +523,7 @@ public final class S3Guard {
   /**
    * This adds all new ancestors of a path as directories.
    * This forwards to
-   * {@link MetadataStore#addAncestors(Path, ITtlTimeProvider, BulkOperationState)}.
+   * {@link MetadataStore#addAncestors(Path, BulkOperationState)}.
    * <p>
    * Originally it implemented the logic to probe for an add ancestors,
    * but with the addition of a store-specific bulk operation state
@@ -538,7 +540,7 @@ public final class S3Guard {
       final Path qualifiedPath,
       final ITtlTimeProvider timeProvider,
       @Nullable final BulkOperationState operationState) throws IOException {
-    metadataStore.addAncestors(qualifiedPath, timeProvider, operationState);
+    metadataStore.addAncestors(qualifiedPath, operationState);
   }

   /**

View File

@@ -314,9 +314,9 @@ public abstract class S3GuardTool extends Configured implements Tool {
     }

     if (filesystem == null) {
-      getStore().initialize(conf);
+      getStore().initialize(conf, new S3Guard.TtlTimeProvider(conf));
     } else {
-      getStore().initialize(filesystem);
+      getStore().initialize(filesystem, new S3Guard.TtlTimeProvider(conf));
     }
     LOG.info("Metadata store {} is initialized.", getStore());
     return getStore();

View File

@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;

+import org.apache.hadoop.fs.s3a.s3guard.S3Guard;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -88,7 +89,8 @@ public class ITestS3AMetadataPersistenceException extends AbstractS3ATestBase {
     ioException = new IOException();
     IOExceptionMetadataStore metadataStore =
         new IOExceptionMetadataStore(ioException);
-    metadataStore.initialize(getConfiguration());
+    metadataStore.initialize(getConfiguration(),
+        new S3Guard.TtlTimeProvider(getConfiguration()));
     fs.setMetadataStore(metadataStore);
   }

View File

@@ -261,11 +261,13 @@ public class TestPartialDeleteFailures {
     private final List<Path> created = new ArrayList<>();

     @Override
-    public void initialize(final FileSystem fs) {
+    public void initialize(final FileSystem fs,
+        ITtlTimeProvider ttlTimeProvider) {
     }

     @Override
-    public void initialize(final Configuration conf) {
+    public void initialize(final Configuration conf,
+        ITtlTimeProvider ttlTimeProvider) {
     }

     @Override
@@ -316,21 +318,18 @@ public class TestPartialDeleteFailures {
     }

     @Override
-    public void delete(final Path path,
-        final ITtlTimeProvider ttlTimeProvider) {
+    public void delete(final Path path) {
       deleted.add(path);
     }

     @Override
-    public void deleteSubtree(final Path path,
-        final ITtlTimeProvider ttlTimeProvider) {
+    public void deleteSubtree(final Path path) {
     }

     @Override
     public void move(@Nullable final Collection<Path> pathsToDelete,
         @Nullable final Collection<PathMetadata> pathsToCreate,
-        final ITtlTimeProvider ttlTimeProvider,
         @Nullable final BulkOperationState operationState) {
     }
@@ -352,6 +351,10 @@ public class TestPartialDeleteFailures {
       return null;
     }

+    @Override
+    public void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) {
+    }
+
     @Override
     public Map<String, String> getDiagnostics() {
       return null;
@@ -384,7 +387,6 @@ public class TestPartialDeleteFailures {
     @Override
     public void addAncestors(final Path qualifiedPath,
-        final ITtlTimeProvider timeProvider,
         @Nullable final BulkOperationState operationState) {
     }

View File

@@ -210,7 +210,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
     enableOnDemand(conf);

     ddbmsStatic = new DynamoDBMetadataStore();
-    ddbmsStatic.initialize(conf);
+    ddbmsStatic.initialize(conf, new S3Guard.TtlTimeProvider(conf));
   }

   @AfterClass
@@ -416,7 +416,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
     conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
     DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore();
     try {
-      ddbms.initialize(s3afs);
+      ddbms.initialize(s3afs, new S3Guard.TtlTimeProvider(conf));
       verifyTableInitialized(tableName, ddbms.getDynamoDB());
       assertNotNull(ddbms.getTable());
       assertEquals(tableName, ddbms.getTable().getTableName());
@@ -445,14 +445,14 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
         getFileSystem().getBucketLocation());
     conf.unset(S3GUARD_DDB_REGION_KEY);
     try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
-      ddbms.initialize(conf);
+      ddbms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
       fail("Should have failed because the table name is not set!");
     } catch (IllegalArgumentException ignored) {
     }
     // config table name
     conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
     try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
-      ddbms.initialize(conf);
+      ddbms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
       fail("Should have failed because as the region is not set!");
     } catch (IllegalArgumentException ignored) {
     }
@@ -460,7 +460,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
     conf.set(S3GUARD_DDB_REGION_KEY, savedRegion);
     DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore();
     try {
-      ddbms.initialize(conf);
+      ddbms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
       verifyTableInitialized(tableName, ddbms.getDynamoDB());
       assertNotNull(ddbms.getTable());
       assertEquals(tableName, ddbms.getTable().getTableName());
@@ -590,7 +590,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
     ThrottleTracker throttleTracker = new ThrottleTracker(ms);
     try(DurationInfo ignored = new DurationInfo(LOG, true,
         "Move")) {
-      ms.move(pathsToDelete, newMetas, getTtlTimeProvider(), state);
+      ms.move(pathsToDelete, newMetas, state);
     }
     LOG.info("Throttle status {}", throttleTracker);
     assertEquals("Number of children in source directory",
@@ -662,7 +662,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
     tagConfiguration(conf);
     DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore();
     try {
-      ddbms.initialize(conf);
+      ddbms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
       Table table = verifyTableInitialized(tableName, ddbms.getDynamoDB());
       // check the tagging too
       verifyStoreTags(createTagMap(), ddbms);
@@ -718,7 +718,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
     clearBucketOption(conf, b, S3GUARD_DDB_TABLE_NAME_KEY);
     conf.unset(S3GUARD_DDB_TABLE_CREATE_KEY);
     try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
-      ddbms.initialize(s3afs);
+      ddbms.initialize(s3afs, new S3Guard.TtlTimeProvider(conf));
       // if an exception was not raised, a table was created.
       // So destroy it before failing.
       ddbms.destroy();
@@ -820,8 +820,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
             1024, false))
     );
-    ddbms.move(fullSourcePaths, pathsToCreate, getTtlTimeProvider(),
-        bulkWrite);
+    ddbms.move(fullSourcePaths, pathsToCreate, bulkWrite);
     bulkWrite.close();
     // assert that all the ancestors should have been populated automatically
     List<String> paths = Lists.newArrayList(
@@ -923,7 +922,7 @@ public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
     enableOnDemand(conf);
     DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore();
     try {
-      ddbms.initialize(s3afs);
+      ddbms.initialize(s3afs, new S3Guard.TtlTimeProvider(conf));
       // we can list the empty table
       ddbms.listChildren(testPath);
       DynamoDB dynamoDB = ddbms.getDynamoDB();

View File

@@ -148,7 +148,7 @@ public class ITestDynamoDBMetadataStoreScale
     conf.set(S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY, "5ms");

     DynamoDBMetadataStore ms = new DynamoDBMetadataStore();
-    ms.initialize(conf);
+    ms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
     // wire up the owner FS so that we can make assertions about throttle
     // events
     ms.bindToOwnerFilesystem(fs);
@@ -337,7 +337,7 @@ public class ITestDynamoDBMetadataStoreScale
   private void retryingDelete(final Path path) {
     try {
       ddbms.getInvoker().retry("Delete ", path.toString(), true,
-          () -> ddbms.delete(path, new S3Guard.TtlTimeProvider(getConf())));
+          () -> ddbms.delete(path));
     } catch (IOException e) {
       LOG.warn("Failed to delete {}: ", path, e);
     }
@@ -432,7 +432,7 @@ public class ITestDynamoDBMetadataStoreScale
           OPERATIONS_PER_THREAD,
           expectThrottling(),
           () -> {
-            ddbms.delete(path, time);
+            ddbms.delete(path);
           });
     }
   }

View File

@@ -97,7 +97,7 @@ public class ITestS3GuardConcurrentOps extends AbstractS3ATestBase {
     //now init the store; this should increment the ref count.
     DynamoDBMetadataStore ms = new DynamoDBMetadataStore();
-    ms.initialize(fs);
+    ms.initialize(fs, new S3Guard.TtlTimeProvider(conf));

     // the ref count should have gone up
     assertEquals("Credential Ref count unchanged after initializing metastore "
@@ -145,7 +145,7 @@ public class ITestS3GuardConcurrentOps extends AbstractS3ATestBase {
           Exception result = null;
           try (DynamoDBMetadataStore store = new DynamoDBMetadataStore()) {
-            store.initialize(conf);
+            store.initialize(conf, new S3Guard.TtlTimeProvider(conf));
           } catch (Exception e) {
             LOG.error(e.getClass() + ": " + e.getMessage());
             result = e;

View File

@@ -151,7 +151,7 @@ public class ITestS3GuardToolDynamoDB extends AbstractS3GuardToolTestBase {
       // Check. Should create new metadatastore with the table name set.
       try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
-        ddbms.initialize(conf);
+        ddbms.initialize(conf, new S3Guard.TtlTimeProvider(conf));
         ListTagsOfResourceRequest listTagsOfResourceRequest = new ListTagsOfResourceRequest()
             .withResourceArn(ddbms.getTable().getDescription().getTableArn());
         List<Tag> tags = ddbms.getAmazonDynamoDB().listTagsOfResource(listTagsOfResourceRequest).getTags();

View File

@@ -126,7 +126,8 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
     ms = contract.getMetadataStore();
     assertNotNull("null MetadataStore", ms);
     assertNotNull("null FileSystem", contract.getFileSystem());
-    ms.initialize(contract.getFileSystem());
+    ms.initialize(contract.getFileSystem(),
+        new S3Guard.TtlTimeProvider(contract.getFileSystem().getConf()));
     ttlTimeProvider =
         new S3Guard.TtlTimeProvider(contract.getFileSystem().getConf());
   }
@@ -333,7 +334,7 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
   public void testDelete() throws Exception {
     setUpDeleteTest();

-    ms.delete(strToPath("/ADirectory1/db1/file2"), ttlTimeProvider);
+    ms.delete(strToPath("/ADirectory1/db1/file2"));

     /* Ensure delete happened. */
     assertDirectorySize("/ADirectory1/db1", 1);
@@ -362,7 +363,7 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
     if (!allowMissing()) {
       assertCached(p + "/ADirectory1/db1");
     }
-    ms.deleteSubtree(strToPath(p + "/ADirectory1/db1/"), ttlTimeProvider);
+    ms.deleteSubtree(strToPath(p + "/ADirectory1/db1/"));

     assertEmptyDirectory(p + "/ADirectory1");
     assertDeleted(p + "/ADirectory1/db1");
@@ -382,7 +383,7 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
   public void testDeleteRecursiveRoot() throws Exception {
     setUpDeleteTest();

-    ms.deleteSubtree(strToPath("/"), ttlTimeProvider);
+    ms.deleteSubtree(strToPath("/"));
     assertDeleted("/ADirectory1");
     assertDeleted("/ADirectory2");
     assertDeleted("/ADirectory2/db1");
@@ -393,10 +394,10 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
   @Test
   public void testDeleteNonExisting() throws Exception {
     // Path doesn't exist, but should silently succeed
-    ms.delete(strToPath("/bobs/your/uncle"), ttlTimeProvider);
+    ms.delete(strToPath("/bobs/your/uncle"));

     // Ditto.
-    ms.deleteSubtree(strToPath("/internets"), ttlTimeProvider);
+    ms.deleteSubtree(strToPath("/internets"));
   }
@@ -434,7 +435,7 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
     }

     if (!(ms instanceof NullMetadataStore)) {
-      ms.delete(strToPath(filePath), ttlTimeProvider);
+      ms.delete(strToPath(filePath));
       meta = ms.get(strToPath(filePath));
       assertTrue("Tombstone not left for deleted file", meta.isDeleted());
     }
@@ -612,7 +613,7 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {
     destMetas.add(new PathMetadata(makeDirStatus("/b1")));
     destMetas.add(new PathMetadata(makeFileStatus("/b1/file1", 100)));
     destMetas.add(new PathMetadata(makeFileStatus("/b1/file2", 100)));
-    ms.move(srcPaths, destMetas, ttlTimeProvider, null);
+    ms.move(srcPaths, destMetas, null);

     // Assert src is no longer there
     dirMeta = ms.listChildren(strToPath("/a1"));
@@ -662,11 +663,11 @@ public abstract class MetadataStoreTestBase extends HadoopTestBase {

     // Make sure delete is correct as well
     if (!allowMissing()) {
-      ms.delete(new Path(p2), ttlTimeProvider);
+      ms.delete(new Path(p2));
       meta = ms.get(new Path(p1));
       assertNotNull("Path should not have been deleted", meta);
     }
-    ms.delete(new Path(p1), ttlTimeProvider);
+    ms.delete(new Path(p1));
   }

   @Test

View File

@@ -144,15 +144,15 @@ public abstract class AbstractITestS3AMetadataStoreScale extends
             toDelete = movedPaths;
             toCreate = origMetas;
           }
-          ms.move(toDelete, toCreate, ttlTimeProvider, null);
+          ms.move(toDelete, toCreate, null);
         }
         moveTimer.end();
         printTiming(LOG, "move", moveTimer, operations);
       } finally {
         // Cleanup
         clearMetadataStore(ms, count);
-        ms.move(origPaths, null, ttlTimeProvider, null);
-        ms.move(movedPaths, null, ttlTimeProvider, null);
+        ms.move(origPaths, null, null);
+        ms.move(movedPaths, null, null);
       }
     }
   }
@@ -215,7 +215,7 @@ public abstract class AbstractITestS3AMetadataStoreScale extends
       throws IOException {
     describe("Recursive deletion");
     NanoTimer deleteTimer = new NanoTimer();
-    ms.deleteSubtree(BUCKET_ROOT, ttlTimeProvider);
+    ms.deleteSubtree(BUCKET_ROOT);
     deleteTimer.end();
     printTiming(LOG, "delete", deleteTimer, count);
   }

View File

@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.s3a.scale;
 import org.apache.hadoop.fs.s3a.s3guard.LocalMetadataStore;
 import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
+import org.apache.hadoop.fs.s3a.s3guard.S3Guard;

 import java.io.IOException;
@@ -31,7 +32,7 @@ public class ITestLocalMetadataStoreScale
   @Override
   public MetadataStore createMetadataStore() throws IOException {
     MetadataStore ms = new LocalMetadataStore();
-    ms.initialize(getFileSystem());
+    ms.initialize(getFileSystem(), new S3Guard.TtlTimeProvider(getConf()));
     return ms;
   }
 }