HDFS-16599. Fix typo in hadoop-hdfs-rbf module (#4368). Contributed by fanshilun.

Signed-off-by: Ayush Saxena <ayushsaxena@apache.org>
Author: slfan1989, 2022-05-30 13:29:17 -07:00 (committed by GitHub)
Parent: 21fa693d38
Commit: 5952934ad5
35 changed files with 62 additions and 62 deletions


@@ -63,7 +63,7 @@
 * dst sub-namespace with distcp.
 *
 * 1. Move data from the source path to the destination path with distcp.
-* 2. Update the the mount entry.
+* 2. Update the mount entry.
 * 3. Delete the source path to trash.
 */
 public class RouterFedBalance extends Configured implements Tool {
@@ -77,7 +77,7 @@ public class RouterFedBalance extends Configured implements Tool {
 private static final String TRASH_PROCEDURE = "trash-procedure";
 /**
-* This class helps building the balance job.
+* This class helps to build the balance job.
 */
 private class Builder {
 /* Force close all open files while there is no diff. */


@@ -50,7 +50,7 @@ public interface FederationMBean {
 /**
 * Get the latest state of all routers.
-* @return JSON with all of the known routers or null if failure.
+* @return JSON with all the known routers or null if failure.
 */
 String getRouters();


@@ -52,7 +52,7 @@ public class FederationRPCPerformanceMonitor implements RouterRpcMonitor {
 /** Time for an operation to be received in the Router. */
 private static final ThreadLocal<Long> START_TIME = new ThreadLocal<>();
-/** Time for an operation to be send to the Namenode. */
+/** Time for an operation to be sent to the Namenode. */
 private static final ThreadLocal<Long> PROXY_TIME = new ThreadLocal<>();
 /** Configuration for the performance monitor. */


@@ -290,7 +290,7 @@ public String getMountTable() {
 // Dump mount table entries information into JSON
 for (MountTable entry : orderedMounts) {
-// Sumarize destinations
+// Summarize destinations
 Set<String> nameservices = new LinkedHashSet<>();
 Set<String> paths = new LinkedHashSet<>();
 for (RemoteLocation location : entry.getDestinations()) {


@@ -124,7 +124,7 @@ public synchronized void release() {
 */
 public synchronized void close(boolean force) {
 if (!force && this.numThreads > 0) {
-// this is an erroneous case but we have to close the connection
+// this is an erroneous case, but we have to close the connection
 // anyway since there will be connection leak if we don't do so
 // the connection has been moved out of the pool
 LOG.error("Active connection with {} handlers will be closed",
@@ -132,7 +132,7 @@ public synchronized void close(boolean force) {
 }
 this.closed = true;
 Object proxy = this.client.getProxy();
-// Nobody should be using this anymore so it should close right away
+// Nobody should be using this anymore, so it should close right away
 RPC.stopProxy(proxy);
 }


@@ -135,12 +135,12 @@ public void start() {
 this.creator.start();
 // Schedule a task to remove stale connection pools and sockets
-long recyleTimeMs = Math.min(
+long recycleTimeMs = Math.min(
 poolCleanupPeriodMs, connectionCleanupPeriodMs);
 LOG.info("Cleaning every {} seconds",
-TimeUnit.MILLISECONDS.toSeconds(recyleTimeMs));
+TimeUnit.MILLISECONDS.toSeconds(recycleTimeMs));
 this.cleaner.scheduleAtFixedRate(
-new CleanupTask(), 0, recyleTimeMs, TimeUnit.MILLISECONDS);
+new CleanupTask(), 0, recycleTimeMs, TimeUnit.MILLISECONDS);
 // Mark the manager as running
 this.running = true;
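
Aside, not part of this commit: the hunk above only renames recyleTimeMs, but for context here is a minimal, self-contained sketch of the pattern it touches — one cleanup task scheduled at the shorter of two configured periods. The class name and the period values are hypothetical; only the Math.min/scheduleAtFixedRate shape mirrors the code above.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class CleanupSchedulerSketch {
  public static void main(String[] args) throws InterruptedException {
    long poolCleanupPeriodMs = 60_000L;        // hypothetical pool cleanup period
    long connectionCleanupPeriodMs = 10_000L;  // hypothetical connection cleanup period
    // Use the shorter period so both cleanup deadlines are honoured.
    long recycleTimeMs = Math.min(poolCleanupPeriodMs, connectionCleanupPeriodMs);

    ScheduledExecutorService cleaner = Executors.newSingleThreadScheduledExecutor();
    // Run immediately, then every recycleTimeMs milliseconds.
    cleaner.scheduleAtFixedRate(() -> System.out.println("cleanup pass"),
        0, recycleTimeMs, TimeUnit.MILLISECONDS);

    TimeUnit.SECONDS.sleep(1);
    cleaner.shutdownNow();
  }
}
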
@@ -364,9 +364,9 @@ void cleanup(ConnectionPool pool) {
 long timeSinceLastActive = Time.now() - pool.getLastActiveTime();
 int total = pool.getNumConnections();
 // Active is a transient status in many cases for a connection since
-// the handler thread uses the connection very quickly. Thus the number
+// the handler thread uses the connection very quickly. Thus, the number
 // of connections with handlers using at the call time is constantly low.
-// Recently active is more lasting status and it shows how many
+// Recently active is more lasting status, and it shows how many
 // connections have been used with a recent time period. (i.e. 30 seconds)
 int active = pool.getNumActiveConnectionsRecently();
 float poolMinActiveRatio = pool.getMinActiveRatio();
@@ -376,9 +376,9 @@ void cleanup(ConnectionPool pool) {
 // The number should at least be 1
 int targetConnectionsCount = Math.max(1,
 (int)(poolMinActiveRatio * total) - active);
-List<ConnectionContext> conns =
+List<ConnectionContext> connections =
 pool.removeConnections(targetConnectionsCount);
-for (ConnectionContext conn : conns) {
+for (ConnectionContext conn : connections) {
 conn.close();
 }
 LOG.debug("Removed connection {} used {} seconds ago. " +
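
Aside, not part of this commit: the hunk above renames conns to connections; the surrounding arithmetic decides how many idle connections a pool gives up per cleanup pass. A stand-alone sketch of that expression, with a hypothetical class and method name:

public class ConnectionCleanupSketch {

  /**
   * Number of idle connections to drop in one cleanup pass, mirroring
   * Math.max(1, (int) (poolMinActiveRatio * total) - active) from the hunk above.
   */
  static int connectionsToRemove(int total, int active, float minActiveRatio) {
    // At least one connection is removed whenever cleanup decides to act.
    return Math.max(1, (int) (minActiveRatio * total) - active);
  }

  public static void main(String[] args) {
    // 10 pooled connections, 1 recently active, min active ratio 0.5 -> remove 4.
    System.out.println(connectionsToRemove(10, 1, 0.5f));
  }
}
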


@@ -231,7 +231,7 @@ public static ActiveNamenodeResolver newActiveNamenodeResolver(
 }
 /**
-* Add the the number of children for an existing HdfsFileStatus object.
+* Add the number of children for an existing HdfsFileStatus object.
 * @param dirStatus HdfsfileStatus object.
 * @param children number of children to be added.
 * @return HdfsFileStatus with the number of children specified.


@@ -50,14 +50,14 @@ public MountTableRefresherThread(MountTableManager manager,
 /**
 * Refresh mount table cache of local and remote routers. Local and remote
-* routers will be refreshed differently. Lets understand what are the
+* routers will be refreshed differently. Let's understand what are the
 * local and remote routers and refresh will be done differently on these
 * routers. Suppose there are three routers R1, R2 and R3. User want to add
 * new mount table entry. He will connect to only one router, not all the
 * routers. Suppose He connects to R1 and calls add mount table entry through
 * API or CLI. Now in this context R1 is local router, R2 and R3 are remote
 * routers. Because add mount table entry is invoked on R1, R1 will update the
-* cache locally it need not to make RPC call. But R1 will make RPC calls to
+* cache locally it need not make RPC call. But R1 will make RPC calls to
 * update cache on R2 and R3.
 */
 @Override
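
Aside, not part of this commit: a minimal sketch of the local/remote split described by the Javadoc above — the router that received the call (R1) reloads its cache in-process, while the others (R2, R3) are refreshed over RPC. The interface and method names below are invented for illustration; the real work is done by MountTableRefresherService and MountTableRefresherThread.

import java.util.List;

public class MountTableRefreshSketch {

  /** Hypothetical handle to a router; stands in for the real refresher threads. */
  interface RouterHandle {
    boolean isLocal();
    void reloadLocalCache();                 // in-process reload, no RPC needed
    void refreshOverRpc() throws Exception;  // admin RPC to a remote router
  }

  static void refreshAll(List<RouterHandle> routers) {
    for (RouterHandle router : routers) {
      if (router.isLocal()) {
        // "R1" in the Javadoc example: update the local cache directly.
        router.reloadLocalCache();
      } else {
        // "R2" and "R3": propagate the update through an RPC call.
        try {
          router.refreshOverRpc();
        } catch (Exception e) {
          // A slow or unreachable remote router must not block the others.
          System.err.println("Remote refresh failed: " + e.getMessage());
        }
      }
    }
  }
}
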


@@ -255,7 +255,7 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic {
 TimeUnit.MINUTES.toMillis(1);
 /**
 * Remote router mount table cache is updated through RouterClient(RPC call).
-* To improve performance, RouterClient connections are cached but it should
+* To improve performance, RouterClient connections are cached, but it should
 * not be kept in cache forever. This property defines the max time a
 * connection can be cached.
 */


@@ -36,7 +36,7 @@ public class RemoteMethod {
 private static final Logger LOG = LoggerFactory.getLogger(RemoteMethod.class);
-/** List of parameters: static and dynamic values, matchings types. */
+/** List of parameters: static and dynamic values, matching types. */
 private final Object[] params;
 /** List of method parameters types, matches parameters. */
 private final Class<?>[] types;


@@ -104,7 +104,7 @@
 import org.apache.hadoop.thirdparty.protobuf.BlockingService;
 /**
-* This class is responsible for handling all of the Admin calls to the HDFS
+* This class is responsible for handling all the Admin calls to the HDFS
 * router. It is created, started, and stopped by {@link Router}.
 */
 public class RouterAdminServer extends AbstractService


@@ -305,7 +305,7 @@ public HdfsFileStatus create(String src, FsPermission masked,
 * Check if an exception is caused by an unavailable subcluster or not. It
 * also checks the causes.
 * @param ioe IOException to check.
-* @return If caused by an unavailable subcluster. False if the should not be
+* @return If caused by an unavailable subcluster. False if they should not be
 * retried (e.g., NSQuotaExceededException).
 */
 protected static boolean isUnavailableSubclusterException(


@@ -200,7 +200,7 @@ private List<MountTable> getMountTableEntries() throws IOException {
 * During this time, the quota usage cache will also be updated by
 * quota manager:
 * 1. Stale paths (entries) will be removed.
-* 2. Existing entries will be override and updated.
+* 2. Existing entries will be overridden and updated.
 * @return List of mount tables which quota was set.
 * @throws IOException
 */


@@ -139,7 +139,7 @@ public class RouterRpcClient {
 /**
 * Create a router RPC client to manage remote procedure calls to NNs.
 *
-* @param conf Hdfs Configuation.
+* @param conf Hdfs Configuration.
 * @param router A router using this RPC client.
 * @param resolver A NN resolver to determine the currently active NN in HA.
 * @param monitor Optional performance monitor.
@@ -444,7 +444,7 @@ private RetryDecision shouldRetry(final IOException ioe, final int retryCount,
 * @param ugi User group information.
 * @param namenodes A prioritized list of namenodes within the same
 * nameservice.
-* @param method Remote ClientProtcol method to invoke.
+* @param method Remote ClientProtocol method to invoke.
 * @param params Variable list of parameters matching the method.
 * @return The result of invoking the method.
 * @throws ConnectException If it cannot connect to any Namenode.
@@ -1027,7 +1027,7 @@ public <R extends RemoteLocationContext, T> RemoteResult invokeSequential(
 } catch (Exception e) {
 // Unusual error, ClientProtocol calls always use IOException (or
 // RemoteException). Re-wrap in IOException for compatibility with
-// ClientProtcol.
+// ClientProtocol.
 LOG.error("Unexpected exception {} proxying {} to {}",
 e.getClass(), m.getName(), ns, e);
 IOException ioe = new IOException(
@@ -1449,7 +1449,7 @@ public <T extends RemoteLocationContext, R> Map<T, R> invokeConcurrent(
 results.add(new RemoteResult<>(location, ioe));
 } catch (ExecutionException ex) {
 Throwable cause = ex.getCause();
-LOG.debug("Canot execute {} in {}: {}",
+LOG.debug("Cannot execute {} in {}: {}",
 m.getName(), location, cause.getMessage());
 // Convert into IOException if needed


@@ -54,7 +54,7 @@ void init(
 /**
 * Start proxying an operation to the Namenode.
-* @return Id of the thread doing the proxying.
+* @return id of the thread doing the proxying.
 */
 long proxyOp();


@@ -410,7 +410,7 @@ protected Response get(
 * @param path Path to check.
 * @param op Operation to perform.
 * @param openOffset Offset for opening a file.
-* @param excludeDatanodes Blocks to excluded.
+* @param excludeDatanodes Blocks to exclude.
 * @param parameters Other parameters.
 * @return Redirection URI.
 * @throws URISyntaxException If it cannot parse the URI.


@@ -85,7 +85,7 @@ protected CachedRecordStore(Class<R> clazz, StateStoreDriver driver) {
 *
 * @param clazz Class of the record to store.
 * @param driver State Store driver.
-* @param over If the entries should be override if they expire
+* @param over If the entries should be overridden if they expire
 */
 protected CachedRecordStore(
 Class<R> clazz, StateStoreDriver driver, boolean over) {
@@ -153,7 +153,7 @@ public boolean loadCache(boolean force) throws IOException {
 }
 /**
-* Check if it's time to update the cache. Update it it was never updated.
+* Check if it's time to update the cache. Update it was never updated.
 *
 * @return If it's time to update this cache.
 */
@@ -206,7 +206,7 @@ public void overrideExpiredRecords(QueryResult<R> query) throws IOException {
 * Updates the state store with any record overrides we detected, such as an
 * expired state.
 *
-* @param record Record record to be updated.
+* @param record record to be updated.
 * @throws IOException If the values cannot be updated.
 */
 public void overrideExpiredRecord(R record) throws IOException {


@@ -331,7 +331,7 @@ public StateStoreDriver getDriver() {
 }
 /**
-* Fetch a unique identifier for this state store instance. Typically it is
+* Fetch a unique identifier for this state store instance. Typically, it is
 * the address of the router.
 *
 * @return Unique identifier for this store.


@@ -97,7 +97,7 @@ protected Configuration getConf() {
 }
 /**
-* Gets a unique identifier for the running task/process. Typically the
+* Gets a unique identifier for the running task/process. Typically, the
 * router address.
 *
 * @return Unique identifier for the running task.


@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
 /**
-* API request for retrieving a all non-expired router registrations present in
+* API request for retrieving an all non-expired router registrations present in
 * the state store.
 */
 public abstract class GetRouterRegistrationsRequest {


@@ -92,7 +92,7 @@ public B getBuilder() {
 /**
 * Get the serialized proto object. If the translator was created from a byte
-* stream, returns the intitial byte stream. Otherwise creates a new byte
+* stream, returns the initial byte stream. Otherwise, creates a new byte
 * stream from the cached builder.
 *
 * @return Protobuf message object


@@ -193,7 +193,7 @@ private String getUsage(String cmd) {
 /**
 * Usage: validates the maximum number of arguments for a command.
-* @param arg List of of command line parameters.
+* @param arg List of command line parameters.
 */
 private void validateMax(String[] arg) {
 if (arg[0].equals("-ls")) {
@@ -407,7 +407,7 @@ public int run(String[] argv) throws Exception {
 System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
 printUsage(cmd);
 } catch (RemoteException e) {
-// This is a error returned by the server.
+// This is an error returned by the server.
 // Print out the first line of the error message, ignore the stack trace.
 exitCode = -1;
 debugException = e;
@@ -807,7 +807,7 @@ public void listMounts(String[] argv, int i) throws IOException {
 } else if (argv[i].equals("-d")) { // Check if -d parameter is specified.
 detail = true;
 if (argv.length == 2) {
-path = "/"; // If no path is provide with -ls -d.
+path = "/"; // If no path is provided with -ls -d.
 } else {
 path = argv[++i];
 }


@@ -789,7 +789,7 @@ public void startCluster(Configuration overrideConf) {
 Configuration nnConf = generateNamenodeConfiguration(ns0);
 if (overrideConf != null) {
 nnConf.addResource(overrideConf);
-// Router also uses this configurations as initial values.
+// Router also uses these configurations as initial values.
 routerConf = new Configuration(overrideConf);
 }


@@ -137,7 +137,7 @@ public void updateActiveNamenode(
 break;
 }
 }
-// This operation modifies the list so we need to be careful
+// This operation modifies the list, so we need to be careful
 synchronized(namenodes) {
 Collections.sort(namenodes, new NamenodePriorityComparator());
 }
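
Aside, not part of this commit: the comment above guards a whole-list sort with a synchronized block. A generic, stand-alone illustration of that idiom for a shared list (the class here is hypothetical and unrelated to the resolver code above):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SortUnderLockSketch {
  public static void main(String[] args) {
    List<Integer> shared = Collections.synchronizedList(new ArrayList<>());
    Collections.addAll(shared, 3, 1, 2);
    // The wrapper synchronizes single calls, but a sort touches the whole
    // list, so compound operations must hold the list's own lock.
    synchronized (shared) {
      Collections.sort(shared);
    }
    System.out.println(shared); // prints [1, 2, 3]
  }
}
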


@@ -297,7 +297,7 @@ public void testCacheUpdateOnNamenodeStateUpdate() throws IOException {
 String rpcAddr = namenode.getRpcAddress();
 InetSocketAddress inetAddr = getInetSocketAddress(rpcAddr);
-// If the namenode state changes and it serves request,
+// If the namenode state changes, and it serves request,
 // RouterRpcClient calls updateActiveNamenode to update the state to active,
 // Check whether correct updated state is returned post update.
 namenodeResolver.updateActiveNamenode(NAMESERVICES[0], inetAddr);


@@ -45,7 +45,7 @@
 import org.junit.Test;
 /**
-* The the safe mode for the {@link Router} controlled by
+* The safe mode for the {@link Router} controlled by
 * {@link SafeModeTimer}.
 */
 public class TestRouter {


@@ -742,7 +742,7 @@ public void testMountTablePermissions() throws Exception {
 * @param mount
 * target mount table
 * @param canRead
-* whether can list mount tables under specified mount
+* whether you can list mount tables under specified mount
 * @param addCommandCode
 * expected return code of add command executed for specified mount
 * @param rmCommandCode
@@ -1467,7 +1467,7 @@ public void testUpdateErrorCase() throws Exception {
 err.toString().contains("update: /noMount doesn't exist."));
 err.reset();
-// Check update if non true/false value is passed for readonly.
+// Check update if no true/false value is passed for readonly.
 argv = new String[] {"-update", src, "-readonly", "check"};
 assertEquals(-1, ToolRunner.run(admin, argv));
 assertTrue(err.toString(), err.toString().contains("update: "


@@ -582,7 +582,7 @@ public String toString() {
 }
 /**
-* Asserts that the results are the expected amount and it has both success
+* Asserts that the results are the expected amount, and it has both success
 * and failure.
 * @param msg Message to show when the assertion fails.
 * @param expected Expected number of results.


@@ -308,9 +308,9 @@ public void run() {
 TimeUnit.SECONDS);
 mountTableRefresherService.init(config);
 // One router is not responding for 1 minute, still refresh should
-// finished in 5 second as cache update timeout is set 5 second.
+// finish in 5 second as cache update timeout is set 5 second.
 mountTableRefresherService.refresh();
-// Test case timeout is assert for this test case.
+// Test case timeout is asserted for this test case.
 }
 /**
@@ -349,7 +349,7 @@ protected RouterClient createRouterClient(
 mountTableRefresherService.refresh();
 assertNotEquals("No RouterClient is created.", 0, createCounter.get());
 /*
-* Wait for clients to expire. Lets wait triple the cache eviction period.
+* Wait for clients to expire. Let's wait triple the cache eviction period.
 * After cache eviction period all created client must be removed and
 * closed.
 */


@@ -75,11 +75,11 @@ public void testGetChildrenPaths() {
 public void testGetQuotaUsage() {
 RouterQuotaUsage quotaGet;
-// test case1: get quota with an non-exist path
+// test case1: get quota with a non-exist path
 quotaGet = manager.getQuotaUsage("/non-exist-path");
 assertNull(quotaGet);
-// test case2: get quota from an no-quota set path
+// test case2: get quota from a no-quota set path
 RouterQuotaUsage.Builder quota = new RouterQuotaUsage.Builder()
 .quota(HdfsConstants.QUOTA_RESET)
 .spaceQuota(HdfsConstants.QUOTA_RESET);
@@ -88,7 +88,7 @@ public void testGetQuotaUsage() {
 // it should return null
 assertNull(quotaGet);
-// test case3: get quota from an quota-set path
+// test case3: get quota from a quota-set path
 quota.quota(1);
 quota.spaceQuota(HdfsConstants.QUOTA_RESET);
 manager.put("/hasQuotaSet", quota.build());
@@ -96,24 +96,24 @@ public void testGetQuotaUsage() {
 assertEquals(1, quotaGet.getQuota());
 assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota());
-// test case4: get quota with an non-exist child path
+// test case4: get quota with a non-exist child path
 quotaGet = manager.getQuotaUsage("/hasQuotaSet/file");
 // it will return the nearest ancestor which quota was set
 assertEquals(1, quotaGet.getQuota());
 assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota());
-// test case5: get quota with an child path which its parent
+// test case5: get quota with a child path which its parent
 // wasn't quota set
 quota.quota(HdfsConstants.QUOTA_RESET);
 quota.spaceQuota(HdfsConstants.QUOTA_RESET);
 manager.put("/hasQuotaSet/noQuotaSet", quota.build());
-// here should returns the quota of path /hasQuotaSet
+// here should return the quota of path /hasQuotaSet
 // (the nearest ancestor which quota was set)
 quotaGet = manager.getQuotaUsage("/hasQuotaSet/noQuotaSet/file");
 assertEquals(1, quotaGet.getQuota());
 assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota());
-// test case6: get quota with an child path which its parent was quota set
+// test case6: get quota with a child path which its parent was quota set
 quota.quota(2);
 quota.spaceQuota(HdfsConstants.QUOTA_RESET);
 manager.put("/hasQuotaSet/hasQuotaSet", quota.build());
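
Aside, not part of this commit: the test comments above rely on the quota manager returning the nearest ancestor whose quota was set. A tiny stand-alone sketch of that lookup; the flat Long quota value and all names here are simplifications invented for this illustration.

import java.util.HashMap;
import java.util.Map;

public class NearestQuotaSketch {

  private final Map<String, Long> quotaByPath = new HashMap<>();

  void put(String path, long nsQuota) {
    quotaByPath.put(path, nsQuota);
  }

  /** Walk from the path toward the root and return the first quota found. */
  Long getQuotaUsage(String path) {
    String current = path;
    while (current != null) {
      Long quota = quotaByPath.get(current);
      if (quota != null) {
        return quota;
      }
      int slash = current.lastIndexOf('/');
      current = slash > 0 ? current.substring(0, slash) : null;
    }
    return null; // no ancestor has a quota set
  }

  public static void main(String[] args) {
    NearestQuotaSketch manager = new NearestQuotaSketch();
    manager.put("/hasQuotaSet", 1L);
    // A descendant without its own quota resolves to the nearest ancestor (1).
    System.out.println(manager.getQuotaUsage("/hasQuotaSet/noQuotaSet/file"));
    // A path with no quota-carrying ancestor resolves to null.
    System.out.println(manager.getQuotaUsage("/non-exist-path"));
  }
}
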


@@ -511,7 +511,7 @@ public void testIsMultiDestDir() throws Exception {
 */
 @Test
 public void testSnapshotPathResolution() throws Exception {
-// Create a mount entry with non isPathAll order, so as to call
+// Create a mount entry with non isPathAll order, to call
 // invokeSequential.
 Map<String, String> destMap = new HashMap<>();
 destMap.put("ns0", "/tmp_ns0");


@@ -75,7 +75,7 @@
 import org.junit.Test;
 /**
-* The the RPC interface of the {@link getRouter()} implemented by
+* The RPC interface of the {@link getRouter()} implemented by
 * {@link RouterRpcServer}.
 */
 public class TestRouterRpcMultiDestination extends TestRouterRpc {


@@ -110,7 +110,7 @@ public void testMultiNodeTokenRemovalShortSyncWithoutWatch()
 conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false);
 // make sync quick
 conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 3);
-// set the renew window and removal interval to be a
+// set the renewal window and removal interval to be a
 // short time to trigger the background cleanup
 conf.setInt(RENEW_INTERVAL, 10);
 conf.setInt(REMOVAL_SCAN_INTERVAL, 10);
@@ -170,7 +170,7 @@ public void testMultiNodeTokenRemovalLongSyncWithoutWatch()
 conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false);
 // make sync quick
 conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 20);
-// set the renew window and removal interval to be a
+// set the renewal window and removal interval to be a
 // short time to trigger the background cleanup
 conf.setInt(RENEW_INTERVAL, 10);
 conf.setInt(REMOVAL_SCAN_INTERVAL, 10);


@@ -186,7 +186,7 @@ public void testRegistrationMajorityQuorum()
 // 1) ns0:nn0 - Standby (newest)
 // 2) ns0:nn0 - Active (oldest)
 // 3) ns0:nn0 - Active (2nd oldest)
-// 4) ns0:nn0 - Active (3nd oldest element, newest active element)
+// 4) ns0:nn0 - Active (3rd oldest element, newest active element)
 // Verify the selected entry is the newest majority opinion (4)
 String ns = "ns0";
 String nn = "nn0";


@@ -556,7 +556,7 @@ private static Map<String, Class<?>> getFields(BaseRecord record) {
 }
 /**
-* Get the type of a field.
+* Get the type of field.
 *
 * @param fieldName
 * @return Field type
@@ -601,7 +601,7 @@ private static Method locateGetter(BaseRecord record, String fieldName) {
 }
 /**
-* Expands a data object from the store into an record object. Default store
+* Expands a data object from the store into a record object. Default store
 * data type is a String. Override if additional serialization is required.
 *
 * @param data Object containing the serialized data. Only string is