diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/rbfbalance/RouterFedBalance.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/rbfbalance/RouterFedBalance.java
index 4161ab503fe..0cb4b54bfc4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/rbfbalance/RouterFedBalance.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/rbfbalance/RouterFedBalance.java
@@ -63,7 +63,7 @@
  * dst sub-namespace with distcp.
  *
  * 1. Move data from the source path to the destination path with distcp.
- * 2. Update the the mount entry.
+ * 2. Update the mount entry.
  * 3. Delete the source path to trash.
  */
 public class RouterFedBalance extends Configured implements Tool {
@@ -77,7 +77,7 @@ public class RouterFedBalance extends Configured implements Tool {
   private static final String TRASH_PROCEDURE = "trash-procedure";

   /**
-   * This class helps building the balance job.
+   * This class helps to build the balance job.
    */
   private class Builder {
     /* Force close all open files while there is no diff. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
index e8b00d0b5dc..ed3069af836 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
@@ -50,7 +50,7 @@ public interface FederationMBean {

   /**
    * Get the latest state of all routers.
-   * @return JSON with all of the known routers or null if failure.
+   * @return JSON with all the known routers or null if failure.
    */
   String getRouters();

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
index 5c6dac465fb..159d08e26a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
@@ -52,7 +52,7 @@ public class FederationRPCPerformanceMonitor implements RouterRpcMonitor {

   /** Time for an operation to be received in the Router. */
   private static final ThreadLocal<Long> START_TIME = new ThreadLocal<>();
-  /** Time for an operation to be send to the Namenode. */
+  /** Time for an operation to be sent to the Namenode. */
   private static final ThreadLocal<Long> PROXY_TIME = new ThreadLocal<>();

   /** Configuration for the performance monitor. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java
index d5eabd1a3da..be88069b491 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java
@@ -290,7 +290,7 @@ public String getMountTable() {

     // Dump mount table entries information into JSON
     for (MountTable entry : orderedMounts) {
-      // Sumarize destinations
+      // Summarize destinations
       Set<String> nameservices = new LinkedHashSet<>();
       Set<String> paths = new LinkedHashSet<>();
       for (RemoteLocation location : entry.getDestinations()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
index 9a5434b91ce..bd2d8c9d697 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
@@ -124,7 +124,7 @@ public synchronized void release() {
    */
   public synchronized void close(boolean force) {
     if (!force && this.numThreads > 0) {
-      // this is an erroneous case but we have to close the connection
+      // this is an erroneous case, but we have to close the connection
       // anyway since there will be connection leak if we don't do so
       // the connection has been moved out of the pool
       LOG.error("Active connection with {} handlers will be closed",
@@ -132,7 +132,7 @@ public synchronized void close(boolean force) {
     }
     this.closed = true;
     Object proxy = this.client.getProxy();
-    // Nobody should be using this anymore so it should close right away
+    // Nobody should be using this anymore, so it should close right away
     RPC.stopProxy(proxy);
   }

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
index aad272f5831..5fe797bf5ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
@@ -135,12 +135,12 @@ public void start() {
     this.creator.start();

     // Schedule a task to remove stale connection pools and sockets
-    long recyleTimeMs = Math.min(
+    long recycleTimeMs = Math.min(
         poolCleanupPeriodMs, connectionCleanupPeriodMs);
     LOG.info("Cleaning every {} seconds",
-        TimeUnit.MILLISECONDS.toSeconds(recyleTimeMs));
+        TimeUnit.MILLISECONDS.toSeconds(recycleTimeMs));
     this.cleaner.scheduleAtFixedRate(
-        new CleanupTask(), 0, recyleTimeMs, TimeUnit.MILLISECONDS);
+        new CleanupTask(), 0, recycleTimeMs, TimeUnit.MILLISECONDS);

     // Mark the manager as running
     this.running = true;
@@ -364,9 +364,9 @@ void cleanup(ConnectionPool pool) {
     long timeSinceLastActive = Time.now() - pool.getLastActiveTime();
     int total = pool.getNumConnections();
     // Active is a transient status in many cases for a connection since
-    // the handler thread uses the connection very quickly. Thus the number
+    // the handler thread uses the connection very quickly. Thus, the number
     // of connections with handlers using at the call time is constantly low.
-    // Recently active is more lasting status and it shows how many
+    // Recently active is a more lasting status, and it shows how many
     // connections have been used with a recent time period. (i.e. 30 seconds)
     int active = pool.getNumActiveConnectionsRecently();
     float poolMinActiveRatio = pool.getMinActiveRatio();
@@ -376,9 +376,9 @@ void cleanup(ConnectionPool pool) {
       // The number should at least be 1
       int targetConnectionsCount = Math.max(1,
           (int)(poolMinActiveRatio * total) - active);
-      List<ConnectionContext> conns =
+      List<ConnectionContext> connections =
           pool.removeConnections(targetConnectionsCount);
-      for (ConnectionContext conn : conns) {
+      for (ConnectionContext conn : connections) {
         conn.close();
       }
       LOG.debug("Removed connection {} used {} seconds ago. " +
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
index 7ff853946d7..e593e888c9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
@@ -231,7 +231,7 @@ public static ActiveNamenodeResolver newActiveNamenodeResolver(
   }

   /**
-   * Add the the number of children for an existing HdfsFileStatus object.
+   * Add the number of children for an existing HdfsFileStatus object.
    * @param dirStatus HdfsfileStatus object.
    * @param children number of children to be added.
    * @return HdfsFileStatus with the number of children specified.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java
index a077c4b3f45..40ff843fa1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java
@@ -50,14 +50,14 @@ public MountTableRefresherThread(MountTableManager manager,

   /**
    * Refresh mount table cache of local and remote routers. Local and remote
-   * routers will be refreshed differently. Lets understand what are the
+   * routers will be refreshed differently. Let's understand what are the
    * local and remote routers and refresh will be done differently on these
    * routers. Suppose there are three routers R1, R2 and R3. User want to add
    * new mount table entry. He will connect to only one router, not all the
    * routers. Suppose He connects to R1 and calls add mount table entry through
    * API or CLI. Now in this context R1 is local router, R2 and R3 are remote
    * routers. Because add mount table entry is invoked on R1, R1 will update the
-   * cache locally it need not to make RPC call. But R1 will make RPC calls to
+   * cache locally; it need not make an RPC call. But R1 will make RPC calls to
    * update cache on R2 and R3.
   */
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
index 741e470c6fc..c0a9e3f294c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
@@ -255,7 +255,7 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic {
       TimeUnit.MINUTES.toMillis(1);
   /**
    * Remote router mount table cache is updated through RouterClient(RPC call).
-   * To improve performance, RouterClient connections are cached but it should
+   * To improve performance, RouterClient connections are cached, but they should
    * not be kept in cache forever. This property defines the max time a
    * connection can be cached.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java
index 6f1121ef9fd..e5df4893a91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java
@@ -36,7 +36,7 @@ public class RemoteMethod {
   private static final Logger LOG =
       LoggerFactory.getLogger(RemoteMethod.class);

-  /** List of parameters: static and dynamic values, matchings types. */
+  /** List of parameters: static and dynamic values, matching types. */
   private final Object[] params;
   /** List of method parameters types, matches parameters. */
   private final Class<?>[] types;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
index 127470a1264..db1922ad808 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
@@ -104,7 +104,7 @@
 import org.apache.hadoop.thirdparty.protobuf.BlockingService;

 /**
- * This class is responsible for handling all of the Admin calls to the HDFS
+ * This class is responsible for handling all the Admin calls to the HDFS
  * router. It is created, started, and stopped by {@link Router}.
  */
 public class RouterAdminServer extends AbstractService
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index 469b16178a2..c1dafec9220 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -305,7 +305,7 @@ public HdfsFileStatus create(String src, FsPermission masked,
    * Check if an exception is caused by an unavailable subcluster or not. It
    * also checks the causes.
    * @param ioe IOException to check.
-   * @return If caused by an unavailable subcluster. False if the should not be
+   * @return If caused by an unavailable subcluster. False if it should not be
    *         retried (e.g., NSQuotaExceededException).
    */
   protected static boolean isUnavailableSubclusterException(
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
index a4a7d9e9ddc..e9b780d5bca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
@@ -200,7 +200,7 @@ private List<MountTable> getMountTableEntries() throws IOException {
    * During this time, the quota usage cache will also be updated by
    * quota manager:
    * 1. Stale paths (entries) will be removed.
-   * 2. Existing entries will be override and updated.
+   * 2. Existing entries will be overridden and updated.
    * @return List of mount tables which quota was set.
    * @throws IOException
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 34a2c47c3ef..ff90854ebb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -139,7 +139,7 @@ public class RouterRpcClient {
   /**
    * Create a router RPC client to manage remote procedure calls to NNs.
    *
-   * @param conf Hdfs Configuation.
+   * @param conf Hdfs Configuration.
    * @param router A router using this RPC client.
    * @param resolver A NN resolver to determine the currently active NN in HA.
    * @param monitor Optional performance monitor.
@@ -444,7 +444,7 @@ private RetryDecision shouldRetry(final IOException ioe, final int retryCount,
    * @param ugi User group information.
    * @param namenodes A prioritized list of namenodes within the same
    *                  nameservice.
-   * @param method Remote ClientProtcol method to invoke.
+   * @param method Remote ClientProtocol method to invoke.
    * @param params Variable list of parameters matching the method.
    * @return The result of invoking the method.
    * @throws ConnectException If it cannot connect to any Namenode.
@@ -1027,7 +1027,7 @@ public RemoteResult invokeSequential(
       } catch (Exception e) {
         // Unusual error, ClientProtocol calls always use IOException (or
         // RemoteException). Re-wrap in IOException for compatibility with
-        // ClientProtcol.
+        // ClientProtocol.
LOG.error("Unexpected exception {} proxying {} to {}", e.getClass(), m.getName(), ns, e); IOException ioe = new IOException( @@ -1449,7 +1449,7 @@ public Map invokeConcurrent( results.add(new RemoteResult<>(location, ioe)); } catch (ExecutionException ex) { Throwable cause = ex.getCause(); - LOG.debug("Canot execute {} in {}: {}", + LOG.debug("Cannot execute {} in {}: {}", m.getName(), location, cause.getMessage()); // Convert into IOException if needed diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java index 388fc5a0da4..039b40ae2e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java @@ -54,7 +54,7 @@ void init( /** * Start proxying an operation to the Namenode. - * @return Id of the thread doing the proxying. + * @return id of the thread doing the proxying. */ long proxyOp(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java index accec4627ed..a66953b1bd7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java @@ -410,7 +410,7 @@ protected Response get( * @param path Path to check. * @param op Operation to perform. * @param openOffset Offset for opening a file. - * @param excludeDatanodes Blocks to excluded. + * @param excludeDatanodes Blocks to exclude. * @param parameters Other parameters. * @return Redirection URI. * @throws URISyntaxException If it cannot parse the URI. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java index 7b28c03a529..2b693aa936f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java @@ -85,7 +85,7 @@ protected CachedRecordStore(Class clazz, StateStoreDriver driver) { * * @param clazz Class of the record to store. * @param driver State Store driver. - * @param over If the entries should be override if they expire + * @param over If the entries should be overridden if they expire */ protected CachedRecordStore( Class clazz, StateStoreDriver driver, boolean over) { @@ -153,7 +153,7 @@ public boolean loadCache(boolean force) throws IOException { } /** - * Check if it's time to update the cache. Update it it was never updated. + * Check if it's time to update the cache. Update it was never updated. * * @return If it's time to update this cache. */ @@ -206,7 +206,7 @@ public void overrideExpiredRecords(QueryResult query) throws IOException { * Updates the state store with any record overrides we detected, such as an * expired state. 
    *
-   * @param record Record record to be updated.
+   * @param record Record to be updated.
    * @throws IOException If the values cannot be updated.
    */
   public void overrideExpiredRecord(R record) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
index 507c83786a8..201c7a325f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
@@ -331,7 +331,7 @@ public StateStoreDriver getDriver() {
   }

   /**
-   * Fetch a unique identifier for this state store instance. Typically it is
+   * Fetch a unique identifier for this state store instance. Typically, it is
    * the address of the router.
    *
    * @return Unique identifier for this store.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java
index d595a97d017..a4e9c1ce82b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java
@@ -97,7 +97,7 @@ protected Configuration getConf() {
   }

   /**
-   * Gets a unique identifier for the running task/process. Typically the
+   * Gets a unique identifier for the running task/process. Typically, the
    * router address.
    *
    * @return Unique identifier for the running task.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetRouterRegistrationsRequest.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetRouterRegistrationsRequest.java
index b70cccf593a..10b172eb50a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetRouterRegistrationsRequest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetRouterRegistrationsRequest.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;

 /**
- * API request for retrieving a all non-expired router registrations present in
+ * API request for retrieving all non-expired router registrations present in
  * the state store.
 */
 public abstract class GetRouterRegistrationsRequest {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
index 8422a8c4b6d..98988f8bb2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
@@ -92,7 +92,7 @@ public B getBuilder() {

   /**
    * Get the serialized proto object. If the translator was created from a byte
-   * stream, returns the intitial byte stream. Otherwise creates a new byte
+   * stream, returns the initial byte stream. Otherwise, creates a new byte
    * stream from the cached builder.
    *
    * @return Protobuf message object
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index b8e7c796a14..d7fcf862fb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -193,7 +193,7 @@ private String getUsage(String cmd) {

   /**
    * Usage: validates the maximum number of arguments for a command.
-   * @param arg List of of command line parameters.
+   * @param arg List of command line parameters.
    */
   private void validateMax(String[] arg) {
     if (arg[0].equals("-ls")) {
@@ -407,7 +407,7 @@ public int run(String[] argv) throws Exception {
       System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
       printUsage(cmd);
     } catch (RemoteException e) {
-      // This is a error returned by the server.
+      // This is an error returned by the server.
       // Print out the first line of the error message, ignore the stack trace.
       exitCode = -1;
       debugException = e;
@@ -807,7 +807,7 @@ public void listMounts(String[] argv, int i) throws IOException {
     } else if (argv[i].equals("-d")) { // Check if -d parameter is specified.
       detail = true;
       if (argv.length == 2) {
-        path = "/"; // If no path is provide with -ls -d.
+        path = "/"; // If no path is provided with -ls -d.
       } else {
         path = argv[++i];
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
index ac6ecd4398c..87b99e5d952 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
@@ -789,7 +789,7 @@ public void startCluster(Configuration overrideConf) {
       Configuration nnConf = generateNamenodeConfiguration(ns0);
       if (overrideConf != null) {
         nnConf.addResource(overrideConf);
-        // Router also uses this configurations as initial values.
+        // Router also uses these configurations as initial values.
         routerConf = new Configuration(overrideConf);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index 43efd85228d..1519bad74b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -137,7 +137,7 @@ public void updateActiveNamenode(
         break;
       }
     }
-    // This operation modifies the list so we need to be careful
+    // This operation modifies the list, so we need to be careful
     synchronized(namenodes) {
       Collections.sort(namenodes, new NamenodePriorityComparator());
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
index df80037c699..ed10a3a8731 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
@@ -297,7 +297,7 @@ public void testCacheUpdateOnNamenodeStateUpdate() throws IOException {
     String rpcAddr = namenode.getRpcAddress();
     InetSocketAddress inetAddr = getInetSocketAddress(rpcAddr);

-    // If the namenode state changes and it serves request,
+    // If the namenode state changes and it serves requests,
     // RouterRpcClient calls updateActiveNamenode to update the state to active,
     // Check whether correct updated state is returned post update.
     namenodeResolver.updateActiveNamenode(NAMESERVICES[0], inetAddr);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
index 0464877d3cd..9b8fb67e681 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
@@ -45,7 +45,7 @@
 import org.junit.Test;

 /**
- * The the safe mode for the {@link Router} controlled by
+ * Tests the safe mode for the {@link Router} controlled by
  * {@link SafeModeTimer}.
 */
 public class TestRouter {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index b9dff65b28a..677f3b5e947 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -742,7 +742,7 @@ public void testMountTablePermissions() throws Exception {
    * @param mount
    *          target mount table
    * @param canRead
-   *          whether can list mount tables under specified mount
+   *          whether you can list mount tables under the specified mount
    * @param addCommandCode
    *          expected return code of add command executed for specified mount
    * @param rmCommandCode
@@ -1467,7 +1467,7 @@ public void testUpdateErrorCase() throws Exception {
         err.toString().contains("update: /noMount doesn't exist."));
     err.reset();

-    // Check update if non true/false value is passed for readonly.
+    // Check update if a non-true/false value is passed for readonly.
     argv = new String[] {"-update", src, "-readonly", "check"};
     assertEquals(-1, ToolRunner.run(admin, argv));
     assertTrue(err.toString(), err.toString().contains("update: "
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java
index bf571e2ff79..ef5322ba218 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java
@@ -582,7 +582,7 @@ public String toString() {
   }

   /**
-   * Asserts that the results are the expected amount and it has both success
+   * Asserts that the results are the expected amount and contain both success
    * and failure.
    * @param msg Message to show when the assertion fails.
    * @param expected Expected number of results.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java
index c90e614a5cd..82bc7d905e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java
@@ -308,9 +308,9 @@ public void run() {
         TimeUnit.SECONDS);
     mountTableRefresherService.init(config);
     // One router is not responding for 1 minute, still refresh should
-    // finished in 5 second as cache update timeout is set 5 second.
+    // finish in 5 seconds as the cache update timeout is set to 5 seconds.
     mountTableRefresherService.refresh();
-    // Test case timeout is assert for this test case.
+    // Test case timeout is asserted for this test case.
   }

   /**
@@ -349,7 +349,7 @@ protected RouterClient createRouterClient(
     mountTableRefresherService.refresh();
     assertNotEquals("No RouterClient is created.", 0, createCounter.get());
     /*
-     * Wait for clients to expire. Lets wait triple the cache eviction period.
+     * Wait for clients to expire. Let's wait triple the cache eviction period.
      * After cache eviction period all created client must be removed and
      * closed.
      */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java
index 4a1dd2e091b..d24ef8ebc3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java
@@ -75,11 +75,11 @@ public void testGetChildrenPaths() {
   public void testGetQuotaUsage() {
     RouterQuotaUsage quotaGet;

-    // test case1: get quota with an non-exist path
+    // test case1: get quota with a non-existent path
     quotaGet = manager.getQuotaUsage("/non-exist-path");
     assertNull(quotaGet);

-    // test case2: get quota from an no-quota set path
+    // test case2: get quota from a no-quota set path
     RouterQuotaUsage.Builder quota = new RouterQuotaUsage.Builder()
         .quota(HdfsConstants.QUOTA_RESET)
         .spaceQuota(HdfsConstants.QUOTA_RESET);
@@ -88,7 +88,7 @@ public void testGetQuotaUsage() {
     // it should return null
     assertNull(quotaGet);

-    // test case3: get quota from an quota-set path
+    // test case3: get quota from a quota-set path
     quota.quota(1);
     quota.spaceQuota(HdfsConstants.QUOTA_RESET);
     manager.put("/hasQuotaSet", quota.build());
@@ -96,24 +96,24 @@ public void testGetQuotaUsage() {
     assertEquals(1, quotaGet.getQuota());
     assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota());

-    // test case4: get quota with an non-exist child path
+    // test case4: get quota with a non-existent child path
     quotaGet = manager.getQuotaUsage("/hasQuotaSet/file");
     // it will return the nearest ancestor which quota was set
     assertEquals(1, quotaGet.getQuota());
     assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota());

-    // test case5: get quota with an child path which its parent
+    // test case5: get quota with a child path whose parent
     // wasn't quota set
     quota.quota(HdfsConstants.QUOTA_RESET);
     quota.spaceQuota(HdfsConstants.QUOTA_RESET);
     manager.put("/hasQuotaSet/noQuotaSet", quota.build());
-    // here should returns the quota of path /hasQuotaSet
+    // here it should return the quota of path /hasQuotaSet
     // (the nearest ancestor which quota was set)
     quotaGet = manager.getQuotaUsage("/hasQuotaSet/noQuotaSet/file");
     assertEquals(1, quotaGet.getQuota());
     assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota());

-    // test case6: get quota with an child path which its parent was quota set
+    // test case6: get quota with a child path whose parent was quota set
     quota.quota(2);
     quota.spaceQuota(HdfsConstants.QUOTA_RESET);
     manager.put("/hasQuotaSet/hasQuotaSet", quota.build());
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
index 238d1b03011..b05337443f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
@@ -511,7 +511,7 @@ public void testIsMultiDestDir() throws Exception {
    */
   @Test
   public void testSnapshotPathResolution() throws Exception {
-    // Create a mount entry with non isPathAll order, so as to call
+    // Create a mount entry with a non-isPathAll order, to call
     // invokeSequential.
     Map<String, String> destMap = new HashMap<>();
     destMap.put("ns0", "/tmp_ns0");
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
index 370a1250a7c..6ade57326e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
@@ -75,7 +75,7 @@
 import org.junit.Test;

 /**
- * The the RPC interface of the {@link getRouter()} implemented by
+ * Tests the RPC interface of the {@link getRouter()} implemented by
  * {@link RouterRpcServer}.
  */
 public class TestRouterRpcMultiDestination extends TestRouterRpc {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java
index 3c7f8e88a91..e4c293091b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java
@@ -110,7 +110,7 @@ public void testMultiNodeTokenRemovalShortSyncWithoutWatch()
     conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false);
     // make sync quick
     conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 3);
-    // set the renew window and removal interval to be a
+    // set the renewal window and removal interval to be a
     // short time to trigger the background cleanup
     conf.setInt(RENEW_INTERVAL, 10);
     conf.setInt(REMOVAL_SCAN_INTERVAL, 10);
@@ -170,7 +170,7 @@ public void testMultiNodeTokenRemovalLongSyncWithoutWatch()
     conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false);
     // make sync quick
     conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 20);
-    // set the renew window and removal interval to be a
+    // set the renewal window and removal interval to be a
     // short time to trigger the background cleanup
     conf.setInt(RENEW_INTERVAL, 10);
     conf.setInt(REMOVAL_SCAN_INTERVAL, 10);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java
index 63bc6235a61..45a240b866b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java
@@ -186,7 +186,7 @@ public void testRegistrationMajorityQuorum()
     // 1) ns0:nn0 - Standby (newest)
     // 2) ns0:nn0 - Active (oldest)
     // 3) ns0:nn0 - Active (2nd oldest)
-    // 4) ns0:nn0 - Active (3nd oldest element, newest active element)
+    // 4) ns0:nn0 - Active (3rd oldest element, newest active element)
     // Verify the selected entry is the newest majority opinion (4)
     String ns = "ns0";
     String nn = "nn0";
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
index fe1b9a5bfa0..b8bb7c4d2d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
@@ -556,7 +556,7 @@ private static Map<String, Class<?>> getFields(BaseRecord record) {
   }

   /**
-   * Get the type of a field.
+   * Get the type of field.
    *
    * @param fieldName
    * @return Field type
@@ -601,7 +601,7 @@ private static Method locateGetter(BaseRecord record, String fieldName) {
   }

   /**
-   * Expands a data object from the store into an record object. Default store
+   * Expands a data object from the store into a record object. Default store
    * data type is a String. Override if additional serialization is required.
    *
    * @param data Object containing the serialized data. Only string is