From 876fd8ab7913a259ff9f69c16cc2d9af46ad3f9b Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Wed, 23 Apr 2014 20:13:32 +0000 Subject: [PATCH] HDFS-6274. Cleanup javadoc warnings in HDFS code. Contributed by Suresh Srinivas. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1589506 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../main/java/org/apache/hadoop/fs/Hdfs.java | 5 +-- .../apache/hadoop/hdfs/BlockReaderLocal.java | 2 +- .../hadoop/hdfs/BlockStorageLocationUtil.java | 3 +- .../org/apache/hadoop/hdfs/DFSClient.java | 4 +- .../apache/hadoop/hdfs/DFSInputStream.java | 20 +++------- .../java/org/apache/hadoop/hdfs/DFSUtil.java | 13 +++---- .../hadoop/hdfs/DistributedFileSystem.java | 2 +- .../hdfs/protocol/ClientDatanodeProtocol.java | 5 ++- .../hadoop/hdfs/protocol/ClientProtocol.java | 3 +- .../datatransfer/DataTransferProtocol.java | 2 +- .../token/block/BlockTokenSecretManager.java | 9 ++--- .../DelegationTokenSecretManager.java | 5 +-- .../server/blockmanagement/BlockInfo.java | 3 -- .../server/blockmanagement/BlockManager.java | 2 - .../blockmanagement/BlockPlacementPolicy.java | 12 +++--- .../blockmanagement/DatanodeManager.java | 4 +- .../hadoop/hdfs/server/common/Storage.java | 8 ++-- .../hdfs/server/common/StorageInfo.java | 4 +- .../hdfs/server/datanode/BPServiceActor.java | 6 +-- .../server/datanode/BlockMetadataHeader.java | 11 +----- .../hdfs/server/datanode/BlockReceiver.java | 6 +-- .../hdfs/server/datanode/BlockSender.java | 2 +- .../hadoop/hdfs/server/datanode/DataNode.java | 17 ++++---- .../hdfs/server/datanode/DataXceiver.java | 2 +- .../server/datanode/DirectoryScanner.java | 3 +- .../server/datanode/FinalizedReplica.java | 2 +- .../server/datanode/ReplicaBeingWritten.java | 2 +- .../server/datanode/ReplicaInPipeline.java | 2 +- .../datanode/ReplicaInPipelineInterface.java | 2 +- .../hdfs/server/datanode/ReplicaInfo.java | 2 +- .../server/datanode/ReplicaUnderRecovery.java | 2 +- .../datanode/ReplicaWaitingToBeRecovered.java | 2 +- .../AvailableSpaceVolumeChoosingPolicy.java | 9 +---- .../datanode/fsdataset/FsDatasetSpi.java | 17 ++------ .../fsdataset/impl/BlockPoolSlice.java | 2 +- .../fsdataset/impl/FsDatasetImpl.java | 8 ++-- .../fsdataset/impl/MappableBlock.java | 1 - .../hdfs/server/namenode/BackupNode.java | 2 +- .../hdfs/server/namenode/CacheManager.java | 2 +- .../hdfs/server/namenode/CachedBlock.java | 2 +- .../server/namenode/ClusterJspHelper.java | 3 -- .../namenode/EditLogFileInputStream.java | 4 -- .../server/namenode/EditLogOutputStream.java | 4 +- .../hdfs/server/namenode/FSDirectory.java | 17 ++++---- .../hdfs/server/namenode/FSEditLog.java | 9 +++-- .../hdfs/server/namenode/FSEditLogLoader.java | 3 -- .../hdfs/server/namenode/FSEditLogOp.java | 4 +- .../hadoop/hdfs/server/namenode/FSImage.java | 3 +- .../hdfs/server/namenode/FSImageFormat.java | 2 +- .../server/namenode/FSImageSerialization.java | 2 +- .../hdfs/server/namenode/FSNamesystem.java | 39 ++++++++----------- .../server/namenode/FileJournalManager.java | 2 +- .../hadoop/hdfs/server/namenode/INode.java | 8 ++-- .../hdfs/server/namenode/LeaseManager.java | 1 - .../hdfs/server/namenode/LogsPurgeable.java | 1 - .../hdfs/server/namenode/NNStorage.java | 3 +- .../hadoop/hdfs/server/namenode/NameNode.java | 6 +-- .../server/namenode/NameNodeRpcServer.java | 5 +-- .../hdfs/server/namenode/NamenodeFsck.java | 1 - .../server/namenode/SecondaryNameNode.java | 1 - .../hdfs/server/namenode/ha/HAState.java | 4 +- 
.../namenode/snapshot/AbstractINodeDiff.java | 1 - .../hdfs/server/protocol/BlockIdCommand.java | 1 - .../server/protocol/DatanodeProtocol.java | 6 +-- .../hdfs/server/protocol/DatanodeStorage.java | 2 - .../hdfs/server/protocol/ServerCommand.java | 2 +- .../org/apache/hadoop/hdfs/tools/DFSck.java | 3 -- .../org/apache/hadoop/hdfs/tools/GetConf.java | 3 +- .../apache/hadoop/hdfs/tools/HDFSConcat.java | 4 +- .../org/apache/hadoop/hdfs/tools/JMXGet.java | 8 +--- .../BinaryEditsVisitor.java | 3 +- .../hdfs/util/DataTransferThrottler.java | 2 - .../web/resources/AclPermissionParam.java | 5 +-- .../hadoop/hdfs/BenchmarkThroughput.java | 2 +- .../apache/hadoop/hdfs/MiniDFSCluster.java | 10 ++--- .../blockmanagement/BlockManagerTestUtil.java | 8 +--- .../server/datanode/BlockReportTestBase.java | 3 -- .../server/datanode/SimulatedFSDataset.java | 9 ++--- .../hdfs/server/namenode/CreateEditsLog.java | 8 +--- .../namenode/NNThroughputBenchmark.java | 5 +-- 81 files changed, 153 insertions(+), 266 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index fadb3aa5af2..94678b0b5d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -372,6 +372,8 @@ Release 2.5.0 - UNRELEASED HDFS-6213. TestDataNodeConfig failing on Jenkins runs due to DN web port in use. (wang) + HDFS-6274. Cleanup javadoc warnings in HDFS code. (suresh) + Release 2.4.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java index 0fcb43dfe79..2897f3562a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -67,9 +67,8 @@ public class Hdfs extends AbstractFileSystem { * This constructor has the signature needed by * {@link AbstractFileSystem#createFileSystem(URI, Configuration)} * - * @param theUri - * which must be that of Hdfs - * @param conf + * @param theUri which must be that of Hdfs + * @param conf configuration * @throws IOException */ Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java index 1cb0e8aaf4b..bb9612a9956 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java @@ -182,7 +182,7 @@ class BlockReaderLocal implements BlockReader { /** * Maximum amount of readahead we'll do. This will always be at least the, - * size of a single chunk, even if {@link zeroReadaheadRequested} is true. + * size of a single chunk, even if {@link #zeroReadaheadRequested} is true. * The reason is because we need to do a certain amount of buffering in order * to do checksumming. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java index fddaf0a5aaa..ba749783f5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java @@ -191,7 +191,8 @@ class BlockStorageLocationUtil { /** * Group the per-replica {@link VolumeId} info returned from - * {@link DFSClient#queryDatanodesForHdfsBlocksMetadata(Map)} to be associated + * {@link DFSClient#queryDatanodesForHdfsBlocksMetadata(Map)} to be + * associated * with the corresponding {@link LocatedBlock}. * * @param blocks diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 5aa4d180e27..c83cf502333 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -1606,7 +1606,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory { /** * Set replication for an existing file. * @param src file name - * @param replication + * @param replication replication to set the file to * * @see ClientProtocol#setReplication(String, short) */ @@ -2116,7 +2116,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory { /** * Set permissions to a file or directory. * @param src path name. - * @param permission + * @param permission permission to set to * * @see ClientProtocol#setPermission(String, FsPermission) */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index 301bd4c15ab..2239de08aab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -389,7 +389,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, * Get block at the specified position. * Fetch it from the namenode if not cached. * - * @param offset + * @param offset block corresponding to this offset in file is returned * @param updatePosition whether to update current position * @return located block * @throws IOException @@ -453,14 +453,13 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, * Get blocks in the specified range. * Fetch them from the namenode if not cached. This function * will not get a read request beyond the EOF. - * @param offset - * @param length + * @param offset starting offset in file + * @param length length of data * @return consequent segment of located blocks * @throws IOException */ - private synchronized List getBlockRange(long offset, - long length) - throws IOException { + private synchronized List getBlockRange(long offset, + long length) throws IOException { // getFileLength(): returns total file length // locatedBlocks.getFileLength(): returns length of completed blocks if (offset >= getFileLength()) { @@ -847,7 +846,6 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, /** * Add corrupted block replica into map. 
- * @param corruptedBlockMap */ private void addIntoCorruptedBlockMap(ExtendedBlock blk, DatanodeInfo node, Map> corruptedBlockMap) { @@ -1091,14 +1089,6 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, * int, Map)} except we start up a second, parallel, 'hedged' read * if the first read is taking longer than configured amount of * time. We then wait on which ever read returns first. - * - * @param block - * @param start - * @param end - * @param buf - * @param offset - * @param corruptedBlockMap - * @throws IOException */ private void hedgedFetchBlockByteRange(LocatedBlock block, long start, long end, byte[] buf, int offset, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 347237bea6e..5e83575d733 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -288,9 +288,7 @@ public class DFSUtil { *

* Note that some components are only reserved under certain directories, e.g. * "/.reserved" is reserved, while "/hadoop/.reserved" is not. - * - * @param component - * @return if the component is reserved + * @return true, if the component is reserved */ public static boolean isReservedPathComponent(String component) { for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) { @@ -1015,8 +1013,8 @@ public class DFSUtil { /** * return server http or https address from the configuration for a * given namenode rpc address. - * @param conf * @param namenodeAddr - namenode RPC address + * @param conf configuration * @param scheme - the scheme (http / https) * @return server http or https address * @throws IOException @@ -1327,7 +1325,7 @@ public class DFSUtil { /** * For given set of {@code keys} adds nameservice Id and or namenode Id * and returns {nameserviceId, namenodeId} when address match is found. - * @see #getSuffixIDs(Configuration, String, AddressMatcher) + * @see #getSuffixIDs(Configuration, String, String, String, AddressMatcher) */ static String[] getSuffixIDs(final Configuration conf, final InetSocketAddress address, final String... keys) { @@ -1499,9 +1497,8 @@ public class DFSUtil { /** * Get SPNEGO keytab Key from configuration * - * @param conf - * Configuration - * @param defaultKey + * @param conf Configuration + * @param defaultKey default key to be used for config lookup * @return DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY if the key is not empty * else return defaultKey */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 4adf0532610..56cac71ff29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -1429,7 +1429,7 @@ public class DistributedFileSystem extends FileSystem { * Get the difference between two snapshots, or between a snapshot and the * current tree of a directory. * - * @see DFSClient#getSnapshotDiffReport(Path, String, String) + * @see DFSClient#getSnapshotDiffReport(String, String, String) */ public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir, final String fromSnapshot, final String toSnapshot) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java index f6d6528f489..475686572f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java @@ -110,8 +110,9 @@ public interface ClientDatanodeProtocol { /** * Retrieves volume location information about a list of blocks on a datanode. - * This is in the form of an opaque {@link VolumeId} for each configured - * data directory, which is not guaranteed to be the same across DN restarts. + * This is in the form of an opaque {@link org.apache.hadoop.fs.VolumeId} + * for each configured data directory, which is not guaranteed to be + * the same across DN restarts. 
* * @param blockPoolId the pool to query * @param blockIds diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 0e085553e6d..f19935011ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -268,7 +268,7 @@ public interface ClientProtocol { /** * Set Owner of a path (i.e. a file or a directory). * The parameters username and groupname cannot both be null. - * @param src + * @param src file path * @param username If it is null, the original username remains unchanged. * @param groupname If it is null, the original groupname remains unchanged. * @@ -1126,7 +1126,6 @@ public interface ClientProtocol { /** * Modify a CacheDirective in the CacheManager. * - * @return directive The directive to modify. Must contain a directive ID. * @param flags {@link CacheFlag}s to use for this operation. * @throws IOException if the directive could not be modified */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java index 772853a85aa..d620c6b502b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java @@ -136,7 +136,7 @@ public interface DataTransferProtocol { /** * Request a short circuit shared memory area from a DataNode. * - * @pram clientName The name of the client. + * @param clientName The name of the client. */ public void requestShortCircuitShm(String clientName) throws IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java index ebd3e7c94c2..a3685ca39b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java @@ -101,9 +101,9 @@ public class BlockTokenSecretManager extends * * @param keyUpdateInterval how often a new key will be generated * @param tokenLifetime how long an individual token is valid - * @param isHaEnabled whether or not HA is enabled - * @param thisNnId the NN ID of this NN in an HA setup - * @param otherNnId the NN ID of the other NN in an HA setup + * @param nnIndex namenode index + * @param blockPoolId block pool ID + * @param encryptionAlgorithm encryption algorithm to use */ public BlockTokenSecretManager(long keyUpdateInterval, long tokenLifetime, int nnIndex, String blockPoolId, @@ -412,8 +412,7 @@ public class BlockTokenSecretManager extends * @param keyId identifier of the secret key used to generate the encryption key. 
* @param nonce random value used to create the encryption key * @return the encryption key which corresponds to this (keyId, blockPoolId, nonce) - * @throws InvalidToken - * @throws InvalidEncryptionKeyException + * @throws InvalidEncryptionKeyException */ public byte[] retrieveDataEncryptionKey(int keyId, byte[] nonce) throws InvalidEncryptionKeyException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index c89ca7b57a9..e0a8f305d3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -299,7 +299,7 @@ public class DelegationTokenSecretManager * Update the token cache with renewal record in edit logs. * * @param identifier DelegationTokenIdentifier of the renewed token - * @param expiryTime + * @param expiryTime expiry time in milliseconds * @throws IOException */ public synchronized void updatePersistedTokenRenewal( @@ -429,8 +429,7 @@ public class DelegationTokenSecretManager /** * Private helper method to load delegation keys from fsimage. - * @param in - * @throws IOException + * @throws IOException on error */ private synchronized void loadAllKeys(DataInput in) throws IOException { StartupProgress prog = NameNode.getStartupProgress(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index 706042f580e..983f60bb8f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -239,7 +239,6 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement { /** * Find specified DatanodeDescriptor. - * @param dn * @return index or -1 if not found. */ int findDatanode(DatanodeDescriptor dn) { @@ -255,7 +254,6 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement { } /** * Find specified DatanodeStorageInfo. - * @param dn * @return index or -1 if not found. */ int findStorageInfo(DatanodeInfo dn) { @@ -272,7 +270,6 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement { /** * Find specified DatanodeStorageInfo. - * @param storageInfo * @return index or -1 if not found.
*/ int findStorageInfo(DatanodeStorageInfo storageInfo) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 0bcc82fb3c4..cd9ef110e69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -549,7 +549,6 @@ public class BlockManager { } /** - * @param block * @return true if the block has minimum replicas */ public boolean checkMinReplication(Block block) { @@ -3382,7 +3381,6 @@ public class BlockManager { * heartbeat. * * @return number of blocks scheduled for replication or removal. - * @throws IOException */ int computeDatanodeWork() { // Blocks should not be replicated or removed if in safe mode. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java index 73f474cba4e..dc4848414f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java @@ -61,7 +61,7 @@ public abstract class BlockPlacementPolicy { * @param srcPath the file to which this chooseTargets is being invoked. * @param numOfReplicas additional number of replicas wanted. * @param writer the writer's machine, null if not in the cluster. - * @param chosenNodes datanodes that have been chosen as targets. + * @param chosen datanodes that have been chosen as targets. * @param returnChosenNodes decide if the chosenNodes are returned. * @param excludedNodes datanodes that should not be considered as targets. * @param blocksize size of the data to be written. @@ -78,8 +78,8 @@ public abstract class BlockPlacementPolicy { StorageType storageType); /** - * Same as {@link #chooseTarget(String, int, Node, List, boolean, - * Set, long)} with added parameter {@code favoredDatanodes} + * Same as {@link #chooseTarget(String, int, Node, Set, long, List, StorageType)} + * with added parameter {@code favoredDatanodes} * @param favoredNodes datanodes that should be favored as targets. This * is only a hint and due to cluster state, namenode may not be * able to place the blocks on these datanodes. @@ -143,7 +143,8 @@ public abstract class BlockPlacementPolicy { /** * Get an instance of the configured Block Placement Policy based on the - * the configuration property {@link DFS_BLOCK_REPLICATOR_CLASSNAME_KEY}. + * the configuration property + * {@link DFSConfigKeys#DFS_BLOCK_REPLICATOR_CLASSNAME_KEY}. * * @param conf the configuration to be used * @param stats an object that is used to retrieve the load on the cluster @@ -195,7 +196,6 @@ public abstract class BlockPlacementPolicy { /** * Get rack string from a data node - * @param datanode * @return rack of data node */ protected String getRack(final DatanodeInfo datanode) { @@ -206,7 +206,7 @@ public abstract class BlockPlacementPolicy { * Split data nodes into two sets, one set includes nodes on rack with * more than one replica, the other set contains the remaining nodes. 
* - * @param dataNodes + * @param dataNodes datanodes to be split into two sets * @param rackMap a map from rack to datanodes * @param moreThanOne contains nodes on rack with more than one replica * @param exactlyOne remains contains the remaining nodes diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 8f790077010..1ca74b6c889 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -435,9 +435,9 @@ public class DatanodeManager { } /** - * Get data node by storage ID. + * Get data node by datanode ID. * - * @param nodeID + * @param nodeID datanode ID * @return DatanodeDescriptor or null if the node is not found. * @throws UnregisteredNodeException */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index f1724655ed4..1d68727b1ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -831,10 +831,10 @@ public abstract class Storage extends StorageInfo { } /** - * Checks if the upgrade from the given old version is supported. If - * no upgrade is supported, it throws IncorrectVersionException. - * - * @param oldVersion + * Checks if the upgrade from {@code oldVersion} is supported. + * @param oldVersion the version of the metadata to check with the current + * version + * @throws IOException if upgrade is not supported */ public static void checkVersionUpgradable(int oldVersion) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java index d802b9ad3b9..545fb132c01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java @@ -148,8 +148,8 @@ public class StorageInfo { * Get common storage fields. * Should be overloaded if additional fields need to be get. 
* - * @param props - * @throws IOException + * @param props properties + * @throws IOException on error */ protected void setFieldsFromProperties( Properties props, StorageDirectory sd) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 207d613a3f0..979193e59d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -314,9 +314,7 @@ class BPServiceActor implements Runnable { } /** - * Retrieve the incremental BR state for a given storage UUID - * @param storageUuid - * @return + * @return pending incremental block report for given {@code storage} */ private PerStoragePendingIncrementalBR getIncrementalBRMapForStorage( DatanodeStorage storage) { @@ -339,8 +337,6 @@ class BPServiceActor implements Runnable { * exists for the same block it is removed. * * Caller must synchronize access using pendingIncrementalBRperStorage. - * @param bInfo - * @param storageUuid */ void addPendingReplicationBlockInfo(ReceivedDeletedBlockInfo bInfo, DatanodeStorage storage) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java index 8417ffc6b48..b86cad45be7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java @@ -98,7 +98,6 @@ public class BlockMetadataHeader { /** * This reads all the fields till the beginning of checksum. - * @param in * @return Metadata Header * @throws IOException */ @@ -109,9 +108,7 @@ public class BlockMetadataHeader { /** * Reads header at the top of metadata file and returns the header. * - * @param dataset - * @param block - * @return + * @return metadata header for the block * @throws IOException */ public static BlockMetadataHeader readHeader(File file) throws IOException { @@ -147,8 +144,6 @@ public class BlockMetadataHeader { /** * This writes all the fields till the beginning of checksum. * @param out DataOutputStream - * @param header - * @return * @throws IOException */ @VisibleForTesting @@ -161,9 +156,7 @@ public class BlockMetadataHeader { /** * Writes all the fields till the beginning of checksum. - * @param out - * @param checksum - * @throws IOException + * @throws IOException on error */ static void writeHeader(DataOutputStream out, DataChecksum checksum) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 7e0a1f64cdb..14ed254cc9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -956,9 +956,9 @@ class BlockReceiver implements Closeable { /** * enqueue the seqno that is still be to acked by the downstream datanode. 
- * @param seqno - * @param lastPacketInBlock - * @param offsetInBlock + * @param seqno sequence number of the packet + * @param lastPacketInBlock if true, this is the last packet in block + * @param offsetInBlock offset of this packet in block */ void enqueue(final long seqno, final boolean lastPacketInBlock, final long offsetInBlock, final Status ackStatus) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index 1de50ec7f2c..86d88c2a44e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -168,7 +168,7 @@ class BlockSender implements java.io.Closeable { * @param block Block that is being read * @param startOffset starting offset to read from * @param length length of data to read - * @param corruptChecksumOk + * @param corruptChecksumOk if true, corrupt checksum is okay * @param verifyChecksum verify checksum while reading the data * @param sendChecksum send checksum to client. * @param datanode datanode from which the block is being read diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 0b9b69ff6bd..ffae35f1f1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -646,7 +646,6 @@ public class DataNode extends Configured /** * Return the BPOfferService instance corresponding to the given block. - * @param block * @return the BPOS * @throws IOException if no such BPOS can be found */ @@ -811,9 +810,7 @@ public class DataNode extends Configured /** * After the block pool has contacted the NN, registers that block pool * with the secret manager, updating it with the secrets provided by the NN. - * @param bpRegistration - * @param blockPoolId - * @throws IOException + * @throws IOException on error */ private synchronized void registerBlockPoolWithSecretManager( DatanodeRegistration bpRegistration, String blockPoolId) throws IOException { @@ -981,9 +978,8 @@ public class DataNode extends Configured /** * get BP registration by blockPool id - * @param bpid * @return BP registration object - * @throws IOException + * @throws IOException on error */ @VisibleForTesting public DatanodeRegistration getDNRegistrationForBP(String bpid) @@ -1687,8 +1683,9 @@ public class DataNode extends Configured /** * After a block becomes finalized, a datanode increases metric counter, * notifies namenode, and adds it to the block scanner - * @param block - * @param delHint + * @param block block to close + * @param delHint hint on which excess block to delete + * @param storageUuid UUID of the storage where block is stored */ void closeBlock(ExtendedBlock block, String delHint, String storageUuid) { metrics.incrBlocksWritten(); @@ -2318,8 +2315,8 @@ public class DataNode extends Configured * The corresponding replica must be an RBW or a Finalized. * Its GS and numBytes will be set to * the stored GS and the visible length. 
- * @param targets - * @param client + * @param targets targets to transfer the block to + * @param client client name */ void transferReplicaForPipelineRecovery(final ExtendedBlock b, final DatanodeInfo[] targets, final String client) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 6d793507e07..a0de9fa83ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -1077,7 +1077,7 @@ class DataXceiver extends Receiver implements Runnable { /** * Utility function for sending a response. * - * @param opStatus status message to write + * @param status status message to write * @param message message to send to the client or other DN */ private void sendResponse(Status status, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 164fd45993f..a47f2ef4a06 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -108,8 +108,7 @@ public class DirectoryScanner implements Runnable { ScanInfoPerBlockPool(int sz) {super(sz);} /** - * Merges "that" ScanInfoPerBlockPool into this one - * @param that + * Merges {@code that} ScanInfoPerBlockPool into this one */ public void addAll(ScanInfoPerBlockPool that) { if (that == null) return; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java index 90658733ecd..cc3287406a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java @@ -54,7 +54,7 @@ public class FinalizedReplica extends ReplicaInfo { /** * Copy constructor. - * @param from + * @param from where to copy construct from */ public FinalizedReplica(FinalizedReplica from) { super(from); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java index c61807b2d87..728dd3806f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java @@ -68,7 +68,7 @@ public class ReplicaBeingWritten extends ReplicaInPipeline { /** * Copy constructor. 
- * @param from + * @param from where to copy from */ public ReplicaBeingWritten(ReplicaBeingWritten from) { super(from); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java index 9ea7b7d7e74..f808e0107f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java @@ -89,7 +89,7 @@ public class ReplicaInPipeline extends ReplicaInfo /** * Copy constructor. - * @param from + * @param from where to copy from */ public ReplicaInPipeline(ReplicaInPipeline from) { super(from); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java index acc3113af50..7f08b81b416 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java @@ -40,7 +40,7 @@ public interface ReplicaInPipelineInterface extends Replica { /** * Set the number bytes that have acked - * @param bytesAcked + * @param bytesAcked number bytes acked */ void setBytesAcked(long bytesAcked); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java index c9aa171d984..738e16df78f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java @@ -100,7 +100,7 @@ abstract public class ReplicaInfo extends Block implements Replica { /** * Copy constructor. - * @param from + * @param from where to copy from */ ReplicaInfo(ReplicaInfo from) { this(from, from.getVolume(), from.getDir()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java index e92a6b5a1eb..35f7c93a9d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java @@ -50,7 +50,7 @@ public class ReplicaUnderRecovery extends ReplicaInfo { /** * Copy constructor. 
- * @param from + * @param from where to copy from */ public ReplicaUnderRecovery(ReplicaUnderRecovery from) { super(from); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java index 49c8d3dbadf..26ab3dbe243 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java @@ -60,7 +60,7 @@ public class ReplicaWaitingToBeRecovered extends ReplicaInfo { /** * Copy constructor. - * @param from + * @param from where to copy from */ public ReplicaWaitingToBeRecovered(ReplicaWaitingToBeRecovered from) { super(from); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java index b1d04aa83cf..e624effb779 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java @@ -165,13 +165,8 @@ public class AvailableSpaceVolumeChoosingPolicy } /** - * Check if the available space on all the volumes is roughly equal. - * - * @param volumes the volumes to check - * @return true if all volumes' free space is within the configured threshold, - * false otherwise. - * @throws IOException - * in the event of error checking amount of available space + * @return true if all volumes' free space is within the + * configured threshold, false otherwise. */ public boolean areAllVolumesWithinFreeSpaceThreshold() { long leastAvailable = Long.MAX_VALUE; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java index 1f6f965dce7..6c78ac7b19b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java @@ -124,16 +124,14 @@ public interface FsDatasetSpi extends FSDatasetMBean { /** * Returns the specified block's on-disk length (excluding metadata) - * @param b * @return the specified block's on-disk length (excluding metadta) - * @throws IOException + * @throws IOException on error */ public long getLength(ExtendedBlock b) throws IOException; /** * Get reference to the replica meta info in the replicasMap. 
* To be called from methods that are synchronized on {@link FSDataset} - * @param blockId * @return replica from the replicas map */ @Deprecated public Replica getReplica(String bpid, long blockId); /** @@ -151,8 +149,8 @@ public interface FsDatasetSpi extends FSDatasetMBean { /** * Returns an input stream at specified offset of the specified block - * @param b - * @param seekOffset + * @param b block + * @param seekOffset offset within the block to seek to * @return an input stream to read the contents of the specified block, * starting at the offset * @throws IOException */ @@ -163,9 +161,6 @@ /** * Returns an input stream at specified offset of the specified block * The block is still in the tmp directory and is not finalized - * @param b - * @param blkoff - * @param ckoff * @return an input stream to read the contents of the specified block, * starting at the offset * @throws IOException */ @@ -256,7 +251,6 @@ public interface FsDatasetSpi extends FSDatasetMBean { * Finalizes the block previously opened for writing using writeToBlock. * The block size is what is in the parameter b and it must match the amount * of data written - * @param b * @throws IOException */ public void finalizeBlock(ExtendedBlock b) throws IOException; @@ -264,7 +258,6 @@ public interface FsDatasetSpi extends FSDatasetMBean { /** * Unfinalizes the block previously opened for writing using writeToBlock. * The temporary file associated with this block is deleted. - * @param b * @throws IOException */ public void unfinalizeBlock(ExtendedBlock b) throws IOException; @@ -289,14 +282,12 @@ public interface FsDatasetSpi extends FSDatasetMBean { /** * Is the block valid? - * @param b * @return - true if the specified block is valid */ public boolean isValidBlock(ExtendedBlock b); /** * Is the block a valid RBW? - * @param b * @return - true if the specified block is a valid RBW */ public boolean isValidRbw(ExtendedBlock b); @@ -327,7 +318,7 @@ public interface FsDatasetSpi extends FSDatasetMBean { * Determine if the specified block is cached.
* @param bpid Block pool id * @param blockIds - block id - * @returns true if the block is cached + * @return true if the block is cached */ public boolean isCached(String bpid, long blockId); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index 24c24fe75e2..e47a302836d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -74,7 +74,7 @@ class BlockPoolSlice { * @param bpid Block pool Id * @param volume {@link FsVolumeImpl} to which this BlockPool belongs to * @param bpDir directory corresponding to the BlockPool - * @param conf + * @param conf configuration * @throws IOException */ BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 874fab1d640..4d836fed834 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -120,10 +120,8 @@ class FsDatasetImpl implements FsDatasetSpi { /** - * Returns a clone of a replica stored in data-node memory. - * Should be primarily used for testing. - * @param blockId - * @return + * This should be primarily used for testing. + * @return clone of replica stored in datanode memory */ ReplicaInfo fetchReplicaInfo(String bpid, long blockId) { ReplicaInfo r = volumeMap.get(bpid, blockId); @@ -1581,7 +1579,7 @@ class FsDatasetImpl implements FsDatasetSpi { datanode.getDnConf().getXceiverStopTimeout()); } - /** static version of {@link #initReplicaRecovery(Block, long)}. */ + /** static version of {@link #initReplicaRecovery(RecoveringBlock)}. */ static ReplicaRecoveryInfo initReplicaRecovery(String bpid, ReplicaMap map, Block block, long recoveryId, long xceiverStopTimeout) throws IOException { final ReplicaInfo replica = map.get(bpid, block.getBlockId()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java index 948c694faf2..45aa364bf8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java @@ -100,7 +100,6 @@ public class MappableBlock implements Closeable { /** * Verifies the block's checksum. This is an I/O intensive operation. - * @return if the block was successfully checksummed.
*/ private static void verifyChecksum(long length, FileInputStream metaIn, FileChannel blockChannel, String blockFileName) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index 0a5df306a1f..827c6b5d07e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -355,7 +355,7 @@ public class BackupNode extends NameNode { /** * Register this backup node with the active name-node. - * @param nsInfo + * @param nsInfo namespace information * @throws IOException */ private void registerWith(NamespaceInfo nsInfo) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index cc92ac91eff..48e9c1e38e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -216,7 +216,7 @@ public final class CacheManager { /** * Resets all tracked directives and pools. Called during 2NN checkpointing to - * reset FSNamesystem state. See {FSNamesystem{@link #clear()}. + * reset FSNamesystem state. See {@link FSNamesystem#clear()}. */ void clear() { directivesById.clear(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachedBlock.java index 9584ea7100f..98c65434f39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachedBlock.java @@ -125,7 +125,7 @@ public final class CachedBlock implements Element, * @param type If null, this parameter is ignored. * If it is non-null, we match only datanodes which * have it on this list. - * See {@link DatanodeDescriptor#CachedBlocksList#Type} + * See {@link DatanodeDescriptor.CachedBlocksList.Type} * for a description of all the lists. * * @return The list of datanodes. Modifying this list does not diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java index 9345098990b..e8ea028eb86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java @@ -388,7 +388,6 @@ class ClusterJspHelper { * is an inner map whose key is namenode, value is datanode status. * reported by each namenode. * @param namenodeHost host name of the namenode - * @param decomnode update DecommissionNode with alive node status * @param json JSON string contains datanode status * @throws IOException */ @@ -426,7 +425,6 @@ class ClusterJspHelper { * @param statusMap map with key being datanode, value being an * inner map (key:namenode, value:decommisionning state). 
* @param host datanode hostname - * @param decomnode DecommissionNode * @param json String * @throws IOException */ @@ -468,7 +466,6 @@ class ClusterJspHelper { * @param dataNodeStatusMap map with key being datanode, value being an * inner map (key:namenode, value:decommisionning state). * @param host datanode - * @param decomnode DecommissionNode * @param json String */ private static void getDecommissionNodeStatus( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java index 887f55064a5..fa25604d306 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java @@ -93,10 +93,6 @@ public class EditLogFileInputStream extends EditLogInputStream { * @param name filename to open * @param firstTxId first transaction found in file * @param lastTxId last transaction id found in file - * @throws LogHeaderCorruptException if the header is either missing or - * appears to be corrupt/truncated - * @throws IOException if an actual IO error occurs while reading the - * header */ public EditLogFileInputStream(File name, long firstTxId, long lastTxId, boolean isInProgress) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java index 39c3363b8a9..5e6d9d8f686 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java @@ -127,14 +127,14 @@ public abstract class EditLogOutputStream implements Closeable { } /** - * Return total time spent in {@link #flushAndSync()} + * Return total time spent in {@link #flushAndSync(boolean)} */ long getTotalSyncTime() { return totalTimeSync; } /** - * Return number of calls to {@link #flushAndSync()} + * Return number of calls to {@link #flushAndSync(boolean)} */ protected long getNumSync() { return numSync; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index c47a8a360e0..f072d635d6f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -524,7 +524,7 @@ public class FSDirectory implements Closeable { /** * @throws SnapshotAccessControlException * @see #unprotectedRenameTo(String, String, long) - * @deprecated Use {@link #renameTo(String, String, Rename...)} instead. 
+ * @deprecated Use {@link #renameTo(String, String, boolean, Rename...)} */ @Deprecated boolean renameTo(String src, String dst, boolean logRetryCache) @@ -581,7 +581,7 @@ * @throws QuotaExceededException if the operation violates any quota limit * @throws FileAlreadyExistsException if the src is a symlink that points to dst * @throws SnapshotAccessControlException if path is in RO snapshot - * @deprecated See {@link #renameTo(String, String)} + * @deprecated See {@link #renameTo(String, String, boolean, Rename...)} */ @Deprecated boolean unprotectedRenameTo(String src, String dst, long timestamp) @@ -1844,7 +1844,7 @@ /** * update quota of each inode and check to see if quota is exceeded. - * See {@link #updateCount(INode[], int, long, long, boolean)} + * See {@link #updateCount(INodesInPath, long, long, boolean)} */ private void updateCountNoQuotaCheck(INodesInPath inodesInPath, int numOfINodes, long nsDelta, long dsDelta) { @@ -1928,14 +1928,13 @@ * @param src string representation of the path to the directory * @param permissions the permission of the directory - * @param isAutocreate if the permission of the directory should inherit + * @param inheritPermission if the permission of the directory should inherit * from its parent or not. u+wx is implicitly added to * the automatically created directories, and to the * given directory if inheritPermission is true * @param now creation time * @return true if the operation succeeds false otherwise - * @throws FileNotFoundException if an ancestor or itself is a file - * @throws QuotaExceededException if directory creation violates + * @throws QuotaExceededException if directory creation violates * any quota limit * @throws UnresolvedLinkException if a symlink is encountered in src. * @throws SnapshotAccessControlException if path is in RO snapshot @@ -2064,7 +2063,7 @@ /** * Add the given child to the namespace. * @param src The full path name of the child node. - * @throw QuotaExceededException is thrown if it violates quota limit + * @throws QuotaExceededException is thrown if it violates quota limit */ private boolean addINode(String src, INode child ) throws QuotaExceededException, UnresolvedLinkException { @@ -2260,7 +2259,7 @@ * Its ancestors are stored at [0, pos-1]. * @return false if the child with this name already exists; * otherwise return true; - * @throw QuotaExceededException is thrown if it violates quota limit + * @throws QuotaExceededException is thrown if it violates quota limit */ private boolean addChild(INodesInPath iip, int pos, INode child, boolean checkQuota) throws QuotaExceededException { @@ -2446,7 +2445,7 @@ /** * See {@link ClientProtocol#setQuota(String, long, long)} for the contract. * Sets quota for for a directory. - * @returns INodeDirectory if any of the quotas have changed. null other wise. + * @return INodeDirectory if any of the quotas have changed. null otherwise. * @throws FileNotFoundException if the path does not exist. * @throws PathIsNotDirectoryException if the path is not a directory.
* @throws QuotaExceededException if the directory tree size is diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 405a44e9b19..0521a72e21a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -424,7 +424,6 @@ public class FSEditLog implements LogsPurgeable { /** * Wait if an automatic sync is scheduled - * @throws InterruptedException */ synchronized void waitIfAutoSyncScheduled() { try { @@ -802,7 +801,8 @@ public class FSEditLog implements LogsPurgeable { /** Add set namespace quota record to edit log * * @param src the string representation of the path to a directory - * @param quota the directory size limit + * @param nsQuota namespace quota + * @param dsQuota diskspace quota */ void logSetQuota(String src, long nsQuota, long dsQuota) { SetQuotaOp op = SetQuotaOp.getInstance(cache.get()) @@ -1452,8 +1452,9 @@ public class FSEditLog implements LogsPurgeable { * Select a list of input streams. * * @param fromTxId first transaction in the selected streams - * @param toAtLeast the selected streams must contain this transaction - * @param inProgessOk set to true if in-progress streams are OK + * @param toAtLeastTxId the selected streams must contain this transaction + * @param recovery recovery context + * @param inProgressOk set to true if in-progress streams are OK */ public synchronized Collection selectInputStreams( long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 06e74a3bf07..a11427af576 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -992,9 +992,6 @@ public class FSEditLogLoader { * If there are invalid or corrupt transactions in the middle of the stream, * validateEditLog will skip over them. * This reads through the stream but does not close it. 
- * - * @throws IOException if the stream cannot be read due to an IO error (eg - * if the log does not exist) */ static EditLogValidation validateEditLog(EditLogInputStream in) { long lastPos = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 46abd788551..25c7cfd3e40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -666,8 +666,8 @@ public abstract class FSEditLogOp { } /** - * {@literal @AtMostOnce} for {@link ClientProtocol#startFile} and - * {@link ClientProtocol#appendFile} + * {@literal @AtMostOnce} for {@link ClientProtocol#create} and + * {@link ClientProtocol#append} */ static class AddOp extends AddCloseOp { private AddOp() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index b4f22772a53..75505dbea9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -156,7 +156,7 @@ public class FSImage implements Closeable { * directory to allow them to format anyway. Otherwise, returns * false, unless 'force' is specified. * - * @param force format regardless of whether dirs exist + * @param force if true, format regardless of whether dirs exist * @param interactive prompt the user when a dir exists * @return true if formatting should proceed * @throws IOException if some storage cannot be accessed @@ -1002,7 +1002,6 @@ public class FSImage implements Closeable { /** * Save the contents of the FS image to a new image file in each of the * current storage directories. 
- * @param canceler */ public synchronized void saveNamespace(FSNamesystem source, NameNodeFile nnf, Canceler canceler) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 7457f160ec9..0d791698eeb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -484,7 +484,7 @@ public class FSImageFormat { /** * Load all children of a directory * - * @param in + * @param in input to load from * @param counter Counter to increment for namenode startup progress * @return number of child inodes read * @throws IOException diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java index 62127c87364..b863737c0b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -206,7 +206,7 @@ public class FSImageSerialization { /** * Reading the path from the image and converting it to byte[][] directly * this saves us an array copy and conversions to and from String - * @param in + * @param in input to read from * @return the array each element of which is a byte[] representation * of a path component * @throws IOException diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 2141f49c176..63652fcda71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1353,7 +1353,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, /** * Returns edit directories that are shared between primary and secondary. * @param conf configuration - * @return Collection of edit directories. + * @return collection of edit directories from {@code conf} */ public static List getSharedEditsDirs(Configuration conf) { // don't use getStorageDirs here, because we want an empty default @@ -1789,9 +1789,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * before we start actual move. * * This does not support ".inodes" relative path - * @param target target file path to concatenate into - * @param srcs files that are concatenated - * @throws IOException + * @param target target to concat into + * @param srcs files that will be concatenated + * @throws IOException on error */ void concat(String target, String [] srcs) throws IOException, UnresolvedLinkException { @@ -4087,11 +4087,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } /** - * - * @param pendingFile - * @param storedBlock + * @param pendingFile open file that needs to be closed + * @param storedBlock last block * @return Path of the file that was closed.
- * @throws IOException + * @throws IOException on error */ @VisibleForTesting String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock) @@ -4299,7 +4298,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, /** * Perform resource checks and cache the results. - * @throws IOException */ void checkAvailableResources() { Preconditions.checkState(nnResourceChecker != null, @@ -5350,7 +5348,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, /** * Leave safe mode. - * @throws IOException */ void leaveSafeMode() { writeLock(); @@ -5767,7 +5764,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, /** * Sets the generation stamp that delineates random and sequentially * allocated block IDs. - * @param stamp + * @param stamp set generation stamp limit to this value */ void setGenerationStampV1Limit(long stamp) { Preconditions.checkState(generationStampV1Limit == @@ -5852,7 +5849,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * Determine whether the block ID was randomly generated (legacy) or * sequentially generated. The generation stamp value is used to * make the distinction. - * @param block * @return true if the block ID was randomly generated, false otherwise. */ boolean isLegacyBlock(Block block) { @@ -6089,7 +6085,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * Release (unregister) backup node. *

* Find and remove the backup stream corresponding to the node. - * @param registration * @throws IOException */ void releaseBackupNode(NamenodeRegistration registration) @@ -6225,8 +6220,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats, /** * @param renewer Renewer information - * @return Token - * @throws IOException + * @return delegation token + * @throws IOException on error */ Token getDelegationToken(Text renewer) throws IOException { @@ -6267,10 +6262,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats, /** * - * @param token delegation token - * @return New expiryTime of the token - * @throws InvalidToken - * @throws IOException + * @param token token to renew + * @return new expiryTime of the token + * @throws InvalidToken if {@code token} is invalid + * @throws IOException on other errors */ long renewDelegationToken(Token token) throws InvalidToken, IOException { @@ -6301,8 +6296,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats, /** * - * @param token delegation token that needs to be canceled - * @throws IOException + * @param token token to cancel + * @throws IOException on error */ void cancelDelegationToken(Token token) throws IOException { @@ -7213,7 +7208,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, /** * Update internal state to indicate that a rolling upgrade is in progress. - * @param startTime start time of the rolling upgrade + * @param startTime rolling upgrade start time */ void startRollingUpgradeInternal(long startTime) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index 22d5fab3709..2840e8a0450 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -168,7 +168,7 @@ public class FileJournalManager implements JournalManager { /** * Find all editlog segments starting at or above the given txid. - * @param fromTxId the txnid which to start looking + * @param firstTxId the txid from which to start looking * @param inProgressOk whether or not to include the in-progress edit log * segment * @return a list of remote edit logs diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 6e31b2fad3a..8efcd14f2dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -652,9 +652,8 @@ public abstract class INode implements INodeAttributes, Diff.Element { /** - * Breaks file path into components. - * @param path - * @return array of byte arrays each of which represents + * Breaks {@code path} into components. + * @return array of byte arrays each of which represents * a single path component. */ static byte[][] getPathComponents(String path) { @@ -673,8 +672,7 @@ public abstract class INode implements INodeAttributes, Diff.Element { } /** - * Splits an absolute path into an array of path components. - * @param path + * Splits an absolute {@code path} into an array of path components.
* @throws AssertionError if the given path is invalid. * @return array of path components. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index d324f76fbc0..fea5b147db9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -402,7 +402,6 @@ public class LeaseManager { /** * Get the list of inodes corresponding to valid leases. * @return list of inodes - * @throws UnresolvedLinkException */ Map getINodesUnderConstruction() { Map inodes = new TreeMap(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java index f997fa0b35e..71e3a3563fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java @@ -42,7 +42,6 @@ interface LogsPurgeable { * * @param fromTxId the first transaction id we want to read * @param inProgressOk whether or not in-progress streams should be returned - * @return a list of streams * @throws IOException if the underlying storage has an error or is otherwise * inaccessible */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java index 313d5c8db43..09d1ce3df78 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java @@ -425,8 +425,7 @@ public class NNStorage extends Storage implements Closeable, /** * Write last checkpoint time into a separate file. - * - * @param sd + * @param sd storage directory * @throws IOException */ void writeTransactionIdFile(StorageDirectory sd, long txid) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 441eedef5ae..5984e5b6d90 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -356,8 +356,6 @@ public class NameNode implements NameNodeStatusMXBean { /** - * TODO:FEDERATION - * @param filesystemURI * @return address of file system */ public static InetSocketAddress getAddress(URI filesystemURI) { @@ -800,8 +798,8 @@ public class NameNode implements NameNodeStatusMXBean { * Interactively confirm that formatting is desired * for each existing directory and format them. 
* - * @param conf - * @param force + * @param conf configuration to use + * @param force if true, format regardless of whether dirs exist * @return true if formatting was aborted, false otherwise * @throws IOException */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index eca2f00ba53..566a0573ad3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -1180,9 +1180,8 @@ class NameNodeRpcServer implements NamenodeProtocols { /** * Verify version. - * - * @param version - * @throws IOException + * @param version layout version + * @throws IOException on layout version mismatch */ void verifyLayoutVersion(int version) throws IOException { if (version != HdfsConstants.NAMENODE_LAYOUT_VERSION) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index c3605cc23fd..b73a46d6744 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -159,7 +159,6 @@ public class NamenodeFsck { * @param totalDatanodes number of live datanodes * @param minReplication minimum replication * @param remoteAddress source address of the fsck request - * @throws IOException */ NamenodeFsck(Configuration conf, NameNode namenode, NetworkTopology networktopology, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index 1245be5d3a0..ecffd508182 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -209,7 +209,6 @@ public class SecondaryNameNode implements Runnable { /** * Initialize SecondaryNameNode. - * @param commandLineOpts */ private void initialize(final Configuration conf, CommandLineOpts commandLineOpts) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java index 20ea854b461..34a5da272e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java @@ -130,8 +130,8 @@ abstract public class HAState { * Check if an operation is supported in a given state. * @param context HA context * @param op Type of the operation. - * @throws UnsupportedActionException if a given type of operation is not - * supported in this state. 
+ * @throws StandbyException if a given type of operation is not + * supported in standby state */ public abstract void checkOperation(final HAContext context, final OperationCategory op) throws StandbyException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java index 5df84fc6a35..9952317e802 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java @@ -86,7 +86,6 @@ abstract class AbstractINodeDiff - * @return String + * @return parse {@code aclEntry} and return aclspec */ private static String parseAclSpec(List aclEntry) { return StringUtils.join(aclEntry, ","); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java index b3dd55a9afd..96e1f29e07b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java @@ -229,7 +229,7 @@ public class BenchmarkThroughput extends Configured implements Tool { } /** - * @param args + * @param args arguments */ public static void main(String[] args) throws Exception { int res = ToolRunner.run(new HdfsConfiguration(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 6c2a5a6d831..19f7cbd033b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -1384,8 +1384,8 @@ public class MiniDFSCluster { /** * Finalize cluster for the namenode at the given index * @see MiniDFSCluster#finalizeCluster(Configuration) - * @param nnIndex - * @param conf + * @param nnIndex index of the namenode + * @param conf configuration * @throws Exception */ public void finalizeCluster(int nnIndex, Configuration conf) throws Exception { @@ -2216,7 +2216,7 @@ public class MiniDFSCluster { * to determine the location of the storage of a DN instance in the mini cluster * @param dnIndex datanode index * @param dirIndex directory index. 
- * @return + * @return storage directory path */ private static String getStorageDirPath(int dnIndex, int dirIndex) { return "data/data" + (2 * dnIndex + 1 + dirIndex); @@ -2242,8 +2242,8 @@ public class MiniDFSCluster { } /** * Get directory relative to block pool directory in the datanode - * @param storageDir - * @return current directory + * @param storageDir storage directory + * @return current directory in the given storage directory */ public static String getBPDir(File storageDir, String bpid, String dirName) { return getBPDir(storageDir, bpid) + dirName + "/"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java index fecc7be992b..0336fb4ede6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java @@ -101,7 +101,6 @@ public class BlockManagerTestUtil { } /** - * @param blockManager * @return replication monitor thread instance from block manager. */ public static Daemon getReplicationThread(final BlockManager blockManager) @@ -111,7 +110,6 @@ public class BlockManagerTestUtil { /** * Stop the replication monitor thread - * @param blockManager */ public static void stopReplicationThread(final BlockManager blockManager) throws IOException { @@ -126,7 +124,6 @@ public class BlockManagerTestUtil { } /** - * @param blockManager * @return corruptReplicas from block manager */ public static CorruptReplicasMap getCorruptReplicas(final BlockManager blockManager){ @@ -135,7 +132,6 @@ public class BlockManagerTestUtil { } /** - * @param blockManager * @return computed block replication and block invalidation work that can be * scheduled on data-nodes. * @throws IOException @@ -158,7 +154,7 @@ public class BlockManagerTestUtil { * regardless of invalidation/replication limit configurations. * * NB: you may want to set - * {@link DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY} to + * {@link DFSConfigKeys#DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY} to * a high value to ensure that all work is calculated. */ public static int computeAllPendingWork(BlockManager bm) { @@ -200,7 +196,7 @@ public class BlockManagerTestUtil { /** * Change whether the block placement policy will prefer the writer's * local Datanode or not. - * @param prefer + * @param prefer if true, prefer local node */ public static void setWritingPrefersLocalNode( BlockManager bm, boolean prefer) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java index b18ff47e39b..473e9c0f191 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java @@ -171,9 +171,6 @@ public abstract class BlockReportTestBase { * Utility routine to send block reports to the NN, either in a single call * or reporting one storage per call. 
* - * @param dnR - * @param poolId - * @param reports * @throws IOException */ protected abstract void sendBlockReports(DatanodeRegistration dnR, String poolId, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index 8f98574a05a..d6ca7981438 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -833,8 +833,8 @@ public class SimulatedFSDataset implements FsDatasetSpi { /** * An input stream of size l with repeated bytes - * @param l - * @param iRepeatedData + * @param l size of the stream + * @param iRepeatedData byte that is repeated in the stream */ SimulatedInputStream(long l, byte iRepeatedData) { length = l; @@ -843,17 +843,14 @@ public class SimulatedFSDataset implements FsDatasetSpi { /** * An input stream of of the supplied data - * - * @param iData + * @param iData data to construct the stream */ SimulatedInputStream(byte[] iData) { data = iData; length = data.length; - } /** - * * @return the lenght of the input stream */ long getLength() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java index 3ca924554d6..2bd75f8dd7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java @@ -131,14 +131,10 @@ public class CreateEditsLog { printUsageExit(); } /** - * @param args + * @param args arguments * @throws IOException */ - public static void main(String[] args) - throws IOException { - - - + public static void main(String[] args) throws IOException { long startingBlockId = 1; int numFiles = 0; short replication = 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index d63439be797..7279aff9387 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -202,7 +202,7 @@ public class NNThroughputBenchmark implements Tool { * {@link #executeOp(int, int, String)}, which can have different meanings * depending on the operation performed. * - * @param daemonId + * @param daemonId id of the daemon calling this method * @return the argument */ abstract String getExecutionArgument(int daemonId); @@ -322,11 +322,10 @@ public class NNThroughputBenchmark implements Tool { /** * Parse first 2 arguments, corresponding to the "-op" option. * - * @param args + * @param args argument list * @return true if operation is all, which means that options not related * to this operation should be ignored, or false otherwise, meaning * that usage should be printed when an unrelated option is encountered. - * @throws IOException */ protected boolean verifyOpArgument(List args) { if(args.size() < 2 || ! args.get(0).startsWith("-op"))