HDFS-6274. Cleanup javadoc warnings in HDFS code. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1589506 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2014-04-23 20:13:32 +00:00
parent 6eba48cbde
commit 876fd8ab79
81 changed files with 153 additions and 266 deletions

View File

@@ -372,6 +372,8 @@ Release 2.5.0 - UNRELEASED
 HDFS-6213. TestDataNodeConfig failing on Jenkins runs due to DN web port
 in use. (wang)
+HDFS-6274. Cleanup javadoc warnings in HDFS code. (suresh)
 Release 2.4.1 - UNRELEASED
 INCOMPATIBLE CHANGES

View File

@@ -67,9 +67,8 @@ public class Hdfs extends AbstractFileSystem {
  * This constructor has the signature needed by
  * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}
  *
- * @param theUri
- * which must be that of Hdfs
- * @param conf
+ * @param theUri which must be that of Hdfs
+ * @param conf configuration
  * @throws IOException
  */
 Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
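
The fix above is the commit's most common pattern: a @param tag with no description, which the javadoc tool reports as a warning during the build. A hedged before/after sketch of the rule (the warning text is approximate and varies by JDK):

    // BEFORE -- javadoc warns: "no description for @param"
    /**
     * @param theUri
     * @param conf
     */

    // AFTER -- every tag matches a real parameter and carries a description
    /**
     * @param theUri which must be that of Hdfs
     * @param conf configuration
     */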

View File

@@ -182,7 +182,7 @@ class BlockReaderLocal implements BlockReader {
 /**
  * Maximum amount of readahead we'll do. This will always be at least the,
- * size of a single chunk, even if {@link zeroReadaheadRequested} is true.
+ * size of a single chunk, even if {@link #zeroReadaheadRequested} is true.
  * The reason is because we need to do a certain amount of buffering in order
  * to do checksumming.
  *
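
This hunk shows the {@link} resolution rules that recur throughout the commit: a member of the enclosing class needs a leading '#', and a class that is not imported needs its fully qualified name (compare the org.apache.hadoop.fs.VolumeId fix further down). Roughly:

    {@link zeroReadaheadRequested}         // warning: reference not found
    {@link #zeroReadaheadRequested}        // resolves to a field of this class
    {@link VolumeId}                       // fails unless VolumeId is imported
    {@link org.apache.hadoop.fs.VolumeId}  // always resolves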

View File

@@ -191,7 +191,8 @@ class BlockStorageLocationUtil {
 /**
  * Group the per-replica {@link VolumeId} info returned from
- * {@link DFSClient#queryDatanodesForHdfsBlocksMetadata(Map)} to be associated
+ * {@link DFSClient#queryDatanodesForHdfsBlocksMetadata(Map)} to be
+ * associated
  * with the corresponding {@link LocatedBlock}.
  *
  * @param blocks

View File

@@ -1606,7 +1606,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
 /**
  * Set replication for an existing file.
  * @param src file name
- * @param replication
+ * @param replication replication to set the file to
  *
  * @see ClientProtocol#setReplication(String, short)
  */
@@ -2116,7 +2116,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
 /**
  * Set permissions to a file or directory.
  * @param src path name.
- * @param permission
+ * @param permission permission to set to
  *
  * @see ClientProtocol#setPermission(String, FsPermission)
  */

View File

@@ -389,7 +389,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
  * Get block at the specified position.
  * Fetch it from the namenode if not cached.
  *
- * @param offset
+ * @param offset block corresponding to this offset in file is returned
  * @param updatePosition whether to update current position
  * @return located block
  * @throws IOException
@@ -453,14 +453,13 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
  * Get blocks in the specified range.
  * Fetch them from the namenode if not cached. This function
  * will not get a read request beyond the EOF.
- * @param offset
- * @param length
+ * @param offset starting offset in file
+ * @param length length of data
  * @return consequent segment of located blocks
  * @throws IOException
  */
 private synchronized List<LocatedBlock> getBlockRange(long offset,
-    long length)
-    throws IOException {
+    long length) throws IOException {
   // getFileLength(): returns total file length
   // locatedBlocks.getFileLength(): returns length of completed blocks
   if (offset >= getFileLength()) {
@@ -847,7 +846,6 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
 /**
  * Add corrupted block replica into map.
- * @param corruptedBlockMap
  */
 private void addIntoCorruptedBlockMap(ExtendedBlock blk, DatanodeInfo node,
     Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap) {
@@ -1091,14 +1089,6 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
  * int, Map)} except we start up a second, parallel, 'hedged' read
  * if the first read is taking longer than configured amount of
  * time. We then wait on which ever read returns first.
- *
- * @param block
- * @param start
- * @param end
- * @param buf
- * @param offset
- * @param corruptedBlockMap
- * @throws IOException
  */
 private void hedgedFetchBlockByteRange(LocatedBlock block, long start,
     long end, byte[] buf, int offset,
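
The javadoc above describes the client's "hedged read" path: if the first read of a block range is slow, a second, parallel read is launched against a different replica and whichever finishes first wins. A hedged configuration sketch -- the two property names below match the HDFS-5776-era DFSConfigKeys, but verify them against your release:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    Configuration conf = new Configuration();
    // a thread pool size > 0 is what enables hedged reads in DFSClient
    conf.setInt("dfs.client.hedged.read.threadpool.size", 10);
    // how long the first read may run before the hedged read is launched
    conf.setLong("dfs.client.hedged.read.threshold.millis", 500);
    FileSystem fs = FileSystem.get(conf); // the client picks these up at creation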

View File

@@ -288,9 +288,7 @@ public class DFSUtil {
  * <p>
  * Note that some components are only reserved under certain directories, e.g.
  * "/.reserved" is reserved, while "/hadoop/.reserved" is not.
- *
- * @param component
- * @return if the component is reserved
+ * @return true, if the component is reserved
  */
 public static boolean isReservedPathComponent(String component) {
   for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
@@ -1015,8 +1013,8 @@ public class DFSUtil {
 /**
  * return server http or https address from the configuration for a
  * given namenode rpc address.
- * @param conf
  * @param namenodeAddr - namenode RPC address
+ * @param conf configuration
  * @param scheme - the scheme (http / https)
  * @return server http or https address
  * @throws IOException
@@ -1327,7 +1325,7 @@ public class DFSUtil {
 /**
  * For given set of {@code keys} adds nameservice Id and or namenode Id
  * and returns {nameserviceId, namenodeId} when address match is found.
- * @see #getSuffixIDs(Configuration, String, AddressMatcher)
+ * @see #getSuffixIDs(Configuration, String, String, String, AddressMatcher)
  */
 static String[] getSuffixIDs(final Configuration conf,
     final InetSocketAddress address, final String... keys) {
@@ -1499,9 +1497,8 @@ public class DFSUtil {
 /**
  * Get SPNEGO keytab Key from configuration
  *
- * @param conf
- * Configuration
- * @param defaultKey
+ * @param conf Configuration
+ * @param defaultKey default key to be used for config lookup
  * @return DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY if the key is not empty
  * else return defaultKey
  */

View File

@@ -1429,7 +1429,7 @@ public class DistributedFileSystem extends FileSystem {
  * Get the difference between two snapshots, or between a snapshot and the
  * current tree of a directory.
  *
- * @see DFSClient#getSnapshotDiffReport(Path, String, String)
+ * @see DFSClient#getSnapshotDiffReport(String, String, String)
  */
 public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir,
     final String fromSnapshot, final String toSnapshot) throws IOException {

View File

@@ -110,8 +110,9 @@ public interface ClientDatanodeProtocol {
 /**
  * Retrieves volume location information about a list of blocks on a datanode.
- * This is in the form of an opaque {@link VolumeId} for each configured
- * data directory, which is not guaranteed to be the same across DN restarts.
+ * This is in the form of an opaque {@link org.apache.hadoop.fs.VolumeId}
+ * for each configured data directory, which is not guaranteed to be
+ * the same across DN restarts.
  *
  * @param blockPoolId the pool to query
  * @param blockIds

View File

@@ -268,7 +268,7 @@ public interface ClientProtocol {
 /**
  * Set Owner of a path (i.e. a file or a directory).
  * The parameters username and groupname cannot both be null.
- * @param src
+ * @param src file path
  * @param username If it is null, the original username remains unchanged.
  * @param groupname If it is null, the original groupname remains unchanged.
  *
@@ -1126,7 +1126,6 @@ public interface ClientProtocol {
 /**
  * Modify a CacheDirective in the CacheManager.
  *
- * @return directive The directive to modify. Must contain a directive ID.
  * @param flags {@link CacheFlag}s to use for this operation.
  * @throws IOException if the directive could not be modified
  */

View File

@@ -136,7 +136,7 @@ public interface DataTransferProtocol {
 /**
  * Request a short circuit shared memory area from a DataNode.
  *
- * @pram clientName The name of the client.
+ * @param clientName The name of the client.
  */
 public void requestShortCircuitShm(String clientName) throws IOException;

View File

@@ -101,9 +101,9 @@ public class BlockTokenSecretManager extends
  *
  * @param keyUpdateInterval how often a new key will be generated
  * @param tokenLifetime how long an individual token is valid
- * @param isHaEnabled whether or not HA is enabled
- * @param thisNnId the NN ID of this NN in an HA setup
- * @param otherNnId the NN ID of the other NN in an HA setup
+ * @param nnIndex namenode index
+ * @param blockPoolId block pool ID
+ * @param encryptionAlgorithm encryption algorithm to use
  */
 public BlockTokenSecretManager(long keyUpdateInterval,
     long tokenLifetime, int nnIndex, String blockPoolId,
@@ -412,7 +412,6 @@ public class BlockTokenSecretManager extends
  * @param keyId identifier of the secret key used to generate the encryption key.
  * @param nonce random value used to create the encryption key
  * @return the encryption key which corresponds to this (keyId, blockPoolId, nonce)
- * @throws InvalidToken
  * @throws InvalidEncryptionKeyException
  */
 public byte[] retrieveDataEncryptionKey(int keyId, byte[] nonce)

View File

@@ -299,7 +299,7 @@ public class DelegationTokenSecretManager
  * Update the token cache with renewal record in edit logs.
  *
  * @param identifier DelegationTokenIdentifier of the renewed token
- * @param expiryTime
+ * @param expiryTime expirty time in milliseconds
  * @throws IOException
  */
 public synchronized void updatePersistedTokenRenewal(
@@ -429,8 +429,7 @@ public class DelegationTokenSecretManager
 /**
  * Private helper method to load delegation keys from fsimage.
- * @param in
- * @throws IOException
+ * @throws IOException on error
  */
 private synchronized void loadAllKeys(DataInput in) throws IOException {
   StartupProgress prog = NameNode.getStartupProgress();

View File

@@ -239,7 +239,6 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
 /**
  * Find specified DatanodeDescriptor.
- * @param dn
  * @return index or -1 if not found.
  */
 int findDatanode(DatanodeDescriptor dn) {
@@ -255,7 +254,6 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
 }
 /**
  * Find specified DatanodeStorageInfo.
- * @param dn
  * @return index or -1 if not found.
  */
 int findStorageInfo(DatanodeInfo dn) {
@@ -272,7 +270,6 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
 /**
  * Find specified DatanodeStorageInfo.
- * @param storageInfo
  * @return index or -1 if not found.
  */
 int findStorageInfo(DatanodeStorageInfo storageInfo) {

View File

@@ -549,7 +549,6 @@ public class BlockManager {
 }
 /**
- * @param block
  * @return true if the block has minimum replicas
  */
 public boolean checkMinReplication(Block block) {
@@ -3382,7 +3381,6 @@ public class BlockManager {
  * heartbeat.
  *
  * @return number of blocks scheduled for replication or removal.
- * @throws IOException
  */
 int computeDatanodeWork() {
   // Blocks should not be replicated or removed if in safe mode.

View File

@@ -61,7 +61,7 @@ public abstract class BlockPlacementPolicy {
  * @param srcPath the file to which this chooseTargets is being invoked.
  * @param numOfReplicas additional number of replicas wanted.
  * @param writer the writer's machine, null if not in the cluster.
- * @param chosenNodes datanodes that have been chosen as targets.
+ * @param chosen datanodes that have been chosen as targets.
  * @param returnChosenNodes decide if the chosenNodes are returned.
  * @param excludedNodes datanodes that should not be considered as targets.
  * @param blocksize size of the data to be written.
@@ -78,8 +78,8 @@ public abstract class BlockPlacementPolicy {
     StorageType storageType);
 /**
- * Same as {@link #chooseTarget(String, int, Node, List, boolean,
- * Set, long)} with added parameter {@code favoredDatanodes}
+ * Same as {@link #chooseTarget(String, int, Node, Set, long, List, StorageType)}
+ * with added parameter {@code favoredDatanodes}
  * @param favoredNodes datanodes that should be favored as targets. This
  * is only a hint and due to cluster state, namenode may not be
  * able to place the blocks on these datanodes.
@@ -143,7 +143,8 @@ public abstract class BlockPlacementPolicy {
 /**
  * Get an instance of the configured Block Placement Policy based on the
- * the configuration property {@link DFS_BLOCK_REPLICATOR_CLASSNAME_KEY}.
+ * the configuration property
+ * {@link DFSConfigKeys#DFS_BLOCK_REPLICATOR_CLASSNAME_KEY}.
  *
  * @param conf the configuration to be used
  * @param stats an object that is used to retrieve the load on the cluster
@@ -195,7 +196,6 @@ public abstract class BlockPlacementPolicy {
 /**
  * Get rack string from a data node
- * @param datanode
  * @return rack of data node
  */
 protected String getRack(final DatanodeInfo datanode) {
@@ -206,7 +206,7 @@ public abstract class BlockPlacementPolicy {
  * Split data nodes into two sets, one set includes nodes on rack with
  * more than one replica, the other set contains the remaining nodes.
  *
- * @param dataNodes
+ * @param dataNodes datanodes to be split into two sets
  * @param rackMap a map from rack to datanodes
  * @param moreThanOne contains nodes on rack with more than one replica
  * @param exactlyOne remains contains the remaining nodes

View File

@@ -435,9 +435,9 @@ public class DatanodeManager {
 }
 /**
- * Get data node by storage ID.
+ * Get data node by datanode ID.
  *
- * @param nodeID
+ * @param nodeID datanode ID
  * @return DatanodeDescriptor or null if the node is not found.
  * @throws UnregisteredNodeException
  */

View File

@@ -831,10 +831,10 @@ public abstract class Storage extends StorageInfo {
 }
 /**
- * Checks if the upgrade from the given old version is supported. If
- * no upgrade is supported, it throws IncorrectVersionException.
- *
- * @param oldVersion
+ * Checks if the upgrade from {@code oldVersion} is supported.
+ * @param oldVersion the version of the metadata to check with the current
+ * version
+ * @throws IOException if upgrade is not supported
  */
 public static void checkVersionUpgradable(int oldVersion)
     throws IOException {

View File

@@ -148,8 +148,8 @@ public class StorageInfo {
  * Get common storage fields.
  * Should be overloaded if additional fields need to be get.
  *
- * @param props
- * @throws IOException
+ * @param props properties
+ * @throws IOException on error
  */
 protected void setFieldsFromProperties(
     Properties props, StorageDirectory sd) throws IOException {

View File

@@ -314,9 +314,7 @@ class BPServiceActor implements Runnable {
 }
 /**
- * Retrieve the incremental BR state for a given storage UUID
- * @param storageUuid
- * @return
+ * @return pending incremental block report for given {@code storage}
  */
 private PerStoragePendingIncrementalBR getIncrementalBRMapForStorage(
     DatanodeStorage storage) {
@@ -339,8 +337,6 @@ class BPServiceActor implements Runnable {
  * exists for the same block it is removed.
  *
  * Caller must synchronize access using pendingIncrementalBRperStorage.
- * @param bInfo
- * @param storageUuid
  */
 void addPendingReplicationBlockInfo(ReceivedDeletedBlockInfo bInfo,
     DatanodeStorage storage) {

View File

@@ -98,7 +98,6 @@ public class BlockMetadataHeader {
 /**
  * This reads all the fields till the beginning of checksum.
- * @param in
  * @return Metadata Header
  * @throws IOException
  */
@@ -109,9 +108,7 @@ public class BlockMetadataHeader {
 /**
  * Reads header at the top of metadata file and returns the header.
  *
- * @param dataset
- * @param block
- * @return
+ * @return metadata header for the block
  * @throws IOException
  */
 public static BlockMetadataHeader readHeader(File file) throws IOException {
@@ -147,8 +144,6 @@ public class BlockMetadataHeader {
 /**
  * This writes all the fields till the beginning of checksum.
  * @param out DataOutputStream
- * @param header
- * @return
  * @throws IOException
  */
 @VisibleForTesting
@@ -161,9 +156,7 @@ public class BlockMetadataHeader {
 /**
  * Writes all the fields till the beginning of checksum.
- * @param out
- * @param checksum
- * @throws IOException
+ * @throws IOException on error
  */
 static void writeHeader(DataOutputStream out, DataChecksum checksum)
     throws IOException {

View File

@@ -956,9 +956,9 @@ class BlockReceiver implements Closeable {
 /**
  * enqueue the seqno that is still be to acked by the downstream datanode.
- * @param seqno
- * @param lastPacketInBlock
- * @param offsetInBlock
+ * @param seqno sequence number of the packet
+ * @param lastPacketInBlock if true, this is the last packet in block
+ * @param offsetInBlock offset of this packet in block
  */
 void enqueue(final long seqno, final boolean lastPacketInBlock,
     final long offsetInBlock, final Status ackStatus) {

View File

@@ -168,7 +168,7 @@ class BlockSender implements java.io.Closeable {
  * @param block Block that is being read
  * @param startOffset starting offset to read from
  * @param length length of data to read
- * @param corruptChecksumOk
+ * @param corruptChecksumOk if true, corrupt checksum is okay
  * @param verifyChecksum verify checksum while reading the data
  * @param sendChecksum send checksum to client.
  * @param datanode datanode from which the block is being read

View File

@@ -646,7 +646,6 @@ public class DataNode extends Configured
 /**
  * Return the BPOfferService instance corresponding to the given block.
- * @param block
  * @return the BPOS
  * @throws IOException if no such BPOS can be found
  */
@@ -811,9 +810,7 @@ public class DataNode extends Configured
 /**
  * After the block pool has contacted the NN, registers that block pool
  * with the secret manager, updating it with the secrets provided by the NN.
- * @param bpRegistration
- * @param blockPoolId
- * @throws IOException
+ * @throws IOException on error
  */
 private synchronized void registerBlockPoolWithSecretManager(
     DatanodeRegistration bpRegistration, String blockPoolId) throws IOException {
@@ -981,9 +978,8 @@ public class DataNode extends Configured
 /**
  * get BP registration by blockPool id
- * @param bpid
  * @return BP registration object
- * @throws IOException
+ * @throws IOException on error
  */
 @VisibleForTesting
 public DatanodeRegistration getDNRegistrationForBP(String bpid)
@@ -1687,8 +1683,9 @@ public class DataNode extends Configured
 /**
  * After a block becomes finalized, a datanode increases metric counter,
  * notifies namenode, and adds it to the block scanner
- * @param block
- * @param delHint
+ * @param block block to close
+ * @param delHint hint on which excess block to delete
+ * @param storageUuid UUID of the storage where block is stored
  */
 void closeBlock(ExtendedBlock block, String delHint, String storageUuid) {
   metrics.incrBlocksWritten();
@@ -2318,8 +2315,8 @@ public class DataNode extends Configured
  * The corresponding replica must be an RBW or a Finalized.
  * Its GS and numBytes will be set to
  * the stored GS and the visible length.
- * @param targets
- * @param client
+ * @param targets targets to transfer the block to
+ * @param client client name
  */
 void transferReplicaForPipelineRecovery(final ExtendedBlock b,
     final DatanodeInfo[] targets, final String client) throws IOException {

View File

@@ -1077,7 +1077,7 @@ class DataXceiver extends Receiver implements Runnable {
 /**
  * Utility function for sending a response.
  *
- * @param opStatus status message to write
+ * @param status status message to write
  * @param message message to send to the client or other DN
  */
 private void sendResponse(Status status,

View File

@@ -108,8 +108,7 @@ public class DirectoryScanner implements Runnable {
 ScanInfoPerBlockPool(int sz) {super(sz);}
 /**
- * Merges "that" ScanInfoPerBlockPool into this one
- * @param that
+ * Merges {@code that} ScanInfoPerBlockPool into this one
  */
 public void addAll(ScanInfoPerBlockPool that) {
   if (that == null) return;

View File

@@ -54,7 +54,7 @@ public class FinalizedReplica extends ReplicaInfo {
 /**
  * Copy constructor.
- * @param from
+ * @param from where to copy construct from
  */
 public FinalizedReplica(FinalizedReplica from) {
   super(from);

View File

@@ -68,7 +68,7 @@ public class ReplicaBeingWritten extends ReplicaInPipeline {
 /**
  * Copy constructor.
- * @param from
+ * @param from where to copy from
  */
 public ReplicaBeingWritten(ReplicaBeingWritten from) {
   super(from);

View File

@@ -89,7 +89,7 @@ public class ReplicaInPipeline extends ReplicaInfo
 /**
  * Copy constructor.
- * @param from
+ * @param from where to copy from
  */
 public ReplicaInPipeline(ReplicaInPipeline from) {
   super(from);

View File

@@ -40,7 +40,7 @@ public interface ReplicaInPipelineInterface extends Replica {
 /**
  * Set the number bytes that have acked
- * @param bytesAcked
+ * @param bytesAcked number bytes acked
  */
 void setBytesAcked(long bytesAcked);

View File

@@ -100,7 +100,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
 /**
  * Copy constructor.
- * @param from
+ * @param from where to copy from
  */
 ReplicaInfo(ReplicaInfo from) {
   this(from, from.getVolume(), from.getDir());

View File

@@ -50,7 +50,7 @@ public class ReplicaUnderRecovery extends ReplicaInfo {
 /**
  * Copy constructor.
- * @param from
+ * @param from where to copy from
  */
 public ReplicaUnderRecovery(ReplicaUnderRecovery from) {
   super(from);

View File

@@ -60,7 +60,7 @@ public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
 /**
  * Copy constructor.
- * @param from
+ * @param from where to copy from
  */
 public ReplicaWaitingToBeRecovered(ReplicaWaitingToBeRecovered from) {
   super(from);

View File

@@ -165,13 +165,8 @@ public class AvailableSpaceVolumeChoosingPolicy<V extends FsVolumeSpi>
 }
 /**
- * Check if the available space on all the volumes is roughly equal.
- *
- * @param volumes the volumes to check
- * @return true if all volumes' free space is within the configured threshold,
- * false otherwise.
- * @throws IOException
- * in the event of error checking amount of available space
+ * @return true if all volumes' free space is within the
+ *         configured threshold, false otherwise.
  */
 public boolean areAllVolumesWithinFreeSpaceThreshold() {
   long leastAvailable = Long.MAX_VALUE;
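
The rewritten @return above is the whole contract of the method: it answers whether the gap between the fullest and emptiest volume stays inside the configured balanced-space threshold. A minimal sketch of that check, with illustrative names rather than the actual private fields, and error handling omitted:

    // illustrative sketch of areAllVolumesWithinFreeSpaceThreshold()
    long least = Long.MAX_VALUE; // least free space seen on any volume
    long most = 0;               // most free space seen on any volume
    for (V volume : volumes) {
      long free = volume.getAvailable(); // FsVolumeSpi#getAvailable()
      least = Math.min(least, free);
      most = Math.max(most, free);
    }
    return (most - least) < balancedSpaceThreshold; // balanced if within threshold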

View File

@@ -124,16 +124,14 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
 /**
  * Returns the specified block's on-disk length (excluding metadata)
- * @param b
  * @return the specified block's on-disk length (excluding metadta)
- * @throws IOException
+ * @throws IOException on error
  */
 public long getLength(ExtendedBlock b) throws IOException;
 /**
  * Get reference to the replica meta info in the replicasMap.
  * To be called from methods that are synchronized on {@link FSDataset}
- * @param blockId
  * @return replica from the replicas map
  */
 @Deprecated
@@ -151,8 +149,8 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
 /**
  * Returns an input stream at specified offset of the specified block
- * @param b
- * @param seekOffset
+ * @param b block
+ * @param seekOffset offset with in the block to seek to
  * @return an input stream to read the contents of the specified block,
  * starting at the offset
  * @throws IOException
@@ -163,9 +161,6 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
 /**
  * Returns an input stream at specified offset of the specified block
  * The block is still in the tmp directory and is not finalized
- * @param b
- * @param blkoff
- * @param ckoff
  * @return an input stream to read the contents of the specified block,
  * starting at the offset
  * @throws IOException
@@ -256,7 +251,6 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
  * Finalizes the block previously opened for writing using writeToBlock.
  * The block size is what is in the parameter b and it must match the amount
  * of data written
- * @param b
  * @throws IOException
  */
 public void finalizeBlock(ExtendedBlock b) throws IOException;
@@ -264,7 +258,6 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
 /**
  * Unfinalizes the block previously opened for writing using writeToBlock.
  * The temporary file associated with this block is deleted.
- * @param b
  * @throws IOException
  */
 public void unfinalizeBlock(ExtendedBlock b) throws IOException;
@@ -289,14 +282,12 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
 /**
  * Is the block valid?
- * @param b
  * @return - true if the specified block is valid
  */
 public boolean isValidBlock(ExtendedBlock b);
 /**
  * Is the block a valid RBW?
- * @param b
  * @return - true if the specified block is a valid RBW
  */
 public boolean isValidRbw(ExtendedBlock b);
@@ -327,7 +318,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
  * Determine if the specified block is cached.
  * @param bpid Block pool id
  * @param blockIds - block id
- * @returns true if the block is cached
+ * @return true if the block is cached
  */
 public boolean isCached(String bpid, long blockId);

View File

@@ -74,7 +74,7 @@ class BlockPoolSlice {
  * @param bpid Block pool Id
  * @param volume {@link FsVolumeImpl} to which this BlockPool belongs to
  * @param bpDir directory corresponding to the BlockPool
- * @param conf
+ * @param conf configuration
  * @throws IOException
  */
 BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,

View File

@@ -120,10 +120,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 /**
- * Returns a clone of a replica stored in data-node memory.
- * Should be primarily used for testing.
- * @param blockId
- * @return
+ * This should be primarily used for testing.
+ * @return clone of replica store in datanode memory
  */
 ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
   ReplicaInfo r = volumeMap.get(bpid, blockId);
@@ -1581,7 +1579,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     datanode.getDnConf().getXceiverStopTimeout());
 }
-/** static version of {@link #initReplicaRecovery(Block, long)}. */
+/** static version of {@link #initReplicaRecovery(RecoveringBlock)}. */
 static ReplicaRecoveryInfo initReplicaRecovery(String bpid, ReplicaMap map,
     Block block, long recoveryId, long xceiverStopTimeout) throws IOException {

View File

@@ -100,7 +100,6 @@ public class MappableBlock implements Closeable {
 /**
  * Verifies the block's checksum. This is an I/O intensive operation.
- * @return if the block was successfully checksummed.
  */
 private static void verifyChecksum(long length,
     FileInputStream metaIn, FileChannel blockChannel, String blockFileName)

View File

@@ -355,7 +355,7 @@ public class BackupNode extends NameNode {
 /**
  * Register this backup node with the active name-node.
- * @param nsInfo
+ * @param nsInfo namespace information
  * @throws IOException
  */
 private void registerWith(NamespaceInfo nsInfo) throws IOException {

View File

@@ -216,7 +216,7 @@ public final class CacheManager {
 /**
  * Resets all tracked directives and pools. Called during 2NN checkpointing to
- * reset FSNamesystem state. See {FSNamesystem{@link #clear()}.
+ * reset FSNamesystem state. See {@link FSNamesystem#clear()}.
  */
 void clear() {
   directivesById.clear();

View File

@@ -125,7 +125,7 @@ public final class CachedBlock implements Element,
  * @param type If null, this parameter is ignored.
  * If it is non-null, we match only datanodes which
  * have it on this list.
- * See {@link DatanodeDescriptor#CachedBlocksList#Type}
+ * See {@link DatanodeDescriptor.CachedBlocksList.Type}
  * for a description of all the lists.
  *
  * @return The list of datanodes. Modifying this list does not

View File

@@ -388,7 +388,6 @@ class ClusterJspHelper {
  * is an inner map whose key is namenode, value is datanode status.
  * reported by each namenode.
  * @param namenodeHost host name of the namenode
- * @param decomnode update DecommissionNode with alive node status
  * @param json JSON string contains datanode status
  * @throws IOException
  */
@@ -426,7 +425,6 @@ class ClusterJspHelper {
  * @param statusMap map with key being datanode, value being an
  * inner map (key:namenode, value:decommisionning state).
  * @param host datanode hostname
- * @param decomnode DecommissionNode
  * @param json String
  * @throws IOException
  */
@@ -468,7 +466,6 @@ class ClusterJspHelper {
  * @param dataNodeStatusMap map with key being datanode, value being an
  * inner map (key:namenode, value:decommisionning state).
  * @param host datanode
- * @param decomnode DecommissionNode
  * @param json String
  */
 private static void getDecommissionNodeStatus(

View File

@@ -93,10 +93,6 @@ public class EditLogFileInputStream extends EditLogInputStream {
  * @param name filename to open
  * @param firstTxId first transaction found in file
  * @param lastTxId last transaction id found in file
- * @throws LogHeaderCorruptException if the header is either missing or
- * appears to be corrupt/truncated
- * @throws IOException if an actual IO error occurs while reading the
- * header
  */
 public EditLogFileInputStream(File name, long firstTxId, long lastTxId,
     boolean isInProgress) {

View File

@@ -127,14 +127,14 @@ public abstract class EditLogOutputStream implements Closeable {
 }
 /**
- * Return total time spent in {@link #flushAndSync()}
+ * Return total time spent in {@link #flushAndSync(boolean)}
  */
 long getTotalSyncTime() {
   return totalTimeSync;
 }
 /**
- * Return number of calls to {@link #flushAndSync()}
+ * Return number of calls to {@link #flushAndSync(boolean)}
  */
 protected long getNumSync() {
   return numSync;

View File

@@ -524,7 +524,7 @@ public class FSDirectory implements Closeable {
 /**
  * @throws SnapshotAccessControlException
  * @see #unprotectedRenameTo(String, String, long)
- * @deprecated Use {@link #renameTo(String, String, Rename...)} instead.
+ * @deprecated Use {@link #renameTo(String, String, boolean, Rename...)}
  */
 @Deprecated
 boolean renameTo(String src, String dst, boolean logRetryCache)
@@ -581,7 +581,7 @@ public class FSDirectory implements Closeable {
  * @throws QuotaExceededException if the operation violates any quota limit
  * @throws FileAlreadyExistsException if the src is a symlink that points to dst
  * @throws SnapshotAccessControlException if path is in RO snapshot
- * @deprecated See {@link #renameTo(String, String)}
+ * @deprecated See {@link #renameTo(String, String, boolean, Rename...)}
  */
 @Deprecated
 boolean unprotectedRenameTo(String src, String dst, long timestamp)
@@ -1844,7 +1844,7 @@ public class FSDirectory implements Closeable {
 /**
  * update quota of each inode and check to see if quota is exceeded.
- * See {@link #updateCount(INode[], int, long, long, boolean)}
+ * See {@link #updateCount(INodesInPath, long, long, boolean)}
  */
 private void updateCountNoQuotaCheck(INodesInPath inodesInPath,
     int numOfINodes, long nsDelta, long dsDelta) {
@@ -1928,13 +1928,12 @@ public class FSDirectory implements Closeable {
  * @param src string representation of the path to the directory
  * @param permissions the permission of the directory
- * @param isAutocreate if the permission of the directory should inherit
+ * @param inheritPermission if the permission of the directory should inherit
  * from its parent or not. u+wx is implicitly added to
  * the automatically created directories, and to the
  * given directory if inheritPermission is true
  * @param now creation time
  * @return true if the operation succeeds false otherwise
- * @throws FileNotFoundException if an ancestor or itself is a file
  * @throws QuotaExceededException if directory creation violates
  * any quota limit
  * @throws UnresolvedLinkException if a symlink is encountered in src.
@@ -2064,7 +2063,7 @@ public class FSDirectory implements Closeable {
 /**
  * Add the given child to the namespace.
  * @param src The full path name of the child node.
- * @throw QuotaExceededException is thrown if it violates quota limit
+ * @throws QuotaExceededException is thrown if it violates quota limit
  */
 private boolean addINode(String src, INode child
     ) throws QuotaExceededException, UnresolvedLinkException {
@@ -2260,7 +2259,7 @@ public class FSDirectory implements Closeable {
  * Its ancestors are stored at [0, pos-1].
  * @return false if the child with this name already exists;
  * otherwise return true;
- * @throw QuotaExceededException is thrown if it violates quota limit
+ * @throws QuotaExceededException is thrown if it violates quota limit
  */
 private boolean addChild(INodesInPath iip, int pos,
     INode child, boolean checkQuota) throws QuotaExceededException {
@@ -2446,7 +2445,7 @@ public class FSDirectory implements Closeable {
 /**
  * See {@link ClientProtocol#setQuota(String, long, long)} for the contract.
  * Sets quota for for a directory.
- * @returns INodeDirectory if any of the quotas have changed. null other wise.
+ * @return INodeDirectory if any of the quotas have changed. null other wise.
  * @throws FileNotFoundException if the path does not exist.
  * @throws PathIsNotDirectoryException if the path is not a directory.
  * @throws QuotaExceededException if the directory tree size is

View File

@@ -424,7 +424,6 @@ public class FSEditLog implements LogsPurgeable {
 /**
  * Wait if an automatic sync is scheduled
- * @throws InterruptedException
  */
 synchronized void waitIfAutoSyncScheduled() {
   try {
@@ -802,7 +801,8 @@ public class FSEditLog implements LogsPurgeable {
 /** Add set namespace quota record to edit log
  *
  * @param src the string representation of the path to a directory
- * @param quota the directory size limit
+ * @param nsQuota namespace quota
+ * @param dsQuota diskspace quota
  */
 void logSetQuota(String src, long nsQuota, long dsQuota) {
   SetQuotaOp op = SetQuotaOp.getInstance(cache.get())
@@ -1452,8 +1452,9 @@ public class FSEditLog implements LogsPurgeable {
  * Select a list of input streams.
  *
  * @param fromTxId first transaction in the selected streams
- * @param toAtLeast the selected streams must contain this transaction
- * @param inProgessOk set to true if in-progress streams are OK
+ * @param toAtLeastTxId the selected streams must contain this transaction
+ * @param recovery recovery context
+ * @param inProgressOk set to true if in-progress streams are OK
  */
 public synchronized Collection<EditLogInputStream> selectInputStreams(
     long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery,

View File

@@ -992,9 +992,6 @@ public class FSEditLogLoader {
  * If there are invalid or corrupt transactions in the middle of the stream,
  * validateEditLog will skip over them.
  * This reads through the stream but does not close it.
- *
- * @throws IOException if the stream cannot be read due to an IO error (eg
- * if the log does not exist)
  */
 static EditLogValidation validateEditLog(EditLogInputStream in) {
   long lastPos = 0;

View File

@@ -666,8 +666,8 @@ public abstract class FSEditLogOp {
 }
 /**
- * {@literal @AtMostOnce} for {@link ClientProtocol#startFile} and
- * {@link ClientProtocol#appendFile}
+ * {@literal @AtMostOnce} for {@link ClientProtocol#create} and
+ * {@link ClientProtocol#append}
  */
 static class AddOp extends AddCloseOp {
   private AddOp() {

View File

@@ -156,7 +156,7 @@ public class FSImage implements Closeable {
  * directory to allow them to format anyway. Otherwise, returns
  * false, unless 'force' is specified.
  *
- * @param force format regardless of whether dirs exist
+ * @param force if true, format regardless of whether dirs exist
  * @param interactive prompt the user when a dir exists
  * @return true if formatting should proceed
  * @throws IOException if some storage cannot be accessed
@@ -1002,7 +1002,6 @@ public class FSImage implements Closeable {
 /**
  * Save the contents of the FS image to a new image file in each of the
  * current storage directories.
- * @param canceler
  */
 public synchronized void saveNamespace(FSNamesystem source, NameNodeFile nnf,
     Canceler canceler) throws IOException {

View File

@@ -484,7 +484,7 @@ public class FSImageFormat {
 /**
  * Load all children of a directory
  *
- * @param in
+ * @param in input to load from
  * @param counter Counter to increment for namenode startup progress
  * @return number of child inodes read
  * @throws IOException

View File

@@ -206,7 +206,7 @@ public class FSImageSerialization {
 /**
  * Reading the path from the image and converting it to byte[][] directly
  * this saves us an array copy and conversions to and from String
- * @param in
+ * @param in input to read from
  * @return the array each element of which is a byte[] representation
  * of a path component
  * @throws IOException

View File

@ -1353,7 +1353,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/** /**
* Returns edit directories that are shared between primary and secondary. * Returns edit directories that are shared between primary and secondary.
* @param conf configuration * @param conf configuration
* @return Collection of edit directories. * @return collection of edit directories from {@code conf}
*/ */
public static List<URI> getSharedEditsDirs(Configuration conf) { public static List<URI> getSharedEditsDirs(Configuration conf) {
// don't use getStorageDirs here, because we want an empty default // don't use getStorageDirs here, because we want an empty default
@ -1789,9 +1789,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* before we start actual move. * before we start actual move.
* *
* This does not support ".inodes" relative path * This does not support ".inodes" relative path
* @param target target file path to concatenate into * @param target target to concat into
* @param srcs files that are concatenated * @param srcs file that will be concatenated
* @throws IOException * @throws IOException on error
*/ */
void concat(String target, String [] srcs) void concat(String target, String [] srcs)
throws IOException, UnresolvedLinkException { throws IOException, UnresolvedLinkException {
@ -4087,11 +4087,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
} }
/** /**
* * @param pendingFile open file that needs to be closed
* @param pendingFile * @param storedBlock last block
* @param storedBlock
* @return Path of the file that was closed. * @return Path of the file that was closed.
* @throws IOException * @throws IOException on error
*/ */
@VisibleForTesting @VisibleForTesting
String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock) String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
@ -4299,7 +4298,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/** /**
* Perform resource checks and cache the results. * Perform resource checks and cache the results.
* @throws IOException
*/ */
void checkAvailableResources() { void checkAvailableResources() {
Preconditions.checkState(nnResourceChecker != null, Preconditions.checkState(nnResourceChecker != null,
@ -5350,7 +5348,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/** /**
* Leave safe mode. * Leave safe mode.
* @throws IOException
*/ */
void leaveSafeMode() { void leaveSafeMode() {
writeLock(); writeLock();
@ -5767,7 +5764,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/** /**
* Sets the generation stamp that delineates random and sequentially * Sets the generation stamp that delineates random and sequentially
* allocated block IDs. * allocated block IDs.
* @param stamp * @param stamp set generation stamp limit to this value
*/ */
void setGenerationStampV1Limit(long stamp) { void setGenerationStampV1Limit(long stamp) {
Preconditions.checkState(generationStampV1Limit == Preconditions.checkState(generationStampV1Limit ==
@ -5852,7 +5849,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* Determine whether the block ID was randomly generated (legacy) or * Determine whether the block ID was randomly generated (legacy) or
* sequentially generated. The generation stamp value is used to * sequentially generated. The generation stamp value is used to
* make the distinction. * make the distinction.
* @param block
* @return true if the block ID was randomly generated, false otherwise. * @return true if the block ID was randomly generated, false otherwise.
*/ */
boolean isLegacyBlock(Block block) { boolean isLegacyBlock(Block block) {
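A sketch of the delineation these two comments describe; the comparison below is illustrative of the idea, not quoted from the implementation:

    // Blocks stamped at or below the recorded V1 limit predate sequential
    // block IDs, so their IDs were randomly generated (legacy).
    boolean legacy = block.getGenerationStamp() < generationStampV1Limit;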
@ -6089,7 +6085,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* Release (unregister) backup node. * Release (unregister) backup node.
* <p> * <p>
* Find and remove the backup stream corresponding to the node. * Find and remove the backup stream corresponding to the node.
* @param registration
* @throws IOException * @throws IOException
*/ */
void releaseBackupNode(NamenodeRegistration registration) void releaseBackupNode(NamenodeRegistration registration)
@ -6225,8 +6220,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/** /**
* @param renewer Renewer information * @param renewer Renewer information
* @return Token<DelegationTokenIdentifier> * @return delegation token
* @throws IOException * @throws IOException on error
*/ */
Token<DelegationTokenIdentifier> getDelegationToken(Text renewer) Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
throws IOException { throws IOException {
@ -6267,10 +6262,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/** /**
* *
* @param token delegation token * @param token token to renew
* @return New expiryTime of the token * @return new expiryTime of the token
* @throws InvalidToken * @throws InvalidToken if {@code token} is invalid
* @throws IOException * @throws IOException on other errors
*/ */
long renewDelegationToken(Token<DelegationTokenIdentifier> token) long renewDelegationToken(Token<DelegationTokenIdentifier> token)
throws InvalidToken, IOException { throws InvalidToken, IOException {
@ -6301,8 +6296,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/** /**
* *
* @param token delegation token that needs to be canceled * @param token token to cancel
* @throws IOException * @throws IOException on error
*/ */
void cancelDelegationToken(Token<DelegationTokenIdentifier> token) void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException { throws IOException {
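Taken together, the three hunks above cover the delegation-token lifecycle; a hedged end-to-end sketch against the public Token API, with an illustrative renewer name:

    // Obtain, renew, and finally cancel a delegation token.
    // Assumed import: org.apache.hadoop.security.token.Token.
    FileSystem fs = FileSystem.get(conf);
    Token<?> token = fs.getDelegationToken("yarn");   // renewer name is illustrative
    long newExpiry = token.renew(conf);               // must be invoked by the renewer
    token.cancel(conf);                               // invalidates the token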
@ -7213,7 +7208,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/** /**
* Update internal state to indicate that a rolling upgrade is in progress. * Update internal state to indicate that a rolling upgrade is in progress.
* @param startTime start time of the rolling upgrade * @param startTime rolling upgrade start time
*/ */
void startRollingUpgradeInternal(long startTime) void startRollingUpgradeInternal(long startTime)
throws IOException { throws IOException {

View File

@ -168,7 +168,7 @@ public class FileJournalManager implements JournalManager {
/** /**
* Find all editlog segments starting at or above the given txid. * Find all editlog segments starting at or above the given txid.
* @param fromTxId the txnid which to start looking * @param firstTxId the txid from which to start looking
* @param inProgressOk whether or not to include the in-progress edit log * @param inProgressOk whether or not to include the in-progress edit log
* segment * segment
* @return a list of remote edit logs * @return a list of remote edit logs

View File

@ -652,8 +652,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
/** /**
* Breaks file path into components. * Breaks {@code path} into components.
* @param path
* @return array of byte arrays each of which represents * @return array of byte arrays each of which represents
* a single path component. * a single path component.
*/ */
@ -673,8 +672,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
} }
/** /**
* Splits an absolute path into an array of path components. * Splits an absolute {@code path} into an array of path components.
* @param path
* @throws AssertionError if the given path is invalid. * @throws AssertionError if the given path is invalid.
* @return array of path components. * @return array of path components.
*/ */
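A minimal plain-Java sketch (not the HDFS implementation) of the split both javadocs describe: one byte[] per component, with an empty leading component for the root:

    // "/foo/bar" -> { "", "foo", "bar" }, each component as UTF-8 bytes.
    // Assumed import: java.nio.charset.StandardCharsets.
    String[] parts = "/foo/bar".split("/", -1);
    byte[][] components = new byte[parts.length][];
    for (int i = 0; i < parts.length; i++) {
      components[i] = parts[i].getBytes(StandardCharsets.UTF_8);
    }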

View File

@ -402,7 +402,6 @@ public class LeaseManager {
/** /**
* Get the list of inodes corresponding to valid leases. * Get the list of inodes corresponding to valid leases.
* @return list of inodes * @return list of inodes
* @throws UnresolvedLinkException
*/ */
Map<String, INodeFile> getINodesUnderConstruction() { Map<String, INodeFile> getINodesUnderConstruction() {
Map<String, INodeFile> inodes = new TreeMap<String, INodeFile>(); Map<String, INodeFile> inodes = new TreeMap<String, INodeFile>();

View File

@ -42,7 +42,6 @@ interface LogsPurgeable {
* *
* @param fromTxId the first transaction id we want to read * @param fromTxId the first transaction id we want to read
* @param inProgressOk whether or not in-progress streams should be returned * @param inProgressOk whether or not in-progress streams should be returned
* @return a list of streams
* @throws IOException if the underlying storage has an error or is otherwise * @throws IOException if the underlying storage has an error or is otherwise
* inaccessible * inaccessible
*/ */

View File

@ -425,8 +425,7 @@ public class NNStorage extends Storage implements Closeable,
/** /**
* Write last checkpoint time into a separate file. * Write last checkpoint time into a separate file.
* * @param sd storage directory
* @param sd
* @throws IOException * @throws IOException
*/ */
void writeTransactionIdFile(StorageDirectory sd, long txid) throws IOException { void writeTransactionIdFile(StorageDirectory sd, long txid) throws IOException {

View File

@ -356,8 +356,6 @@ public class NameNode implements NameNodeStatusMXBean {
/** /**
* TODO:FEDERATION
* @param filesystemURI
* @return address of file system * @return address of file system
*/ */
public static InetSocketAddress getAddress(URI filesystemURI) { public static InetSocketAddress getAddress(URI filesystemURI) {
@ -800,8 +798,8 @@ public class NameNode implements NameNodeStatusMXBean {
* Interactively confirm that formatting is desired * Interactively confirm that formatting is desired
* for each existing directory and format them. * for each existing directory and format them.
* *
* @param conf * @param conf configuration to use
* @param force * @param force if true, format regardless of whether dirs exist
* @return true if formatting was aborted, false otherwise * @return true if formatting was aborted, false otherwise
* @throws IOException * @throws IOException
*/ */

View File

@ -1180,9 +1180,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
/** /**
* Verify version. * Verify version.
* * @param version layout version
* @param version * @throws IOException on layout version mismatch
* @throws IOException
*/ */
void verifyLayoutVersion(int version) throws IOException { void verifyLayoutVersion(int version) throws IOException {
if (version != HdfsConstants.NAMENODE_LAYOUT_VERSION) if (version != HdfsConstants.NAMENODE_LAYOUT_VERSION)

View File

@ -159,7 +159,6 @@ public class NamenodeFsck {
* @param totalDatanodes number of live datanodes * @param totalDatanodes number of live datanodes
* @param minReplication minimum replication * @param minReplication minimum replication
* @param remoteAddress source address of the fsck request * @param remoteAddress source address of the fsck request
* @throws IOException
*/ */
NamenodeFsck(Configuration conf, NameNode namenode, NamenodeFsck(Configuration conf, NameNode namenode,
NetworkTopology networktopology, NetworkTopology networktopology,

View File

@ -209,7 +209,6 @@ public class SecondaryNameNode implements Runnable {
/** /**
* Initialize SecondaryNameNode. * Initialize SecondaryNameNode.
* @param commandLineOpts
*/ */
private void initialize(final Configuration conf, private void initialize(final Configuration conf,
CommandLineOpts commandLineOpts) throws IOException { CommandLineOpts commandLineOpts) throws IOException {

View File

@ -130,8 +130,8 @@ abstract public class HAState {
* Check if an operation is supported in a given state. * Check if an operation is supported in a given state.
* @param context HA context * @param context HA context
* @param op Type of the operation. * @param op Type of the operation.
* @throws UnsupportedActionException if a given type of operation is not * @throws StandbyException if a given type of operation is not
* supported in this state. * supported in standby state
*/ */
public abstract void checkOperation(final HAContext context, final OperationCategory op) public abstract void checkOperation(final HAContext context, final OperationCategory op)
throws StandbyException; throws StandbyException;

View File

@ -86,7 +86,6 @@ abstract class AbstractINodeDiff<N extends INode,
return posteriorDiff; return posteriorDiff;
} }
/** @return the posterior diff. */
final void setPosterior(D posterior) { final void setPosterior(D posterior) {
posteriorDiff = posterior; posteriorDiff = posterior;
} }

View File

@ -32,7 +32,6 @@ public class BlockIdCommand extends DatanodeCommand {
/** /**
* Create BlockCommand for the given action * Create BlockCommand for the given action
* @param blocks blocks related to the action
*/ */
public BlockIdCommand(int action, String poolId, long[] blockIds) { public BlockIdCommand(int action, String poolId, long[] blockIds) {
super(action); super(action);

View File

@ -119,9 +119,9 @@ public interface DatanodeProtocol {
* and should be deleted. This function is meant to upload *all* * and should be deleted. This function is meant to upload *all*
* the locally-stored blocks. It's invoked upon startup and then * the locally-stored blocks. It's invoked upon startup and then
* infrequently afterwards. * infrequently afterwards.
* @param registration * @param registration datanode registration
* @param poolId - the block pool ID for the blocks * @param poolId the block pool ID for the blocks
* @param reports - report of blocks per storage * @param reports report of blocks per storage
* Each finalized block is represented as 3 longs. Each under- * Each finalized block is represented as 3 longs. Each under-
* construction replica is represented as 4 longs. * construction replica is represented as 4 longs.
* This is done instead of Block[] to reduce memory used by block reports. * This is done instead of Block[] to reduce memory used by block reports.
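A hedged sketch of the per-replica encoding this comment explains; the example values and the field order are assumptions:

    // Assumed example values; field order is illustrative.
    long blockId = 1073741825L, numBytes = 134217728L, generationStamp = 1001L;
    long[] finalized = { blockId, numBytes, generationStamp };   // 3 longs per finalized block
    long replicaState = 1L;                                      // e.g. a replica-state ordinal; assumption
    long[] underConstruction =
        { blockId, numBytes, generationStamp, replicaState };    // 4 longs per UC replica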

View File

@ -48,8 +48,6 @@ public class DatanodeStorage {
/** /**
* Create a storage with {@link State#NORMAL} and {@link StorageType#DEFAULT}. * Create a storage with {@link State#NORMAL} and {@link StorageType#DEFAULT}.
*
* @param storageID
*/ */
public DatanodeStorage(String storageID) { public DatanodeStorage(String storageID) {
this(storageID, State.NORMAL, StorageType.DEFAULT); this(storageID, State.NORMAL, StorageType.DEFAULT);

View File

@ -39,7 +39,7 @@ public abstract class ServerCommand {
* *
* @see DatanodeProtocol * @see DatanodeProtocol
* @see NamenodeProtocol * @see NamenodeProtocol
* @param action * @param action protocol specific action
*/ */
public ServerCommand(int action) { public ServerCommand(int action) {
this.action = action; this.action = action;

View File

@ -130,9 +130,6 @@ public class DFSck extends Configured implements Tool {
out.println(USAGE + "\n"); out.println(USAGE + "\n");
ToolRunner.printGenericCommandUsage(out); ToolRunner.printGenericCommandUsage(out);
} }
/**
* @param args
*/
@Override @Override
public int run(final String[] args) throws IOException { public int run(final String[] args) throws IOException {
if (args.length == 0) { if (args.length == 0) {

View File

@ -167,8 +167,7 @@ public class GetConf extends Configured implements Tool {
} }
/** Method to be overridden by sub classes for specific behavior /** Method to be overridden by sub classes for specific behavior */
* @param args */
int doWorkInternal(GetConf tool, String[] args) throws Exception { int doWorkInternal(GetConf tool, String[] args) throws Exception {
String value = tool.getConf().getTrimmed(key); String value = tool.getConf().getTrimmed(key);

View File

@ -30,9 +30,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
@InterfaceAudience.Private @InterfaceAudience.Private
public class HDFSConcat { public class HDFSConcat {
private final static String def_uri = "hdfs://localhost:9000"; private final static String def_uri = "hdfs://localhost:9000";
/**
* @param args
*/
public static void main(String... args) throws IOException { public static void main(String... args) throws IOException {
if(args.length < 2) { if(args.length < 2) {

View File

@ -217,7 +217,7 @@ public class JMXGet {
} }
/** /**
* @param msg * @param msg error message
*/ */
private static void err(String msg) { private static void err(String msg) {
System.err.println(msg); System.err.println(msg);
@ -274,13 +274,7 @@ public class JMXGet {
return commandLine; return commandLine;
} }
/**
* main
*
* @param args
*/
public static void main(String[] args) { public static void main(String[] args) {
int res = -1; int res = -1;
// parse arguments // parse arguments

View File

@ -37,8 +37,7 @@ public class BinaryEditsVisitor implements OfflineEditsVisitor {
/** /**
* Create a processor that writes to a given file * Create a processor that writes to a given file
* * @param outputName Name of file to write output to
* @param filename Name of file to write output to
*/ */
public BinaryEditsVisitor(String outputName) throws IOException { public BinaryEditsVisitor(String outputName) throws IOException {
this.elfos = new EditLogFileOutputStream(new Configuration(), this.elfos = new EditLogFileOutputStream(new Configuration(),

View File

@ -63,8 +63,6 @@ public class DataTransferThrottler {
/** /**
* Sets throttle bandwidth. This takes effect latest by the end of current * Sets throttle bandwidth. This takes effect latest by the end of current
* period. * period.
*
* @param bytesPerSecond
*/ */
public synchronized void setBandwidth(long bytesPerSecond) { public synchronized void setBandwidth(long bytesPerSecond) {
if ( bytesPerSecond <= 0 ) { if ( bytesPerSecond <= 0 ) {
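A hedged sketch of driving the throttler from a copy loop; the bandwidth figure and buffer size are arbitrary, and `in`/`out` are assumed to be open streams:

    // Cap transfer bandwidth at ~1 MB/s; setBandwidth() may adjust it later.
    DataTransferThrottler throttler = new DataTransferThrottler(1024 * 1024);
    byte[] buf = new byte[64 * 1024];
    int n;
    while ((n = in.read(buf)) > 0) {
      out.write(buf, 0, n);
      throttler.throttle(n);   // sleeps once this period's byte quota is exhausted
    }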

View File

@ -60,10 +60,7 @@ public class AclPermissionParam extends StringParam {
} }
/** /**
* Parse the list of AclEntry and returns aclspec. * @return the aclspec parsed from {@code aclEntry}
*
* @param List <AclEntry>
* @return String
*/ */
private static String parseAclSpec(List<AclEntry> aclEntry) { private static String parseAclSpec(List<AclEntry> aclEntry) {
return StringUtils.join(aclEntry, ","); return StringUtils.join(aclEntry, ",");
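A hedged example of the aclspec string this produces, assuming AclEntry's standard builder and toString():

    // Two entries join into the aclspec "user::rwx,group::r--".
    // Assumed imports: java.util.Arrays, java.util.List,
    // org.apache.hadoop.fs.permission.*.
    List<AclEntry> acl = Arrays.asList(
        new AclEntry.Builder().setType(AclEntryType.USER)
            .setPermission(FsAction.ALL).build(),
        new AclEntry.Builder().setType(AclEntryType.GROUP)
            .setPermission(FsAction.READ).build());
    String aclspec = StringUtils.join(acl, ",");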

View File

@ -229,7 +229,7 @@ public class BenchmarkThroughput extends Configured implements Tool {
} }
/** /**
* @param args * @param args arguments
*/ */
public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new HdfsConfiguration(), int res = ToolRunner.run(new HdfsConfiguration(),

View File

@ -1384,8 +1384,8 @@ public class MiniDFSCluster {
/** /**
* Finalize cluster for the namenode at the given index * Finalize cluster for the namenode at the given index
* @see MiniDFSCluster#finalizeCluster(Configuration) * @see MiniDFSCluster#finalizeCluster(Configuration)
* @param nnIndex * @param nnIndex index of the namenode
* @param conf * @param conf configuration
* @throws Exception * @throws Exception
*/ */
public void finalizeCluster(int nnIndex, Configuration conf) throws Exception { public void finalizeCluster(int nnIndex, Configuration conf) throws Exception {
@ -2216,7 +2216,7 @@ public class MiniDFSCluster {
* to determine the location of the storage of a DN instance in the mini cluster * to determine the location of the storage of a DN instance in the mini cluster
* @param dnIndex datanode index * @param dnIndex datanode index
* @param dirIndex directory index. * @param dirIndex directory index.
* @return * @return storage directory path
*/ */
private static String getStorageDirPath(int dnIndex, int dirIndex) { private static String getStorageDirPath(int dnIndex, int dirIndex) {
return "data/data" + (2 * dnIndex + 1 + dirIndex); return "data/data" + (2 * dnIndex + 1 + dirIndex);
@ -2242,8 +2242,8 @@ public class MiniDFSCluster {
} }
/** /**
* Get directory relative to block pool directory in the datanode * Get directory relative to block pool directory in the datanode
* @param storageDir * @param storageDir storage directory
* @return current directory * @return current directory in the given storage directory
*/ */
public static String getBPDir(File storageDir, String bpid, String dirName) { public static String getBPDir(File storageDir, String bpid, String dirName) {
return getBPDir(storageDir, bpid) + dirName + "/"; return getBPDir(storageDir, bpid) + dirName + "/";

View File

@ -101,7 +101,6 @@ public class BlockManagerTestUtil {
} }
/** /**
* @param blockManager
* @return replication monitor thread instance from block manager. * @return replication monitor thread instance from block manager.
*/ */
public static Daemon getReplicationThread(final BlockManager blockManager) public static Daemon getReplicationThread(final BlockManager blockManager)
@ -111,7 +110,6 @@ public class BlockManagerTestUtil {
/** /**
* Stop the replication monitor thread * Stop the replication monitor thread
* @param blockManager
*/ */
public static void stopReplicationThread(final BlockManager blockManager) public static void stopReplicationThread(final BlockManager blockManager)
throws IOException { throws IOException {
@ -126,7 +124,6 @@ public class BlockManagerTestUtil {
} }
/** /**
* @param blockManager
* @return corruptReplicas from block manager * @return corruptReplicas from block manager
*/ */
public static CorruptReplicasMap getCorruptReplicas(final BlockManager blockManager){ public static CorruptReplicasMap getCorruptReplicas(final BlockManager blockManager){
@ -135,7 +132,6 @@ public class BlockManagerTestUtil {
} }
/** /**
* @param blockManager
* @return computed block replication and block invalidation work that can be * @return computed block replication and block invalidation work that can be
* scheduled on data-nodes. * scheduled on data-nodes.
* @throws IOException * @throws IOException
@ -158,7 +154,7 @@ public class BlockManagerTestUtil {
* regardless of invalidation/replication limit configurations. * regardless of invalidation/replication limit configurations.
* *
* NB: you may want to set * NB: you may want to set
* {@link DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY} to * {@link DFSConfigKeys#DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY} to
* a high value to ensure that all work is calculated. * a high value to ensure that all work is calculated.
*/ */
public static int computeAllPendingWork(BlockManager bm) { public static int computeAllPendingWork(BlockManager bm) {
@ -200,7 +196,7 @@ public class BlockManagerTestUtil {
/** /**
* Change whether the block placement policy will prefer the writer's * Change whether the block placement policy will prefer the writer's
* local Datanode or not. * local Datanode or not.
* @param prefer * @param prefer if true, prefer local node
*/ */
public static void setWritingPrefersLocalNode( public static void setWritingPrefersLocalNode(
BlockManager bm, boolean prefer) { BlockManager bm, boolean prefer) {

View File

@ -171,9 +171,6 @@ public abstract class BlockReportTestBase {
* Utility routine to send block reports to the NN, either in a single call * Utility routine to send block reports to the NN, either in a single call
* or reporting one storage per call. * or reporting one storage per call.
* *
* @param dnR
* @param poolId
* @param reports
* @throws IOException * @throws IOException
*/ */
protected abstract void sendBlockReports(DatanodeRegistration dnR, String poolId, protected abstract void sendBlockReports(DatanodeRegistration dnR, String poolId,

View File

@ -833,8 +833,8 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
/** /**
* An input stream of size l with repeated bytes * An input stream of size l with repeated bytes
* @param l * @param l size of the stream
* @param iRepeatedData * @param iRepeatedData byte that is repeated in the stream
*/ */
SimulatedInputStream(long l, byte iRepeatedData) { SimulatedInputStream(long l, byte iRepeatedData) {
length = l; length = l;
@ -843,17 +843,14 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
/** /**
* An input stream of the supplied data * An input stream of the supplied data
* * @param iData data to construct the stream
* @param iData
*/ */
SimulatedInputStream(byte[] iData) { SimulatedInputStream(byte[] iData) {
data = iData; data = iData;
length = data.length; length = data.length;
} }
/** /**
*
* @return the length of the input stream * @return the length of the input stream
*/ */
long getLength() { long getLength() {
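A standalone sketch of the same idea; the class and field names are illustrative, not the test's:

    // Minimal repeated-byte stream: `length` bytes, all equal to `value`.
    class RepeatedByteStream extends java.io.InputStream {
      private long remaining;
      private final byte value;
      RepeatedByteStream(long length, byte value) {
        this.remaining = length;
        this.value = value;
      }
      @Override
      public int read() {
        if (remaining <= 0) return -1;   // EOF after `length` bytes
        remaining--;
        return value & 0xff;             // the repeated data byte
      }
    }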

View File

@ -131,14 +131,10 @@ public class CreateEditsLog {
printUsageExit(); printUsageExit();
} }
/** /**
* @param args * @param args arguments
* @throws IOException * @throws IOException
*/ */
public static void main(String[] args) public static void main(String[] args) throws IOException {
throws IOException {
long startingBlockId = 1; long startingBlockId = 1;
int numFiles = 0; int numFiles = 0;
short replication = 1; short replication = 1;

View File

@ -202,7 +202,7 @@ public class NNThroughputBenchmark implements Tool {
* {@link #executeOp(int, int, String)}, which can have different meanings * {@link #executeOp(int, int, String)}, which can have different meanings
* depending on the operation performed. * depending on the operation performed.
* *
* @param daemonId * @param daemonId id of the daemon calling this method
* @return the argument * @return the argument
*/ */
abstract String getExecutionArgument(int daemonId); abstract String getExecutionArgument(int daemonId);
@ -322,11 +322,10 @@ public class NNThroughputBenchmark implements Tool {
/** /**
* Parse first 2 arguments, corresponding to the "-op" option. * Parse first 2 arguments, corresponding to the "-op" option.
* *
* @param args * @param args argument list
* @return true if operation is all, which means that options not related * @return true if operation is all, which means that options not related
* to this operation should be ignored, or false otherwise, meaning * to this operation should be ignored, or false otherwise, meaning
* that usage should be printed when an unrelated option is encountered. * that usage should be printed when an unrelated option is encountered.
* @throws IOException
*/ */
protected boolean verifyOpArgument(List<String> args) { protected boolean verifyOpArgument(List<String> args) {
if(args.size() < 2 || ! args.get(0).startsWith("-op")) if(args.size() < 2 || ! args.get(0).startsWith("-op"))