Merge r1582073 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1582074 13f79535-47bb-0310-9956-ffa450edef68
Author: Suresh Srinivas, 2014-03-26 21:34:10 +00:00
parent 53bebe2329
commit bf266aa554
2 changed files with 38 additions and 39 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -19,6 +19,8 @@ Release 2.5.0 - UNRELEASED
     HDFS-6155. Fix Boxing/unboxing to parse a primitive findbugs warnings.
     (suresh)
+    HDFS-6119. FSNamesystem code cleanup. (suresh)
+
   OPTIMIZATIONS
   BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -602,12 +602,12 @@ private static void checkConfiguration(Configuration conf)
     if (namespaceDirs.size() == 1) {
       LOG.warn("Only one image storage directory ("
-          + DFS_NAMENODE_NAME_DIR_KEY + ") configured. Beware of dataloss"
+          + DFS_NAMENODE_NAME_DIR_KEY + ") configured. Beware of data loss"
           + " due to lack of redundant storage directories!");
     }
     if (editsDirs.size() == 1) {
       LOG.warn("Only one namespace edits storage directory ("
-          + DFS_NAMENODE_EDITS_DIR_KEY + ") configured. Beware of dataloss"
+          + DFS_NAMENODE_EDITS_DIR_KEY + ") configured. Beware of data loss"
           + " due to lack of redundant storage directories!");
     }
   }
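
As context for the warning above, a minimal sketch of how a deployment avoids it by configuring redundant metadata directories; the class name and local paths are illustrative, not part of this change:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class RedundantNameDirsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Two name/edits directories, ideally on separate disks (or one on NFS),
        // so the single-directory warnings in checkConfiguration() never fire.
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
            "file:///data/1/dfs/nn,file:///data/2/dfs/nn");
        conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
            "file:///data/1/dfs/nn,file:///data/2/dfs/nn");
        System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY));
      }
    }
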
@@ -937,8 +937,6 @@ private void stopSecretManager() {
   /**
    * Start services common to both active and standby states
-   * @param haContext
-   * @throws IOException
    */
   void startCommonServices(Configuration conf, HAContext haContext) throws IOException {
     this.registerMBean(); // register the MBean for the FSNamesystemState
@@ -965,7 +963,6 @@ void startCommonServices(Configuration conf, HAContext haContext) throws IOExcep
   /**
    * Stop services common to both active and standby states
-   * @throws IOException
    */
   void stopCommonServices() {
     writeLock();
@@ -1078,7 +1075,6 @@ private boolean shouldUseDelegationTokens() {
   /**
    * Stop services required in active state
-   * @throws InterruptedException
    */
   void stopActiveServices() {
     LOG.info("Stopping services started for active state");
@@ -1326,7 +1322,7 @@ public static List<URI> getNamespaceEditsDirs(Configuration conf,
   /**
    * Returns edit directories that are shared between primary and secondary.
-   * @param conf
+   * @param conf configuration
    * @return Collection of edit directories.
    */
   public static List<URI> getSharedEditsDirs(Configuration conf) {
@@ -1757,13 +1753,13 @@ && doAccessTime && isAccessTimeSupported()) {
   }
   /**
-   * Moves all the blocks from srcs and appends them to trg
-   * To avoid rollbacks we will verify validitity of ALL of the args
+   * Moves all the blocks from {@code srcs} and appends them to {@code target}
+   * To avoid rollbacks we will verify validity of ALL of the args
    * before we start actual move.
    *
    * This does not support ".inodes" relative path
-   * @param target
-   * @param srcs
+   * @param target target file path to concatenate into
+   * @param srcs files that are concatenated
    * @throws IOException
    */
   void concat(String target, String [] srcs)
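
The concat method documented above is reached from clients through DistributedFileSystem#concat. A minimal sketch, assuming a running HDFS and hypothetical paths; the preconditions in the comments mirror the checks in concatInternal in the hunks below:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class ConcatSketch {
      public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        // Sources must match the target's replication and block size, and only
        // the last block of the last source may be partially filled.
        Path target = new Path("/user/demo/part-00000");  // hypothetical paths
        Path[] srcs = { new Path("/user/demo/part-00001"),
                        new Path("/user/demo/part-00002") };
        dfs.concat(target, srcs);  // srcs are appended to target, then removed
      }
    }
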
@@ -1773,7 +1769,7 @@ void concat(String target, String [] srcs)
       return; // Return previous response
     }
-    // Either there is no previous request in progres or it has failed
+    // Either there is no previous request in progress or it has failed
     if(FSNamesystem.LOG.isDebugEnabled()) {
       FSNamesystem.LOG.debug("concat " + Arrays.toString(srcs) +
           " to " + target);
@@ -1898,7 +1894,7 @@ private void concatInternal(FSPermissionChecker pc, String target,
     // check replication and blocks size
     if(repl != srcInode.getBlockReplication()) {
-      throw new HadoopIllegalArgumentException("concat: the soruce file "
+      throw new HadoopIllegalArgumentException("concat: the source file "
           + src + " and the target file " + target
           + " should have the same replication: source replication is "
           + srcInode.getBlockReplication()
@@ -1913,7 +1909,7 @@ private void concatInternal(FSPermissionChecker pc, String target,
     if(endSrc)
       idx = srcBlocks.length-2; // end block of endSrc is OK not to be full
     if(idx >= 0 && srcBlocks[idx].getNumBytes() != blockSize) {
-      throw new HadoopIllegalArgumentException("concat: the soruce file "
+      throw new HadoopIllegalArgumentException("concat: the source file "
          + src + " and the target file " + target
          + " should have the same blocks sizes: target block size is "
          + blockSize + " but the size of source block " + idx + " is "
@@ -1940,7 +1936,7 @@ private void concatInternal(FSPermissionChecker pc, String target,
   /**
    * stores the modification and access time for this inode.
-   * The access time is precise upto an hour. The transaction, if needed, is
+   * The access time is precise up to an hour. The transaction, if needed, is
    * written to the edits log but is not flushed.
    */
   void setTimes(String src, long mtime, long atime)
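
setTimes is exposed to clients as FileSystem#setTimes. A small sketch, assuming a configured client; the path is hypothetical, and a value of -1 leaves the corresponding time unchanged:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SetTimesSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/user/demo/file.txt");  // hypothetical path
        // Update mtime to now; -1 keeps the stored access time as-is.
        fs.setTimes(p, System.currentTimeMillis(), -1);
      }
    }
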
@@ -2155,12 +2151,8 @@ private void verifyParentDir(String src) throws FileNotFoundException,
    * Create a new file entry in the namespace.
    *
    * For description of parameters and exceptions thrown see
-   * {@link ClientProtocol#create()}, except it returns valid file status upon
+   * {@link ClientProtocol#create}, except it returns valid file status upon
    * success
-   *
-   * For retryCache handling details see -
-   * {@link #getFileStatus(boolean, CacheEntryWithPayload)}
-   *
    */
   HdfsFileStatus startFile(String src, PermissionStatus permissions,
       String holder, String clientMachine, EnumSet<CreateFlag> flag,
@@ -2245,10 +2237,10 @@ private HdfsFileStatus startFileInt(String src, PermissionStatus permissions,
    * Create a new file or overwrite an existing file<br>
    *
    * Once the file is create the client then allocates a new block with the next
-   * call using {@link NameNode#addBlock()}.
+   * call using {@link ClientProtocol#addBlock}.
    * <p>
    * For description of parameters and exceptions thrown see
-   * {@link ClientProtocol#create()}
+   * {@link ClientProtocol#create}
    */
   private void startFileInternal(FSPermissionChecker pc, String src,
       PermissionStatus permissions, String holder, String clientMachine,
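
The create-or-overwrite path described above is what FileSystem#create drives on the client side. A minimal sketch, assuming a configured client; overwrite=true corresponds to the CreateFlag.OVERWRITE case handled here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CreateOverwriteSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // overwrite=true replaces an existing file instead of failing.
        try (FSDataOutputStream out = fs.create(new Path("/tmp/demo.txt"), true)) {
          out.writeUTF("hello");
        }
      }
    }
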
@@ -2333,7 +2325,8 @@ private void startFileInternal(FSPermissionChecker pc, String src,
    * which can still be used for writing more data. The client uses the returned
    * block locations to form the data pipeline for this block.<br>
    * The method returns null if the last block is full. The client then
-   * allocates a new block with the next call using {@link NameNode#addBlock()}.
+   * allocates a new block with the next call using
+   * {@link ClientProtocol#addBlock}.
    * <p>
    *
    * For description of parameters and exceptions thrown see
@@ -2836,7 +2829,7 @@ LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
     return lBlk;
   }
-  /** @see NameNode#getAdditionalDatanode(String, ExtendedBlock, DatanodeInfo[], DatanodeInfo[], int, String) */
+  /** @see ClientProtocol#getAdditionalDatanode */
   LocatedBlock getAdditionalDatanode(String src, final ExtendedBlock blk,
       final DatanodeInfo[] existings, final String[] storageIDs,
       final Set<Node> excludes,
@@ -3041,8 +3034,10 @@ private boolean completeFileInternal(String src,
    * Save allocated block at the given pending filename
    *
    * @param src path to the file
    * @param inodesInPath representing each of the components of src.
-   *                     The last INode is the INode for the file.
+   *                     The last INode is the INode for {@code src} file.
+   * @param newBlock newly allocated block to be save
+   * @param targets target datanodes where replicas of the new block is placed
    * @throws QuotaExceededException If addition of block exceeds space quota
    */
   BlockInfo saveAllocatedBlock(String src, INodesInPath inodes,
@@ -5471,7 +5466,7 @@ public void checkSuperuserPrivilege()
   /**
    * Check whether current user have permissions to access the path. For more
    * details of the parameters, see
-   * {@link FSPermissionChecker#checkPermission()}.
+   * {@link FSPermissionChecker#checkPermission}.
    */
   private void checkPermission(FSPermissionChecker pc,
       String path, boolean doCheckOwner, FsAction ancestorAccess,
@@ -5484,7 +5479,7 @@ private void checkPermission(FSPermissionChecker pc,
   /**
    * Check whether current user have permissions to access the path. For more
    * details of the parameters, see
-   * {@link FSPermissionChecker#checkPermission()}.
+   * {@link FSPermissionChecker#checkPermission}.
    */
   private void checkPermission(FSPermissionChecker pc,
       String path, boolean doCheckOwner, FsAction ancestorAccess,
@@ -5981,7 +5976,9 @@ void updatePipeline(String clientName, ExtendedBlock oldBlock,
     LOG.info("updatePipeline(" + oldBlock + ") successfully to " + newBlock);
   }
-  /** @see #updatePipeline(String, ExtendedBlock, ExtendedBlock, DatanodeID[]) */
+  /**
+   * @see #updatePipeline(String, ExtendedBlock, ExtendedBlock, DatanodeID[], String[])
+   */
   private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock,
       ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs,
       boolean logRetryCache)
@@ -6102,10 +6099,9 @@ public String toString() {
   }
   /**
    * @param path Restrict corrupt files to this portion of namespace.
-   * @param startBlockAfter Support for continuation; the set of files we return
-   * back is ordered by blockid; startBlockAfter tells where to start from
+   * @param cookieTab Support for continuation; cookieTab tells where
+   *                  to start from
    * @return a list in which each entry describes a corrupt file/block
-   * @throws AccessControlException
    * @throws IOException
    */
   Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
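
Clients reach this through DistributedFileSystem#listCorruptFileBlocks, whose iterator resubmits the continuation cookie internally so callers never touch cookieTab directly. A sketch, assuming a configured client:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class ListCorruptSketch {
      public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        // Each returned Path is a file with at least one corrupt block.
        RemoteIterator<Path> corrupt = dfs.listCorruptFileBlocks(new Path("/"));
        while (corrupt.hasNext()) {
          System.out.println("corrupt: " + corrupt.next());
        }
      }
    }
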
@@ -6200,7 +6196,7 @@ DelegationTokenSecretManager getDelegationTokenSecretManager() {
   }
   /**
-   * @param renewer
+   * @param renewer Renewer information
    * @return Token<DelegationTokenIdentifier>
    * @throws IOException
    */
@@ -6243,7 +6239,7 @@ Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
   /**
    *
-   * @param token
+   * @param token delegation token
    * @return New expiryTime of the token
    * @throws InvalidToken
    * @throws IOException
@@ -6277,7 +6273,7 @@ long renewDelegationToken(Token<DelegationTokenIdentifier> token)
   /**
    *
-   * @param token
+   * @param token delegation token that needs to be canceled
    * @throws IOException
    */
   void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
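
The get/renew/cancel trio documented in these hunks maps onto the public token API. A sketch, assuming security is enabled and that "renewerUser" (a hypothetical principal) is permitted to renew:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.token.Token;

    public class DelegationTokenSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Token<?> token = fs.getDelegationToken("renewerUser");
        long newExpiry = token.renew(conf);  // must run as the named renewer
        System.out.println("token now expires at " + newExpiry);
        token.cancel(conf);                  // invalidate once no longer needed
      }
    }
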
@@ -6571,7 +6567,8 @@ public String getDeadNodes() {
   /**
    * Returned information is a JSON representation of map with host name as the
-   * key and value is a map of decomisioning node attribute keys to its values
+   * key and value is a map of decommissioning node attribute keys to its
+   * values
    */
   @Override // NameNodeMXBean
   public String getDecomNodes() {
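
A sketch of reading this JSON in-process via JMX; remote monitors would typically use a JMXConnector or the NameNode's /jmx HTTP servlet instead. The bean name and attribute follow the usual NameNodeMXBean registration, but treat them as assumptions to verify against your deployment:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class DecomNodesSketch {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // Assumed bean name, per the conventional NameNodeMXBean registration.
        ObjectName name =
            new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        String json = (String) mbs.getAttribute(name, "DecomNodes");
        System.out.println(json);  // hostname -> attribute map, as JSON
      }
    }
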
@@ -6855,7 +6852,7 @@ public SnapshotManager getSnapshotManager() {
     return snapshotManager;
   }
-  /** Allow snapshot on a directroy. */
+  /** Allow snapshot on a directory. */
   void allowSnapshot(String path) throws SafeModeException, IOException {
     checkOperation(OperationCategory.WRITE);
     writeLock();
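
allowSnapshot has a public counterpart on DistributedFileSystem. A sketch, assuming superuser privileges and a hypothetical directory; snapshots then appear under the directory's .snapshot path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        Path dir = new Path("/user/demo");           // hypothetical directory
        dfs.allowSnapshot(dir);                      // admin-only operation
        Path snap = dfs.createSnapshot(dir, "s1");   // -> /user/demo/.snapshot/s1
        System.out.println("created " + snap);
      }
    }
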
@@ -7188,7 +7185,7 @@ RollingUpgradeInfo startRollingUpgrade() throws IOException {
   /**
    * Update internal state to indicate that a rolling upgrade is in progress.
-   * @param startTime
+   * @param startTime start time of the rolling upgrade
    */
   void startRollingUpgradeInternal(long startTime)
       throws IOException {
@@ -7211,7 +7208,7 @@ private void startRollingUpgradeInternalForNonHA(long startTime)
     }
     checkRollingUpgrade("start rolling upgrade");
     getFSImage().checkUpgrade(this);
-    // in non-HA setup, we do an extra ckpt to generate a rollback image
+    // in non-HA setup, we do an extra checkpoint to generate a rollback image
     getFSImage().saveNamespace(this, NameNodeFile.IMAGE_ROLLBACK, null);
     LOG.info("Successfully saved namespace for preparing rolling upgrade.");