From 32e76decb5605fbad491f3fc81704f032f077be9 Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Thu, 24 Apr 2014 05:08:14 +0000
Subject: [PATCH] HDFS-6276. Merge r1589586 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1589588 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  2 ++
 .../server/blockmanagement/BlockManager.java       |  9 +++----
 .../hdfs/server/datanode/BlockReceiver.java        |  9 +++----
 .../server/datanode/DataBlockScanner.java          |  3 +--
 .../fsdataset/impl/RollingLogsImpl.java            |  8 +++----
 .../hdfs/server/namenode/CachePool.java            |  4 +---
 .../hdfs/server/namenode/FSImageFormat.java        |  4 +---
 .../hdfs/server/namenode/FSNamesystem.java         | 20 ++++------------
 .../hdfs/server/namenode/INodeFile.java            | 24 ++++++++++---------
 .../SaveNamespaceCancelledException.java           |  2 +-
 10 files changed, 32 insertions(+), 53 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9e1515d615b..28e6f656dd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -130,6 +130,8 @@ Release 2.5.0 - UNRELEASED
     HDFS-6217. Webhdfs PUT operations may not work via a http proxy.
     (Daryn Sharp via kihwal)
 
+    HDFS-6276. Remove unnecessary conditions and null check. (suresh)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9d0aa17143e..7a9079f4199 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -423,10 +423,8 @@ public class BlockManager {
 
   public void close() {
     try {
-      if (replicationThread != null) {
-        replicationThread.interrupt();
-        replicationThread.join(3000);
-      }
+      replicationThread.interrupt();
+      replicationThread.join(3000);
     } catch (InterruptedException ie) {
     }
     datanodeManager.close();
@@ -818,7 +816,7 @@ public class BlockManager {
     for(DatanodeStorageInfo storage : blocksMap.getStorages(blk)) {
       final DatanodeDescriptor d = storage.getDatanodeDescriptor();
       final boolean replicaCorrupt = corruptReplicas.isReplicaCorrupt(blk, d);
-      if (isCorrupt || (!isCorrupt && !replicaCorrupt))
+      if (isCorrupt || (!replicaCorrupt))
         machines[j++] = storage;
     }
   }
@@ -2245,7 +2243,6 @@ public class BlockManager {
       // it will happen in next block report otherwise.
       return block;
     }
-    assert storedBlock != null : "Block must be stored by now";
     BlockCollection bc = storedBlock.getBlockCollection();
     assert bc != null : "Block must belong to a file";
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 14ed254cc9f..0e77d286bec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -86,8 +86,7 @@ class BlockReceiver implements Closeable {
   private int bytesPerChecksum;
   private int checksumSize;
 
-  private final PacketReceiver packetReceiver =
-      new PacketReceiver(false);
+  private final PacketReceiver packetReceiver = new PacketReceiver(false);
 
   protected final String inAddr;
   protected final String myAddr;
@@ -268,10 +267,8 @@ class BlockReceiver implements Closeable {
    */
   @Override
   public void close() throws IOException {
-    if (packetReceiver != null) {
-      packetReceiver.close();
-    }
-
+    packetReceiver.close();
+
     IOException ioe = null;
     if (syncOnClose && (out != null || checksumOut != null)) {
       datanode.metrics.incrFsyncCount();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
index f9f8d328b4f..bee362582ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
@@ -131,8 +131,7 @@ public class DataBlockScanner implements Runnable {
 
   private BlockPoolSliceScanner getNextBPScanner(String currentBpId) {
     String nextBpId = null;
-    while ((nextBpId == null) && datanode.shouldRun
-        && !blockScannerThread.isInterrupted()) {
+    while (datanode.shouldRun && !blockScannerThread.isInterrupted()) {
       waitForInit();
       synchronized (this) {
         if (getBlockPoolSetSize() > 0) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java
index ed6bcaffdb2..121127d849e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java
@@ -188,11 +188,9 @@ class RollingLogsImpl implements RollingLogs {
         if (reader != null && (line = reader.readLine()) != null) {
           return;
         }
-        if (line == null) {
-          // move to the next file.
-          if (openFile()) {
-            readNext();
-          }
+        // move to the next file.
+        if (openFile()) {
+          readNext();
         }
       } finally {
         if (!hasNext()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
index 87c546e3c5a..585124f93b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
@@ -109,9 +109,7 @@ public final class CachePool {
     UserGroupInformation ugi = null;
     String ownerName = info.getOwnerName();
     if (ownerName == null) {
-      if (ugi == null) {
-        ugi = NameNode.getRemoteUser();
-      }
+      ugi = NameNode.getRemoteUser();
       ownerName = ugi.getShortUserName();
     }
     String groupName = info.getGroupName();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index ee0b742465e..a08216ed5cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -655,7 +655,7 @@ public class FSImageFormat {
       clientName = FSImageSerialization.readString(in);
       clientMachine = FSImageSerialization.readString(in);
       // convert the last block to BlockUC
-      if (blocks != null && blocks.length > 0) {
+      if (blocks.length > 0) {
         BlockInfo lastBlk = blocks[blocks.length - 1];
         blocks[blocks.length - 1] = new BlockInfoUnderConstruction(
             lastBlk, replication);
@@ -1023,7 +1023,6 @@ public class FSImageFormat {
     if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
       if (Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
         Preconditions.checkArgument(
-            renameReservedMap != null &&
             renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
             RESERVED_ERROR_MSG);
         component =
@@ -1044,7 +1043,6 @@ public class FSImageFormat {
     if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
       if (Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
         Preconditions.checkArgument(
-            renameReservedMap != null &&
             renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
             RESERVED_ERROR_MSG);
         final String renameString = renameReservedMap
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2e663f2405c..c9f0b452170 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1095,9 +1095,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     writeLock();
     try {
       stopSecretManager();
-      if (leaseManager != null) {
-        leaseManager.stopMonitor();
-      }
+      leaseManager.stopMonitor();
       if (nnrmthread != null) {
         ((NameNodeResourceMonitor) nnrmthread.getRunnable()).stopMonitor();
         nnrmthread.interrupt();
@@ -3775,20 +3773,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     final BlockInfo lastBlock = pendingFile.getLastBlock();
     BlockUCState lastBlockState = lastBlock.getBlockUCState();
     BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
-    boolean penultimateBlockMinReplication;
-    BlockUCState penultimateBlockState;
-    if (penultimateBlock == null) {
-      penultimateBlockState = BlockUCState.COMPLETE;
-      // If penultimate block doesn't exist then its minReplication is met
-      penultimateBlockMinReplication = true;
-    } else {
-      penultimateBlockState = BlockUCState.COMMITTED;
-      penultimateBlockMinReplication =
-        blockManager.checkMinReplication(penultimateBlock);
-    }
-    assert penultimateBlockState == BlockUCState.COMPLETE ||
-           penultimateBlockState == BlockUCState.COMMITTED :
-           "Unexpected state of penultimate block in " + src;
+
+    // If penultimate block doesn't exist then its minReplication is met
+    boolean penultimateBlockMinReplication = penultimateBlock == null ? true :
+        blockManager.checkMinReplication(penultimateBlock);
 
     switch(lastBlockState) {
     case COMPLETE:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 831ab21b7a1..a38e8d9772f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -435,17 +435,19 @@ public class INodeFile extends INodeWithAdditionalFields
           removedINodes, countDiffChange);
     }
     Quota.Counts counts = Quota.Counts.newInstance();
-    if (snapshot == CURRENT_STATE_ID && priorSnapshotId == NO_SNAPSHOT_ID) {
-      // this only happens when deleting the current file and the file is not
-      // in any snapshot
-      computeQuotaUsage(counts, false);
-      destroyAndCollectBlocks(collectedBlocks, removedINodes);
-    } else if (snapshot == CURRENT_STATE_ID && priorSnapshotId != NO_SNAPSHOT_ID) {
-      // when deleting the current file and the file is in snapshot, we should
-      // clean the 0-sized block if the file is UC
-      FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
-      if (uc != null) {
-        uc.cleanZeroSizeBlock(this, collectedBlocks);
+    if (snapshot == CURRENT_STATE_ID) {
+      if (priorSnapshotId == NO_SNAPSHOT_ID) {
+        // this only happens when deleting the current file and the file is not
+        // in any snapshot
+        computeQuotaUsage(counts, false);
+        destroyAndCollectBlocks(collectedBlocks, removedINodes);
+      } else {
+        // when deleting the current file and the file is in snapshot, we should
+        // clean the 0-sized block if the file is UC
+        FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
+        if (uc != null) {
+          uc.cleanZeroSizeBlock(this, collectedBlocks);
+        }
       }
     }
     return counts;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceCancelledException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceCancelledException.java
index 5b49f0ee47c..3d4e6d13c3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceCancelledException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceCancelledException.java
@@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
-import org.apache.hadoop.classification.InterfaceAudience;;
+import org.apache.hadoop.classification.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class SaveNamespaceCancelledException extends IOException {
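
Taken together, these hunks delete conditions that cannot affect behavior: null checks on fields that are always initialized before use (replicationThread, packetReceiver, leaseManager), guards already implied by the surrounding control flow, and boolean subterms made redundant by short-circuit evaluation. The BlockManager hunk, for example, rests on the identity a || (!a && b) == a || b. Below is a small standalone sketch (a hypothetical class, not part of this patch or of HDFS) that verifies the identity over all inputs:

// RedundantConditionCheck.java -- hypothetical demo class, not part of HDFS.
// Exhaustively verifies the simplification applied in the BlockManager hunk:
//   isCorrupt || (!isCorrupt && !replicaCorrupt)  ==  isCorrupt || !replicaCorrupt
public class RedundantConditionCheck {
  public static void main(String[] args) {
    boolean[] values = {false, true};
    for (boolean isCorrupt : values) {
      for (boolean replicaCorrupt : values) {
        boolean original = isCorrupt || (!isCorrupt && !replicaCorrupt);
        boolean simplified = isCorrupt || (!replicaCorrupt);
        if (original != simplified) {
          throw new AssertionError("mismatch at isCorrupt=" + isCorrupt
              + ", replicaCorrupt=" + replicaCorrupt);
        }
      }
    }
    System.out.println("a || (!a && b) == a || b holds for all boolean inputs");
  }
}

The FSNamesystem hunk follows the same reasoning one step further: penultimateBlockState was provably COMPLETE or COMMITTED in both branches, so the assert was dead code, and the surviving ternary short-circuits exactly as penultimateBlock == null || blockManager.checkMinReplication(penultimateBlock) would.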