diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ebbba3b982f..8eb767031a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -838,8 +838,13 @@ public class BlockManager implements BlockStatsMXBean {
         new ArrayList<>();
     NumberReplicas numReplicas = new NumberReplicas();
+    BlockInfo blockInfo = getStoredBlock(block);
+    if (blockInfo == null) {
+      out.println("Block " + block + " is Null");
+      return;
+    }
     // source node returned is not used
-    chooseSourceDatanodes(getStoredBlock(block), containingNodes,
+    chooseSourceDatanodes(blockInfo, containingNodes,
         containingLiveReplicasNodes, numReplicas,
         new LinkedList<Byte>(), LowRedundancyBlocks.LEVEL);
@@ -848,7 +853,7 @@ public class BlockManager implements BlockStatsMXBean {
     assert containingLiveReplicasNodes.size() >= numReplicas.liveReplicas();
     int usableReplicas = numReplicas.liveReplicas() +
         numReplicas.decommissionedAndDecommissioning();
-
+
     if (block instanceof BlockInfo) {
       BlockCollection bc = getBlockCollection((BlockInfo)block);
       String fileName = (bc == null) ? "[orphaned]" : bc.getName();
@@ -1764,8 +1769,8 @@ public class BlockManager implements BlockStatsMXBean {
     this.shouldPostponeBlocksFromFuture = postpone;
   }
-
-  private void postponeBlock(Block blk) {
+  @VisibleForTesting
+  void postponeBlock(Block blk) {
     postponedMisreplicatedBlocks.add(blk);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 14e90d17eab..7c635ab528a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1756,10 +1756,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void metaSave(String filename) throws IOException {
     String operationName = "metaSave";
     checkSuperuserPrivilege(operationName);
-    checkOperation(OperationCategory.UNCHECKED);
+    checkOperation(OperationCategory.READ);
     writeLock();
     try {
-      checkOperation(OperationCategory.UNCHECKED);
+      checkOperation(OperationCategory.READ);
       File file = new File(System.getProperty("hadoop.log.dir"), filename);
       PrintWriter out = new PrintWriter(new BufferedWriter(
           new OutputStreamWriter(new FileOutputStream(file), Charsets.UTF_8)));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index aa67e72dd11..d1f83622498 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -94,6 +94,7 @@ import org.apache.hadoop.ipc.RefreshResponse;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.SecurityUtil;
@@ -1537,11 +1538,20 @@ public class DFSAdmin extends FsShell {
         nsId, ClientProtocol.class);
     List<IOException> exceptions = new ArrayList<>();
     for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-      try{
+      try {
         proxy.getProxy().metaSave(pathname);
         System.out.println("Created metasave file " + pathname
             + " in the log directory of namenode " + proxy.getAddress());
-      } catch (IOException ioe){
+      } catch (RemoteException re) {
+        Exception unwrapped = re.unwrapRemoteException(
+            StandbyException.class);
+        if (unwrapped instanceof StandbyException) {
+          System.out.println("Skip Standby NameNode, since it cannot perform"
+              + " metasave operation");
+        } else {
+          throw re;
+        }
+      } catch (IOException ioe) {
         System.out.println("Created metasave file " + pathname
             + " in the log directory of namenode " + proxy.getAddress()
             + " failed");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 0097da80c49..3bd41e26bb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -1478,6 +1478,41 @@ public class TestBlockManager {
     }
   }
 
+  /**
+   * Unit test to check that metaSave handles a block in the
+   * postponedMisreplicatedBlocks set that is no longer present in the
+   * BlockManager, avoiding a NullPointerException.
+   */
+  @Test
+  public void testMetaSavePostponedMisreplicatedBlocks() throws IOException {
+    bm.postponeBlock(new Block());
+
+    File file = new File("test.log");
+    PrintWriter out = new PrintWriter(file);
+
+    bm.metaSave(out);
+    out.flush();
+
+    FileInputStream fstream = new FileInputStream(file);
+    DataInputStream in = new DataInputStream(fstream);
+
+    BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+    StringBuffer buffer = new StringBuffer();
+    String line;
+    try {
+      while ((line = reader.readLine()) != null) {
+        buffer.append(line);
+      }
+      String output = buffer.toString();
+      assertTrue("Metasave output should report the null block ",
+          output.contains("Block blk_0_0 is Null"));
+
+    } finally {
+      reader.close();
+      file.delete();
+    }
+  }
+
   @Test
   public void testMetaSaveMissingReplicas() throws Exception {
     List<DatanodeStorageInfo> origStorages = getStorages(0, 1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index b85a8d8b181..627ea0710df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -414,16 +414,21 @@ public class TestDFSAdminWithHA {
   @Test (timeout = 30000)
   public void testMetaSave() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(0);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
     assertEquals(err.toString().trim(), 0, exitCode);
-    String message = "Created metasave file dfs.meta in the log directory"
-        + " of namenode.*";
-    assertOutputMatches(message + newLine + message + newLine);
+    String messageFromActiveNN = "Created metasave file dfs.meta "
+        + "in the log directory of namenode.*";
+    String messageFromStandbyNN = "Skip Standby NameNode, since it "
+        + "cannot perform metasave operation";
+    assertOutputMatches(messageFromActiveNN + newLine
+        + messageFromStandbyNN + newLine);
   }
 
   @Test (timeout = 30000)
   public void testMetaSaveNN1UpNN2Down() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
     assertNotEquals(err.toString().trim(), 0, exitCode);
@@ -437,6 +442,7 @@ public class TestDFSAdminWithHA {
   @Test (timeout = 30000)
   public void testMetaSaveNN1DownNN2Up() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(1);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
     assertNotEquals(err.toString().trim(), 0, exitCode);