diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 740f9ca0f71..6d142f9e64a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -839,8 +839,13 @@ private void dumpBlockMeta(Block block, PrintWriter out) {
         new ArrayList<>();
     NumberReplicas numReplicas = new NumberReplicas();
 
+    BlockInfo blockInfo = getStoredBlock(block);
+    if (blockInfo == null) {
+      out.println("Block " + block + " is Null");
+      return;
+    }
     // source node returned is not used
-    chooseSourceDatanodes(getStoredBlock(block), containingNodes,
+    chooseSourceDatanodes(blockInfo, containingNodes,
         containingLiveReplicasNodes, numReplicas,
         new ArrayList<Byte>(), LowRedundancyBlocks.LEVEL);
 
@@ -849,7 +854,7 @@ private void dumpBlockMeta(Block block, PrintWriter out) {
     assert containingLiveReplicasNodes.size() >= numReplicas.liveReplicas();
     int usableReplicas = numReplicas.liveReplicas() +
         numReplicas.decommissionedAndDecommissioning();
-    
+
     if (block instanceof BlockInfo) {
       BlockCollection bc = getBlockCollection((BlockInfo)block);
       String fileName = (bc == null) ? "[orphaned]" : bc.getName();
@@ -1765,8 +1770,8 @@ public void setPostponeBlocksFromFuture(boolean postpone) {
     this.shouldPostponeBlocksFromFuture = postpone;
   }
 
-
-  private void postponeBlock(Block blk) {
+  @VisibleForTesting
+  void postponeBlock(Block blk) {
     postponedMisreplicatedBlocks.add(blk);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8659ea4f7b3..d0fdbac8228 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1768,10 +1768,10 @@ public BlocksWithLocations getBlocks(DatanodeID datanode, long size, long
   void metaSave(String filename) throws IOException {
     String operationName = "metaSave";
     checkSuperuserPrivilege(operationName);
-    checkOperation(OperationCategory.UNCHECKED);
+    checkOperation(OperationCategory.READ);
    writeLock();
     try {
-      checkOperation(OperationCategory.UNCHECKED);
+      checkOperation(OperationCategory.READ);
       File file = new File(System.getProperty("hadoop.log.dir"), filename);
       PrintWriter out = new PrintWriter(new BufferedWriter(
           new OutputStreamWriter(new FileOutputStream(file), Charsets.UTF_8)));
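Note on the FSNamesystem change above: switching metaSave from OperationCategory.UNCHECKED to READ is what makes a standby NameNode refuse the call. The refusal travels back to the client as a StandbyException wrapped in a RemoteException, which the DFSAdmin hunk below unwraps. A minimal, self-contained sketch of that unwrap behavior (the class name UnwrapDemo and the hard-coded message are illustrative only; RemoteException and StandbyException are the real hadoop-common APIs):

    import org.apache.hadoop.ipc.RemoteException;
    import org.apache.hadoop.ipc.StandbyException;

    public class UnwrapDemo {
      public static void main(String[] args) {
        // Simulate what the RPC layer hands the client when a standby
        // NameNode rejects an operation: the server-side exception arrives
        // wrapped in a RemoteException that records the original class name.
        RemoteException re = new RemoteException(
            StandbyException.class.getName(),
            "Operation category READ is not supported in state standby");

        // unwrapRemoteException re-creates the original exception when its
        // class is among the lookup types; otherwise it returns the
        // RemoteException itself, so an instanceof check tells them apart.
        Exception unwrapped = re.unwrapRemoteException(StandbyException.class);
        System.out.println(unwrapped instanceof StandbyException); // true
      }
    }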
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index aa67e72dd11..d1f83622498 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -94,6 +94,7 @@
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.SecurityUtil;
@@ -1537,11 +1538,20 @@ public int metaSave(String[] argv, int idx) throws IOException {
         nsId, ClientProtocol.class);
     List<IOException> exceptions = new ArrayList<>();
     for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-      try{
+      try {
         proxy.getProxy().metaSave(pathname);
         System.out.println("Created metasave file " + pathname
             + " in the log directory of namenode " + proxy.getAddress());
-      } catch (IOException ioe){
+      } catch (RemoteException re) {
+        Exception unwrapped = re.unwrapRemoteException(
+            StandbyException.class);
+        if (unwrapped instanceof StandbyException) {
+          System.out.println("Skip Standby NameNode, since it cannot perform"
+              + " metasave operation");
+        } else {
+          throw re;
+        }
+      } catch (IOException ioe) {
         System.out.println("Created metasave file " + pathname
             + " in the log directory of namenode " + proxy.getAddress()
             + " failed");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index af840d691ac..de0e1a69cd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -1478,6 +1478,38 @@ private void verifyPlacementPolicy(final MiniDFSCluster cluster,
     }
   }
 
+  /**
+   * Unit test to check that metaSave handles a postponed misreplicated
+   * block that is no longer present in the blocks map (e.g. after a
+   * racing delete) without throwing a NullPointerException.
+   */
+  @Test
+  public void testMetaSavePostponedMisreplicatedBlocks() throws IOException {
+    bm.postponeBlock(new Block());
+
+    File file = new File("test.log");
+    PrintWriter out = new PrintWriter(file);
+    bm.metaSave(out);
+    out.flush();
+    out.close();
+
+    BufferedReader reader = new BufferedReader(
+        new InputStreamReader(new FileInputStream(file)));
+    StringBuilder buffer = new StringBuilder();
+    String line;
+    try {
+      while ((line = reader.readLine()) != null) {
+        buffer.append(line);
+      }
+      String output = buffer.toString();
+      assertTrue("Metasave output should report the missing block",
+          output.contains("Block blk_0_0 is Null"));
+    } finally {
+      reader.close();
+      file.delete();
+    }
+  }
+
   @Test
   public void testMetaSaveMissingReplicas() throws Exception {
     List<DatanodeStorageInfo> origStorages = getStorages(0, 1);
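Side note on the test just added: the FileInputStream/BufferedReader plumbing mirrors what this test class already uses elsewhere. If the module's minimum Java level permits, the read-and-assert portion could be written with two NIO calls instead; a hypothetical drop-in fragment for the body of the test above (same file and assertion, not part of this patch):

    // Read the whole metasave output in one call, then assert on it.
    String output = new String(
        java.nio.file.Files.readAllBytes(file.toPath()),
        java.nio.charset.StandardCharsets.UTF_8);
    assertTrue("Metasave output should report the missing block",
        output.contains("Block blk_0_0 is Null"));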
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index b85a8d8b181..627ea0710df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -414,16 +414,21 @@ public void testSetNegativeBalancerBandwidth() throws Exception {
   @Test (timeout = 30000)
   public void testMetaSave() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(0);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
     assertEquals(err.toString().trim(), 0, exitCode);
-    String message = "Created metasave file dfs.meta in the log directory"
-        + " of namenode.*";
-    assertOutputMatches(message + newLine + message + newLine);
+    String messageFromActiveNN = "Created metasave file dfs.meta "
+        + "in the log directory of namenode.*";
+    String messageFromStandbyNN = "Skip Standby NameNode, since it "
+        + "cannot perform metasave operation";
+    assertOutputMatches(messageFromActiveNN + newLine
+        + messageFromStandbyNN + newLine);
   }
 
   @Test (timeout = 30000)
   public void testMetaSaveNN1UpNN2Down() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
     assertNotEquals(err.toString().trim(), 0, exitCode);
@@ -437,6 +442,7 @@ public void testMetaSaveNN1UpNN2Down() throws Exception {
   @Test (timeout = 30000)
   public void testMetaSaveNN1DownNN2Up() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(1);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
     assertNotEquals(err.toString().trim(), 0, exitCode);
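For reference, the behavior the updated testMetaSave asserts: against a healthy HA pair where nn0 has been transitioned to active, the command now reports one success and one skip, roughly along these lines (the address varies; the output shape is inferred from the strings in this patch):

    $ hdfs dfsadmin -metasave dfs.meta
    Created metasave file dfs.meta in the log directory of namenode <active-nn-address>
    Skip Standby NameNode, since it cannot perform metasave operation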