diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index ad43b45ee1d..29ea1de02a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -419,7 +419,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       LOG.error(errorMsg);
       throw new IOException(errorMsg);
     }
-    volumeMap.addAll(replicaMap);
+    volumeMap.mergeAll(replicaMap);
     storageMap.put(sd.getStorageUuid(),
         new DatanodeStorage(sd.getStorageUuid(),
             DatanodeStorage.State.NORMAL,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index 73d3c600b9c..786078f135e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -166,6 +166,21 @@ class ReplicaMap {
   void addAll(ReplicaMap other) {
     map.putAll(other.map);
   }
+
+  /**
+   * Merge all entries from the given replica map into the local replica map.
+   * Unlike {@link #addAll(ReplicaMap)}, this does not replace the replica
+   * set of a block pool that already exists locally.
+   */
+  void mergeAll(ReplicaMap other) {
+    other.map.forEach(
+        (bp, replicaInfos) -> {
+          replicaInfos.forEach(
+              replicaInfo -> add(bp, replicaInfo)
+          );
+        }
+    );
+  }
 
   /**
    * Remove the replica's meta information from the map that matches
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index c19c8493194..609e16c5168 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -420,6 +420,74 @@ public class TestDataNodeHotSwapVolumes {
     verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
   }
 
+  /**
+   * Test re-adding one volume with some blocks on a running MiniDFSCluster
+   * with only one NameNode to reproduce HDFS-13677.
+   */
+  @Test(timeout=60000)
+  public void testReAddVolumeWithBlocks()
+      throws IOException, ReconfigurationException,
+      InterruptedException, TimeoutException {
+    startDFSCluster(1, 1);
+    String bpid = cluster.getNamesystem().getBlockPoolId();
+    final int numBlocks = 10;
+
+    Path testFile = new Path("/test");
+    createFile(testFile, numBlocks);
+
+    List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
+        cluster.getAllBlockReports(bpid);
+    assertEquals(1, blockReports.size()); // 1 DataNode
+    assertEquals(2, blockReports.get(0).size()); // 2 volumes
+
+    // Now remove the second volume
+    DataNode dn = cluster.getDataNodes().get(0);
+    Collection<String> oldDirs = getDataDirs(dn);
+    String newDirs = oldDirs.iterator().next(); // Keep the first volume.
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
+    assertFileLocksReleased(
+        new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
+
+    // Now create another file - the first volume should have 15 blocks
+    // and 5 blocks on the previously removed volume
+    createFile(new Path("/test2"), numBlocks);
+    dn.scheduleAllBlockReport(0);
+    blockReports = cluster.getAllBlockReports(bpid);
+
+    assertEquals(1, blockReports.size()); // 1 DataNode
+    assertEquals(1, blockReports.get(0).size()); // 1 volume
+    for (BlockListAsLongs blockList : blockReports.get(0).values()) {
+      assertEquals(15, blockList.getNumberOfBlocks());
+    }
+
+    // Now add the original volume back again and ensure 15 blocks are reported
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, String.join(",", oldDirs)),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
+    dn.scheduleAllBlockReport(0);
+    blockReports = cluster.getAllBlockReports(bpid);
+
+    assertEquals(1, blockReports.size()); // 1 DataNode
+    assertEquals(2, blockReports.get(0).size()); // 2 volumes
+
+    // The order of the block reports is not guaranteed. As we expect 2, get the
+    // max block count and the min block count and then assert on that.
+    int minNumBlocks = Integer.MAX_VALUE;
+    int maxNumBlocks = Integer.MIN_VALUE;
+    for (BlockListAsLongs blockList : blockReports.get(0).values()) {
+      minNumBlocks = Math.min(minNumBlocks, blockList.getNumberOfBlocks());
+      maxNumBlocks = Math.max(maxNumBlocks, blockList.getNumberOfBlocks());
+    }
+    assertEquals(5, minNumBlocks);
+    assertEquals(15, maxNumBlocks);
+  }
+
   @Test(timeout=60000)
   public void testAddVolumesDuringWrite()
       throws IOException, InterruptedException, TimeoutException,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
index 4fa91b040e9..1059c08079d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
@@ -108,4 +108,29 @@ public class TestReplicaMap {
     map.add(bpid, new FinalizedReplica(block, null, null));
     assertNotNull(map.remove(bpid, block.getBlockId()));
   }
+
+  @Test
+  public void testMergeAll() {
+    ReplicaMap tmpReplicaMap = new ReplicaMap(new AutoCloseableLock());
+    Block tmpBlock = new Block(5678, 5678, 5678);
+    tmpReplicaMap.add(bpid, new FinalizedReplica(tmpBlock, null, null));
+
+    map.mergeAll(tmpReplicaMap);
+    // mergeAll() keeps the block added in setup (1234) and adds 5678.
+    assertNotNull(map.get(bpid, 1234));
+    assertNotNull(map.get(bpid, 5678));
+  }
+
+  @Test
+  public void testAddAll() {
+    ReplicaMap tmpReplicaMap = new ReplicaMap(new AutoCloseableLock());
+    Block tmpBlock = new Block(5678, 5678, 5678);
+    tmpReplicaMap.add(bpid, new FinalizedReplica(tmpBlock, null, null));
+
+    map.addAll(tmpReplicaMap);
+    // addAll() replaces the whole replica set for the block pool, so the
+    // block added in setup (1234) is lost and only 5678 remains.
+    assertNull(map.get(bpid, 1234));
+    assertNotNull(map.get(bpid, 5678));
+  }
 }
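
For reviewers who want the root cause in isolation: below is a minimal, self-contained sketch, not code from the patch. The class and variable names are hypothetical, and plain nested HashMaps stand in for ReplicaMap's internal per-block-pool replica sets. It shows why addAll(), which delegates to Map.putAll() on the outer map keyed by block pool, overwrites the replicas a DataNode already tracks for a block pool when a volume is re-added, while mergeAll() adds each replica individually and so preserves them.

// Standalone sketch (hypothetical names): addAll() vs mergeAll() semantics.
import java.util.HashMap;
import java.util.Map;

public class MergeVsPutAll {
  public static void main(String[] args) {
    // volumeMap already tracks block 1234 for block pool "bp-1".
    Map<String, Map<Long, String>> volumeMap = new HashMap<>();
    volumeMap.computeIfAbsent("bp-1", k -> new HashMap<>())
        .put(1234L, "replica-on-vol1");

    // A re-added volume reports block 5678 for the same block pool.
    Map<String, Map<Long, String>> fromNewVolume = new HashMap<>();
    fromNewVolume.computeIfAbsent("bp-1", k -> new HashMap<>())
        .put(5678L, "replica-on-vol2");

    // addAll() semantics: putAll() on the outer map swaps out the whole
    // "bp-1" entry, so block 1234 disappears.
    Map<String, Map<Long, String>> afterAddAll = new HashMap<>(volumeMap);
    afterAddAll.putAll(fromNewVolume);
    System.out.println(afterAddAll.get("bp-1").keySet()); // only 5678

    // mergeAll() semantics: each replica is added individually, so the
    // existing "bp-1" entry is kept and extended.
    Map<String, Map<Long, String>> afterMergeAll = new HashMap<>();
    volumeMap.forEach((bp, r) ->
        afterMergeAll.computeIfAbsent(bp, k -> new HashMap<>()).putAll(r));
    fromNewVolume.forEach((bp, r) ->
        afterMergeAll.computeIfAbsent(bp, k -> new HashMap<>()).putAll(r));
    System.out.println(afterMergeAll.get("bp-1").keySet()); // 1234 and 5678
  }
}

This mirrors what the testReAddVolumeWithBlocks test observes end to end: with putAll() semantics the re-added volume's report clobbers the 15 blocks the remaining volume holds, whereas with merge semantics both the 15 and the 5 blocks are reported.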