diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4c1287c4ad5..3df42ae1f09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -896,6 +896,9 @@ Release 2.9.0 - UNRELEASED
     HDFS-9129. Move the safemode block count into BlockManager. (Mingliang Liu
     via jing9)
 
+    HDFS-9281. Change TestDeleteBlockPool to not explicitly use File to check
+    block pool existence. (lei)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index 2a8119f02f0..f5bf4e9f6a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -260,4 +260,20 @@ void changeStoredGenerationStamp(ExtendedBlock block, long newGenStamp)
    * Get the number of pending async deletions.
    */
   long getPendingAsyncDeletions();
+
+  /**
+   * Verify the existence of the block pool.
+   *
+   * @param bpid block pool ID
+   * @throws IOException if the block pool does not exist.
+   */
+  void verifyBlockPoolExists(String bpid) throws IOException;
+
+  /**
+   * Verify that the block pool does not exist.
+   *
+   * @param bpid block pool ID
+   * @throws IOException if the block pool does exist.
+   */
+  void verifyBlockPoolMissing(String bpid) throws IOException;
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
index bc15f51a98b..e3b4267e526 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
@@ -20,10 +20,8 @@
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -68,11 +66,6 @@ public void testDeleteBlockPool() throws Exception {
       String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
       String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
 
-      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
-      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
-      File dn2StorageDir1 = cluster.getInstanceStorageDir(1, 0);
-      File dn2StorageDir2 = cluster.getInstanceStorageDir(1, 1);
-
       // Although namenode is shutdown, the bp offerservice is still running
       try {
         dn1.deleteBlockPool(bpid1, true);
@@ -92,21 +85,17 @@ public void testDeleteBlockPool() throws Exception {
       } catch (IOException expected) {
       }
 
-      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
-      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid1);
 
       dn1.deleteBlockPool(bpid1, true);
 
-      verifyBlockPoolDirectories(false, dn1StorageDir1, bpid1);
-      verifyBlockPoolDirectories(false, dn1StorageDir2, bpid1);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolMissing(bpid1);
 
       fs1.delete(new Path("/alpha"), true);
 
       // Wait till all blocks are deleted from the dn2 for bpid1.
-      File finalDir1 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid1);
-      File finalDir2 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid2);
-      while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1)) ||
-          (!DatanodeUtil.dirNoFilesRecursive(finalDir2))) {
+      while (cluster.getFsDatasetTestUtils(1).getStoredReplicas(bpid1)
+          .hasNext()) {
         try {
           Thread.sleep(3000);
         } catch (Exception ignored) {
@@ -124,22 +113,18 @@ public void testDeleteBlockPool() throws Exception {
       dn2.refreshNamenodes(nn1Conf);
       assertEquals(1, dn2.getAllBpOs().size());
-
-      verifyBlockPoolDirectories(true, dn2StorageDir1, bpid1);
-      verifyBlockPoolDirectories(true, dn2StorageDir2, bpid1);
+
+      cluster.getFsDatasetTestUtils(1).verifyBlockPoolExists(bpid1);
 
       // Now deleteBlockPool must succeed with force as false, because no
       // blocks exist for bpid1 and bpOfferService is also stopped for bpid1.
       dn2.deleteBlockPool(bpid1, false);
-
-      verifyBlockPoolDirectories(false, dn2StorageDir1, bpid1);
-      verifyBlockPoolDirectories(false, dn2StorageDir2, bpid1);
+
+      cluster.getFsDatasetTestUtils(1).verifyBlockPoolMissing(bpid1);
 
       //bpid2 must not be impacted
-      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
-      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
-      verifyBlockPoolDirectories(true, dn2StorageDir1, bpid2);
-      verifyBlockPoolDirectories(true, dn2StorageDir2, bpid2);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid2);
+      cluster.getFsDatasetTestUtils(1).verifyBlockPoolExists(bpid2);
 
       //make sure second block pool is running all fine
       Path gammaFile = new Path("/gamma");
       DFSTestUtil.createFile(fs2, gammaFile, 1024, (short) 1, 55);
@@ -178,9 +163,6 @@ public void testDfsAdminDeleteBlockPool() throws Exception {
       String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
       String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
 
-      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
-      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
-
       Configuration nn1Conf = cluster.getConfiguration(0);
       nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
       dn1.refreshNamenodes(nn1Conf);
@@ -193,19 +175,16 @@ public void testDfsAdminDeleteBlockPool() throws Exception {
       int ret = admin.run(args);
       assertFalse(0 == ret);
 
-      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
-      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid2);
 
       String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
       ret = admin.run(forceArgs);
       assertEquals(0, ret);
-
-      verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
-      verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);
+
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolMissing(bpid2);
 
       //bpid1 remains good
-      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
-      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid1);
 
     } finally {
       if (cluster != null) {
@@ -213,24 +192,4 @@ public void testDfsAdminDeleteBlockPool() throws Exception {
       }
     }
   }
-
-  private void verifyBlockPoolDirectories(boolean shouldExist,
-      File storageDir, String bpid) throws IOException {
-    File bpDir = new File(storageDir, DataStorage.STORAGE_DIR_CURRENT + "/"
-        + bpid);
-
-    if (shouldExist == false) {
-      assertFalse(bpDir.exists());
-    } else {
-      File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
-      File finalizedDir = new File(bpCurrentDir,
-          DataStorage.STORAGE_DIR_FINALIZED);
-      File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
-      File versionFile = new File(bpCurrentDir, "VERSION");
-
-      assertTrue(finalizedDir.isDirectory());
-      assertTrue(rbwDir.isDirectory());
-      assertTrue(versionFile.exists());
-    }
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index c85ca2b5600..f3c740a3200 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
@@ -405,4 +406,42 @@ public Iterator<Replica> getStoredReplicas(String bpid) throws IOException {
   public long getPendingAsyncDeletions() {
     return dataset.asyncDiskService.countPendingDeletions();
   }
+
+  @Override
+  public void verifyBlockPoolExists(String bpid) throws IOException {
+    FsVolumeImpl volume;
+    try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
+      volume = (FsVolumeImpl) references.get(0);
+    }
+    File bpDir = new File(volume.getCurrentDir(), bpid);
+    File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
+    File finalizedDir = new File(bpCurrentDir,
+        DataStorage.STORAGE_DIR_FINALIZED);
+    File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
+    File versionFile = new File(bpCurrentDir, "VERSION");
+
+    if (!finalizedDir.isDirectory()) {
+      throw new IOException(finalizedDir.getPath() + " is not a directory.");
+    }
+    if (!rbwDir.isDirectory()) {
+      throw new IOException(rbwDir.getPath() + " is not a directory.");
+    }
+    if (!versionFile.exists()) {
+      throw new IOException(
+          "Version file: " + versionFile.getPath() + " does not exist.");
+    }
+  }
+
+  @Override
+  public void verifyBlockPoolMissing(String bpid) throws IOException {
+    FsVolumeImpl volume;
+    try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
+      volume = (FsVolumeImpl) references.get(0);
+    }
+    File bpDir = new File(volume.getCurrentDir(), bpid);
+    if (bpDir.exists()) {
+      throw new IOException(
+          String.format("Block pool directory %s exists", bpDir));
+    }
+  }
 }
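
Usage note (reviewer sketch, not part of the patch): the new methods let a test
assert block-pool state through the FsDatasetTestUtils abstraction instead of
poking at storage directories with File. A minimal, hypothetical example,
assuming a running single-datanode MiniDFSCluster and the test-scope classes
touched above; the class name BlockPoolVerificationExample is invented for
illustration:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.HdfsConfiguration;
  import org.apache.hadoop.hdfs.MiniDFSCluster;
  import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;

  public class BlockPoolVerificationExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new HdfsConfiguration();
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
      try {
        cluster.waitActive();
        String bpid = cluster.getNamesystem().getBlockPoolId();
        // Ask datanode 0's dataset, not the raw filesystem, about the pool.
        FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0);
        // Throws IOException if the pool layout (finalized/, rbw/, VERSION)
        // is absent or incomplete on the dataset's first volume.
        utils.verifyBlockPoolExists(bpid);
      } finally {
        cluster.shutdown();
      }
    }
  }

This keeps tests independent of the FsDataset implementation's on-disk layout,
which is the stated point of HDFS-9281.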