HDFS-9281. Change TestDeleteBlockPool to not explicitly use File to check block pool existence. (lei)

(cherry picked from commit db975af5fae506eb3d586bd9201f76adb3fd1281)
Lei Xu 2015-12-14 10:59:55 -08:00
parent a1a723fdff
commit 0852d3562a
4 changed files with 72 additions and 55 deletions
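For context, the heart of the change: TestDeleteBlockPool used to assert on the DataNode's raw on-disk directory layout through java.io.File, which ties the test to the default FsDataset implementation. With this patch it goes through the FsDatasetTestUtils abstraction instead. A condensed before/after sketch (identifiers are taken from the diff below; the surrounding MiniDFSCluster setup is assumed):

    // Before: hard-codes the default FsDataset's directory layout.
    File bpDir = new File(storageDir,
        DataStorage.STORAGE_DIR_CURRENT + "/" + bpid);
    assertTrue(bpDir.exists());

    // After: asks the dataset implementation, whatever its storage layout.
    cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid);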

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -23,6 +23,9 @@ Release 2.9.0 - UNRELEASED
     HDFS-9129. Move the safemode block count into BlockManager. (Mingliang Liu
     via jing9)
 
+    HDFS-9281. Change TestDeleteBlockPool to not explicitly use File to check
+    block pool existence. (lei)
+
   OPTIMIZATIONS
 
   BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java

@@ -260,4 +260,20 @@ public interface FsDatasetTestUtils {
    * Get the number of pending async deletions.
    */
   long getPendingAsyncDeletions();
+
+  /**
+   * Verify the existence of the block pool.
+   *
+   * @param bpid block pool ID
+   * @throws IOException if the block pool does not exist.
+   */
+  void verifyBlockPoolExists(String bpid) throws IOException;
+
+  /**
+   * Verify that the block pool does not exist.
+   *
+   * @param bpid block pool ID
+   * @throws IOException if the block pool does exist.
+   */
+  void verifyBlockPoolMissing(String bpid) throws IOException;
 }
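A minimal usage sketch for the two new methods, assuming a running MiniDFSCluster as in TestDeleteBlockPool below (dn is the DataNode under test, and its BPOfferService for the pool must already be stopped before deleteBlockPool, as the test shows):

    String bpid = cluster.getNamesystem(0).getBlockPoolId();
    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0);
    utils.verifyBlockPoolExists(bpid);   // throws IOException if absent
    dn.deleteBlockPool(bpid, true);      // force-delete the block pool
    utils.verifyBlockPoolMissing(bpid);  // throws IOException if still there

Throwing IOException rather than calling JUnit asserts lets each FsDataset implementation report layout-specific detail in the failure message.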

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java

@@ -20,10 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -68,11 +66,6 @@ public class TestDeleteBlockPool {
       String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
       String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
 
-      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
-      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
-      File dn2StorageDir1 = cluster.getInstanceStorageDir(1, 0);
-      File dn2StorageDir2 = cluster.getInstanceStorageDir(1, 1);
-
       // Although namenode is shutdown, the bp offerservice is still running
       try {
         dn1.deleteBlockPool(bpid1, true);
@@ -92,21 +85,17 @@ public class TestDeleteBlockPool {
       } catch (IOException expected) {
       }
 
-      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
-      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid1);
 
       dn1.deleteBlockPool(bpid1, true);
 
-      verifyBlockPoolDirectories(false, dn1StorageDir1, bpid1);
-      verifyBlockPoolDirectories(false, dn1StorageDir2, bpid1);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolMissing(bpid1);
 
       fs1.delete(new Path("/alpha"), true);
 
       // Wait till all blocks are deleted from the dn2 for bpid1.
-      File finalDir1 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid1);
-      File finalDir2 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid2);
-      while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1)) ||
-          (!DatanodeUtil.dirNoFilesRecursive(finalDir2))) {
+      while (cluster.getFsDatasetTestUtils(1).getStoredReplicas(bpid1)
+          .hasNext()) {
         try {
           Thread.sleep(3000);
         } catch (Exception ignored) {
@@ -124,22 +113,18 @@ public class TestDeleteBlockPool {
       dn2.refreshNamenodes(nn1Conf);
       assertEquals(1, dn2.getAllBpOs().size());
 
-      verifyBlockPoolDirectories(true, dn2StorageDir1, bpid1);
-      verifyBlockPoolDirectories(true, dn2StorageDir2, bpid1);
+      cluster.getFsDatasetTestUtils(1).verifyBlockPoolExists(bpid1);
 
       // Now deleteBlockPool must succeed with force as false, because no
       // blocks exist for bpid1 and bpOfferService is also stopped for bpid1.
       dn2.deleteBlockPool(bpid1, false);
 
-      verifyBlockPoolDirectories(false, dn2StorageDir1, bpid1);
-      verifyBlockPoolDirectories(false, dn2StorageDir2, bpid1);
+      cluster.getFsDatasetTestUtils(1).verifyBlockPoolMissing(bpid1);
 
       //bpid2 must not be impacted
-      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
-      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
-      verifyBlockPoolDirectories(true, dn2StorageDir1, bpid2);
-      verifyBlockPoolDirectories(true, dn2StorageDir2, bpid2);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid2);
+      cluster.getFsDatasetTestUtils(1).verifyBlockPoolExists(bpid2);
 
       //make sure second block pool is running all fine
       Path gammaFile = new Path("/gamma");
       DFSTestUtil.createFile(fs2, gammaFile, 1024, (short) 1, 55);
@@ -178,9 +163,6 @@ public class TestDeleteBlockPool {
       String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
       String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
 
-      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
-      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
-
       Configuration nn1Conf = cluster.getConfiguration(0);
       nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
       dn1.refreshNamenodes(nn1Conf);
@@ -193,19 +175,16 @@ public class TestDeleteBlockPool {
       int ret = admin.run(args);
       assertFalse(0 == ret);
 
-      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
-      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid2);
 
       String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
       ret = admin.run(forceArgs);
       assertEquals(0, ret);
 
-      verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
-      verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolMissing(bpid2);
 
       //bpid1 remains good
-      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
-      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
+      cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid1);
 
     } finally {
       if (cluster != null) {
@@ -213,24 +192,4 @@ public class TestDeleteBlockPool {
       }
     }
   }
-
-  private void verifyBlockPoolDirectories(boolean shouldExist,
-      File storageDir, String bpid) throws IOException {
-    File bpDir = new File(storageDir, DataStorage.STORAGE_DIR_CURRENT + "/"
-        + bpid);
-
-    if (shouldExist == false) {
-      assertFalse(bpDir.exists());
-    } else {
-      File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
-      File finalizedDir = new File(bpCurrentDir,
-          DataStorage.STORAGE_DIR_FINALIZED);
-      File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
-      File versionFile = new File(bpCurrentDir, "VERSION");
-
-      assertTrue(finalizedDir.isDirectory());
-      assertTrue(rbwDir.isDirectory());
-      assertTrue(versionFile.exists());
-    }
-  }
 }
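The directory-scanning wait loop in the old test becomes a poll over the dataset's replica iterator. The pattern in isolation (3-second poll interval, as in the test above; InterruptedException handling omitted):

    // Block deletion on the DataNode is asynchronous: poll until the
    // DataNode at index 1 reports no stored replicas left for bpid1.
    while (cluster.getFsDatasetTestUtils(1).getStoredReplicas(bpid1)
        .hasNext()) {
      Thread.sleep(3000);
    }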

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
@@ -405,4 +406,42 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
   public long getPendingAsyncDeletions() {
     return dataset.asyncDiskService.countPendingDeletions();
   }
+
+  @Override
+  public void verifyBlockPoolExists(String bpid) throws IOException {
+    FsVolumeImpl volume;
+    try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
+      volume = (FsVolumeImpl) references.get(0);
+    }
+    File bpDir = new File(volume.getCurrentDir(), bpid);
+    File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
+    File finalizedDir = new File(bpCurrentDir,
+        DataStorage.STORAGE_DIR_FINALIZED);
+    File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
+    File versionFile = new File(bpCurrentDir, "VERSION");
+
+    if (!finalizedDir.isDirectory()) {
+      throw new IOException(finalizedDir.getPath() + " is not a directory.");
+    }
+    if (!rbwDir.isDirectory()) {
+      throw new IOException(rbwDir.getPath() + " is not a directory.");
+    }
+    if (!versionFile.exists()) {
+      throw new IOException(
+          "Version file: " + versionFile.getPath() + " does not exist.");
+    }
+  }
+
+  @Override
+  public void verifyBlockPoolMissing(String bpid) throws IOException {
+    FsVolumeImpl volume;
+    try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
+      volume = (FsVolumeImpl) references.get(0);
+    }
+    File bpDir = new File(volume.getCurrentDir(), bpid);
+    if (bpDir.exists()) {
+      throw new IOException(
+          String.format("Block pool directory %s exists", bpDir));
+    }
+  }
 }
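For reference, the on-disk layout that the default implementation's verifyBlockPoolExists checks above, written out as a tree (directory names come from the DataStorage constants used in the code; <volume> is the first volume returned by getFsVolumeReferences()):

    <volume>/current/<bpid>/current/finalized/   must be a directory
    <volume>/current/<bpid>/current/rbw/         must be a directory
    <volume>/current/<bpid>/current/VERSION      must exist

verifyBlockPoolMissing only requires that <volume>/current/<bpid> is gone; note that both checks inspect the first volume only.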