HDFS-13631. TestDFSAdmin#testCheckNumOfBlocksInReportCommand should use a separate MiniDFSCluster path. Contributed by Anbang Hu.

Inigo Goiri 2018-05-31 10:12:38 -07:00
parent 1361030e59
commit 1f324e9661
1 changed file with 32 additions and 33 deletions

@@ -110,11 +110,15 @@ public class TestDFSAdmin {
   private static final PrintStream OLD_OUT = System.out;
   private static final PrintStream OLD_ERR = System.err;
   private String tempResource = null;
+  private static final int NUM_DATANODES = 2;
 
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
+        GenericTestUtils.getRandomizedTempPath());
     restartCluster();
 
     admin = new DFSAdmin(conf);
@@ -157,7 +161,8 @@ public class TestDFSAdmin {
     if (cluster != null) {
       cluster.shutdown();
     }
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(NUM_DATANODES).build();
     cluster.waitActive();
     datanode = cluster.getDataNodes().get(0);
     namenode = cluster.getNameNode();
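
Aside, not part of the patch: the two hunks above boil down to the isolation pattern sketched below. The class name is invented for illustration; the Hadoop APIs are the ones already used in the diff. Pointing MiniDFSCluster.HDFS_MINIDFS_BASEDIR at GenericTestUtils.getRandomizedTempPath() gives each run its own storage root, so clusters started by different tests no longer collide on the default data directory.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;

public class IsolatedMiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Give this run its own storage root instead of the shared default
    // under the build directory.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
        GenericTestUtils.getRandomizedTempPath());

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .build();
    try {
      cluster.waitActive();
      // ... run DFSAdmin commands or file system operations here ...
    } finally {
      cluster.shutdown();
    }
  }
}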
@@ -904,40 +909,34 @@ public class TestDFSAdmin {
 
   @Test(timeout = 300000L)
   public void testCheckNumOfBlocksInReportCommand() throws Exception {
-    Configuration config = new Configuration();
-    config.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
-    config.set(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, "3s");
-
-    int numOfDatanodes = 1;
-    MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(config)
-        .numDataNodes(numOfDatanodes).build();
-    try {
-      miniDFSCluster.waitActive();
-      DistributedFileSystem dfs = miniDFSCluster.getFileSystem();
-      Path path= new Path("/tmp.txt");
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    Path path = new Path("/tmp.txt");
 
-      DatanodeInfo[] dn = dfs.getDataNodeStats();
-      assertEquals(dn.length, numOfDatanodes);
-      //Block count should be 0, as no files are created
-      assertEquals(dn[0].getNumBlocks(), 0);
-
+    DatanodeInfo[] dn = dfs.getDataNodeStats();
+    assertEquals(dn.length, NUM_DATANODES);
+    // Block count should be 0, as no files are created
+    int actualBlockCount = 0;
+    for (DatanodeInfo d : dn) {
+      actualBlockCount += d.getNumBlocks();
+    }
+    assertEquals(0, actualBlockCount);
 
-      //Create a file with 2 blocks
-      DFSTestUtil.createFile(dfs, path, 1024, (short) 1, 0);
-      int expectedBlockCount = 2;
-
-      //Wait for One Heartbeat
-      Thread.sleep(3 * 1000);
-
-      dn = dfs.getDataNodeStats();
-      assertEquals(dn.length, numOfDatanodes);
-
-      //Block count should be 2, as file is created with block count 2
-      assertEquals(dn[0].getNumBlocks(), expectedBlockCount);
-
-    } finally {
-      cluster.shutdown();
-    }
+    // Create a file with 2 blocks
+    DFSTestUtil.createFile(dfs, path, 1024, (short) 1, 0);
+    int expectedBlockCount = 2;
+
+    // Wait for One Heartbeat
+    Thread.sleep(3 * 1000);
+
+    dn = dfs.getDataNodeStats();
+    assertEquals(dn.length, NUM_DATANODES);
+
+    // Block count should be 2, as file is created with block count 2
+    actualBlockCount = 0;
+    for (DatanodeInfo d : dn) {
+      actualBlockCount += d.getNumBlocks();
+    }
+    assertEquals(expectedBlockCount, actualBlockCount);
   }
 
   @Test
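
The rewritten test body above also changes how the block count is checked: with NUM_DATANODES = 2, a single-replica file's blocks may land on either datanode, so the assertions sum getNumBlocks() over the whole datanode report instead of reading dn[0] alone. A hypothetical helper, not in the patch and with invented names, that captures the same aggregation:

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

/** Hypothetical utility, not part of the patch. */
final class BlockCountUtil {

  private BlockCountUtil() {
  }

  /** Sum the block count reported by every datanode in the report. */
  static int totalBlockCount(DatanodeInfo[] report) {
    int total = 0;
    for (DatanodeInfo d : report) {
      total += d.getNumBlocks();
    }
    return total;
  }
}

With such a helper, the two assertions in the test would read assertEquals(0, BlockCountUtil.totalBlockCount(dn)) and assertEquals(expectedBlockCount, BlockCountUtil.totalBlockCount(dn)).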