HDFS-13251. Avoid using hard coded datanode data dirs in unit tests. Contributed by Ajay Kumar.
commit f83716b7f2
parent 0be0f1ce1d
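Reviewer note: every hunk below makes the same substitution. Instead of rebuilding a volume path by hand from cluster.getDataDirectory() plus the naming convention "data<N>", the tests now ask MiniDFSCluster for the directory. The old arithmetic encodes the layout rule that volume j (0-based) of DataNode i lives in "data<storagesPerDatanode * i + j + 1>", which silently breaks whenever the cluster layout or the storage count per node changes. A minimal sketch of the equivalence, assuming the default layout of two storage directories per DataNode (class name is hypothetical):

    import java.io.File;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class StorageDirSketch {
      public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new HdfsConfiguration())
                .numDataNodes(2).build();
        try {
          cluster.waitActive();
          // Old style: re-derive the layout rule for volume 0 of DataNode 1.
          File hardCoded =
              new File(cluster.getDataDirectory(), "data" + (2 * 1 + 1));
          // New style: let the cluster resolve the same directory.
          File viaAccessor = cluster.getInstanceStorageDir(1, 0);
          // Prints true under the default layout; the hard-coded form would
          // silently diverge if storagesPerDatanode changed.
          System.out.println(hardCoded.getCanonicalPath()
              .equals(viaAccessor.getCanonicalPath()));
        } finally {
          cluster.shutdown();
        }
      }
    }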
@@ -179,10 +179,9 @@ public void testStorageTypeStatsWhenStorageFailed() throws Exception {

     storageTypeStats = storageTypeStatsMap.get(StorageType.ARCHIVE);
     assertEquals(3, storageTypeStats.getNodesInService());
-    String dataDir = cluster.getDataDirectory();
-    File dn1ArcVol1 = new File(dataDir, "data" + (3 * 0 + 2));
-    File dn2ArcVol1 = new File(dataDir, "data" + (3 * 1 + 2));
-    File dn3ArcVol1 = new File(dataDir, "data" + (3 * 2 + 2));
+    File dn1ArcVol1 = cluster.getInstanceStorageDir(0, 1);
+    File dn2ArcVol1 = cluster.getInstanceStorageDir(1, 1);
+    File dn3ArcVol1 = cluster.getInstanceStorageDir(2, 1);
     DataNodeTestUtils.injectDataDirFailure(dn1ArcVol1);
     DataNodeTestUtils.injectDataDirFailure(dn2ArcVol1);
     DataNodeTestUtils.injectDataDirFailure(dn3ArcVol1);
@@ -305,7 +305,6 @@ private void addVolumes(int numNewVolumes)

   private void addVolumes(int numNewVolumes, CountDownLatch waitLatch)
       throws ReconfigurationException, IOException, InterruptedException {
-    File dataDir = new File(cluster.getDataDirectory());
     DataNode dn = cluster.getDataNodes().get(0); // First DataNode.
     Configuration conf = dn.getConf();
     String oldDataDir = conf.get(DFS_DATANODE_DATA_DIR_KEY);
@@ -315,14 +314,14 @@ private void addVolumes(int numNewVolumes, CountDownLatch waitLatch)
     int startIdx = oldDataDir.split(",").length + 1;
     // Find the first available (non-taken) directory name for data volume.
     while (true) {
-      File volumeDir = new File(dataDir, "data" + startIdx);
+      File volumeDir = cluster.getInstanceStorageDir(0, startIdx);
       if (!volumeDir.exists()) {
         break;
       }
       startIdx++;
     }
     for (int i = startIdx; i < startIdx + numNewVolumes; i++) {
-      File volumeDir = new File(dataDir, "data" + String.valueOf(i));
+      File volumeDir = cluster.getInstanceStorageDir(0, i);
       newVolumeDirs.add(volumeDir);
       volumeDir.mkdirs();
       newDataDirBuf.append(",");
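Reviewer note: the rewritten probe loop above works because getInstanceStorageDir only computes a path under the cluster's base directory and never creates it, so exists() is a safe test for the first unused volume slot. A condensed sketch of that contract (same logic as the hunk, hypothetical variable names):

    // Find the first storage-dir index on DataNode 0 that is not yet taken.
    // getInstanceStorageDir builds the File but does not call mkdirs(), so
    // exists() reflects only directories created by earlier add-volume calls.
    int idx = oldDataDir.split(",").length + 1;
    while (cluster.getInstanceStorageDir(0, idx).exists()) {
      idx++;
    }
    File firstFree = cluster.getInstanceStorageDir(0, idx);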
@@ -985,7 +984,7 @@ public void testDirectlyReloadAfterCheckDiskError()

     DataNode dn = cluster.getDataNodes().get(0);
     final String oldDataDir = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY);
-    File dirToFail = new File(cluster.getDataDirectory(), "data1");
+    File dirToFail = cluster.getInstanceStorageDir(0, 0);

     FsVolumeImpl failedVolume = DataNodeTestUtils.getVolume(dn, dirToFail);
     assertTrue("No FsVolume was found for " + dirToFail,
@@ -1037,7 +1036,7 @@ public void testFullBlockReportAfterRemovingVolumes()
     InternalDataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());

     // Remove a data dir from datanode
-    File dataDirToKeep = new File(cluster.getDataDirectory(), "data1");
+    File dataDirToKeep = cluster.getInstanceStorageDir(0, 0);
     assertThat(
         "DN did not update its own config",
         dn.reconfigurePropertyImpl(
@@ -168,7 +168,7 @@ public void testVolumeFailure() throws Exception {

     // fail the volume
     // delete/make non-writable one of the directories (failed volume)
-    data_fail = new File(dataDir, "data3");
+    data_fail = cluster.getInstanceStorageDir(1, 0);
     failedDir = MiniDFSCluster.getFinalizedDir(data_fail,
         cluster.getNamesystem().getBlockPoolId());
     if (failedDir.exists() &&
@@ -235,7 +235,7 @@ public void testFailedVolumeBeingRemovedFromDataNode()
     DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
     DFSTestUtil.waitReplication(fs, file1, (short) 2);

-    File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
+    File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
     DataNode dn0 = cluster.getDataNodes().get(0);
     DataNodeTestUtils.waitForDiskError(dn0,
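Reviewer note: the volume-failure tests in this and the following hunks share one recipe: resolve the volume directory through the cluster, inject the failure, then wait for the DataNode to detect it. A sketch of that recipe, assuming the DataNodeTestUtils helpers behave as shown above (the trailing waitForDiskError argument is truncated in this diff view, so the getVolume pairing is an assumption):

    // Sketch of the recurring failure-injection pattern.
    File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);   // resolve volume dir
    DataNodeTestUtils.injectDataDirFailure(dn0Vol1);      // make it unusable
    DataNode dn0 = cluster.getDataNodes().get(0);
    DataNodeTestUtils.waitForDiskError(dn0,
        DataNodeTestUtils.getVolume(dn0, dn0Vol1));       // block until detected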
@@ -298,8 +298,8 @@ public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated()
     assumeNotWindows();

     // make both data directories to fail on dn0
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1, dn0Vol2);
     DataNode dn0 = cluster.getDataNodes().get(0);
     DataNodeTestUtils.waitForDiskError(dn0,
@@ -322,8 +322,8 @@ public void testVolumeFailureRecoveredByHotSwappingVolume()
     // volume failures which is currently not supported on Windows.
     assumeNotWindows();

-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
         DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
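Reviewer note: the hot-swap tests (this hunk and the two that follow) pair the resolved volume with DataNode reconfiguration: save the configured dir list, shrink it to drop the failed volume, and later restore it. A sketch of that assumed flow, mirroring the reconfigurePropertyImpl call visible in the testFullBlockReportAfterRemovingVolumes hunk; the repair step in between is elided here:

    // Hot-swap sketch: drop the failed volume, then swap it back in.
    String oldDataDirs =
        dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
    dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
        dn0Vol2.getAbsolutePath());   // keep only the healthy volume
    // ... repair dn0Vol1, then restore the original configuration ...
    dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
        oldDataDirs);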
@@ -366,8 +366,8 @@ public void testTolerateVolumeFailuresAfterAddingMoreVolumes()
     // volume failures which is currently not supported on Windows.
     assumeNotWindows();

-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final File dn0VolNew = new File(dataDir, "data_new");
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
@@ -413,8 +413,8 @@ public void testUnderReplicationAfterVolFailure() throws Exception {
     DFSTestUtil.waitReplication(fs, file1, (short)3);

     // Fail the first volume on both datanodes
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);

     Path file2 = new Path("/test2");
@@ -78,7 +78,6 @@ public class TestDataNodeVolumeFailureReporting {
   private FileSystem fs;
   private MiniDFSCluster cluster;
   private Configuration conf;
-  private String dataDir;
   private long volumeCapacity;

   // Sleep at least 3 seconds (a 1s heartbeat plus padding) to allow
@@ -134,10 +133,10 @@ public void testSuccessiveVolumeFailures() throws Exception {
     final long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);

-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    File dn3Vol1 = new File(dataDir, "data"+(2*2+1));
-    File dn3Vol2 = new File(dataDir, "data"+(2*2+2));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
+    File dn3Vol1 = cluster.getInstanceStorageDir(2, 0);
+    File dn3Vol2 = cluster.getInstanceStorageDir(2, 1);

     /*
      * Make the 1st volume directories on the first two datanodes
@@ -275,8 +274,8 @@ public void testVolFailureStatsPreservedOnNNRestart() throws Exception {

     // Fail the first volume on both datanodes (we have to keep the
     // third healthy so one node in the pipeline will not fail).
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);

     Path file1 = new Path("/test1");
@@ -317,10 +316,10 @@ public void testMultipleVolFailuresOnNode() throws Exception {
     long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
    long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);

-    File dn1Vol1 = new File(dataDir, "data"+(4*0+1));
-    File dn1Vol2 = new File(dataDir, "data"+(4*0+2));
-    File dn2Vol1 = new File(dataDir, "data"+(4*1+1));
-    File dn2Vol2 = new File(dataDir, "data"+(4*1+2));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
+    File dn2Vol2 = cluster.getInstanceStorageDir(1, 1);

     // Make the first two volume directories on the first two datanodes
     // non-accessible.
@@ -376,10 +375,10 @@ public void testDataNodeReconfigureWithVolumeFailures() throws Exception {

     // Fail the first volume on both datanodes (we have to keep the
     // third healthy so one node in the pipeline will not fail).
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn1Vol2 = new File(dataDir, "data"+(2*0+2));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    File dn2Vol2 = new File(dataDir, "data"+(2*1+2));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
+    File dn2Vol2 = cluster.getInstanceStorageDir(1, 1);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1);
     DataNodeTestUtils.injectDataDirFailure(dn2Vol1);

@@ -528,8 +527,8 @@ public void testAutoFormatEmptyBlockPoolDirectory() throws Exception {
   @Test
   public void testHotSwapOutFailedVolumeAndReporting()
       throws Exception {
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
         DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
@@ -777,7 +776,6 @@ private void initCluster(int numDataNodes, int storagesPerDatanode,
         .storagesPerDatanode(storagesPerDatanode).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    dataDir = cluster.getDataDirectory();
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(
         cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
     volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
|
@ -50,7 +50,6 @@ public class TestDataNodeVolumeFailureToleration {
|
|||||||
private FileSystem fs;
|
private FileSystem fs;
|
||||||
private MiniDFSCluster cluster;
|
private MiniDFSCluster cluster;
|
||||||
private Configuration conf;
|
private Configuration conf;
|
||||||
private String dataDir;
|
|
||||||
|
|
||||||
// Sleep at least 3 seconds (a 1s heartbeat plus padding) to allow
|
// Sleep at least 3 seconds (a 1s heartbeat plus padding) to allow
|
||||||
// for heartbeats to propagate from the datanodes to the namenode.
|
// for heartbeats to propagate from the datanodes to the namenode.
|
||||||
@@ -80,7 +79,6 @@ public void setUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    dataDir = cluster.getDataDirectory();
   }

   @After
@@ -161,7 +159,7 @@ public void testConfigureMinValidVolumes() throws Exception {
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);

     // Fail a volume on the 2nd DN
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn2Vol1 = cluster.getStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn2Vol1);

     // Should only get two replicas (the first DN and the 3rd)
|
@ -102,9 +102,7 @@ public void testVolumeMetricsWithVolumeDepartureArrival() throws Exception {
|
|||||||
|
|
||||||
ArrayList<DataNode> dns = cluster.getDataNodes();
|
ArrayList<DataNode> dns = cluster.getDataNodes();
|
||||||
assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
|
assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
|
||||||
|
final File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
|
||||||
final String dataDir = cluster.getDataDirectory();
|
|
||||||
final File dn1Vol2 = new File(dataDir, "data2");
|
|
||||||
|
|
||||||
DataNodeTestUtils.injectDataDirFailure(dn1Vol2);
|
DataNodeTestUtils.injectDataDirFailure(dn1Vol2);
|
||||||
verifyDataNodeVolumeMetrics(fs, cluster, fileName);
|
verifyDataNodeVolumeMetrics(fs, cluster, fileName);
|
||||||
|
@ -615,13 +615,15 @@ public void testReportNodeWithoutJson() throws Exception {
|
|||||||
assertThat(
|
assertThat(
|
||||||
outputs.get(3),
|
outputs.get(3),
|
||||||
is(allOf(containsString("DISK"),
|
is(allOf(containsString("DISK"),
|
||||||
containsString("/dfs/data/data1"),
|
containsString(cluster.getInstanceStorageDir(0, 0)
|
||||||
|
.getAbsolutePath()),
|
||||||
containsString("0.00"),
|
containsString("0.00"),
|
||||||
containsString("1.00"))));
|
containsString("1.00"))));
|
||||||
assertThat(
|
assertThat(
|
||||||
outputs.get(4),
|
outputs.get(4),
|
||||||
is(allOf(containsString("DISK"),
|
is(allOf(containsString("DISK"),
|
||||||
containsString("/dfs/data/data2"),
|
containsString(cluster.getInstanceStorageDir(0, 1)
|
||||||
|
.getAbsolutePath()),
|
||||||
containsString("0.00"),
|
containsString("0.00"),
|
||||||
containsString("1.00"))));
|
containsString("1.00"))));
|
||||||
}
|
}
|
||||||
|
@ -349,11 +349,14 @@ private void testDataNodeGetReconfigurationStatus(boolean expectedSuccuss)
|
|||||||
containsString("FAILED: Change property " +
|
containsString("FAILED: Change property " +
|
||||||
DFS_DATANODE_DATA_DIR_KEY));
|
DFS_DATANODE_DATA_DIR_KEY));
|
||||||
}
|
}
|
||||||
|
File dnDir0 = cluster.getInstanceStorageDir(0, 0);
|
||||||
|
File dnDir1 = cluster.getInstanceStorageDir(0, 1);
|
||||||
assertThat(outs.get(offset + 1),
|
assertThat(outs.get(offset + 1),
|
||||||
is(allOf(containsString("From:"), containsString("data1"),
|
is(allOf(containsString("From:"), containsString(dnDir0.getName()),
|
||||||
containsString("data2"))));
|
containsString(dnDir1.getName()))));
|
||||||
assertThat(outs.get(offset + 2),
|
assertThat(outs.get(offset + 2), is(not(
|
||||||
is(not(anyOf(containsString("data1"), containsString("data2")))));
|
anyOf(containsString(dnDir0.getName()),
|
||||||
|
containsString(dnDir1.getName())))));
|
||||||
assertThat(outs.get(offset + 2),
|
assertThat(outs.get(offset + 2),
|
||||||
is(allOf(containsString("To"), containsString("data_new"))));
|
is(allOf(containsString("To"), containsString("data_new"))));
|
||||||
}
|
}
|
||||||
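Reviewer note on the last two hunks: the CLI assertions apply the same principle at the string level. Rather than matching literal fragments like "/dfs/data/data1" or "data1", they derive the expected text from the directory the cluster actually created. A sketch of the idea; reportLine and statusLine are hypothetical stand-ins for the captured command output:

    File dnDir0 = cluster.getInstanceStorageDir(0, 0);
    // Disk-balancer report: match the full path the cluster reports.
    assertThat(reportLine, containsString(dnDir0.getAbsolutePath()));
    // Reconfiguration status: the directory name (e.g. "data1") suffices.
    assertThat(statusLine, containsString(dnDir0.getName()));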