HDFS-11105. TestRBWBlockInvalidation#testRWRInvalidation fails intermittently. Contributed by Yiqun Lin

(cherry picked from commit c90891e7b3)
This commit is contained in:
Mingliang Liu 2016-11-16 13:02:10 -08:00
parent e27fbcb50c
commit a8dadd8c53
1 changed file with 39 additions and 6 deletions

View File

@ -42,8 +42,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.TestDNFencing.RandomDeleterPolicy; import org.apache.hadoop.hdfs.server.namenode.ha.TestDNFencing.RandomDeleterPolicy;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test; import org.junit.Test;
import com.google.common.base.Supplier;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
/** /**
@ -142,7 +144,7 @@ public class TestRBWBlockInvalidation {
* were RWR replicas with out-of-date genstamps, the NN could accidentally * were RWR replicas with out-of-date genstamps, the NN could accidentally
* delete good replicas instead of the bad replicas. * delete good replicas instead of the bad replicas.
*/ */
@Test(timeout=60000) @Test(timeout=120000)
public void testRWRInvalidation() throws Exception { public void testRWRInvalidation() throws Exception {
Configuration conf = new HdfsConfiguration(); Configuration conf = new HdfsConfiguration();
@ -157,10 +159,11 @@ public class TestRBWBlockInvalidation {
// Speed up the test a bit with faster heartbeats. // Speed up the test a bit with faster heartbeats.
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
int numFiles = 10;
// Test with a bunch of separate files, since otherwise the test may // Test with a bunch of separate files, since otherwise the test may
// fail just due to "good luck", even if a bug is present. // fail just due to "good luck", even if a bug is present.
List<Path> testPaths = Lists.newArrayList(); List<Path> testPaths = Lists.newArrayList();
for (int i = 0; i < 10; i++) { for (int i = 0; i < numFiles; i++) {
testPaths.add(new Path("/test" + i)); testPaths.add(new Path("/test" + i));
} }
@ -178,6 +181,9 @@ public class TestRBWBlockInvalidation {
out.hflush(); out.hflush();
} }
for (Path path : testPaths) {
DFSTestUtil.waitReplication(cluster.getFileSystem(), path, (short)2);
}
// Shutdown one of the nodes in the pipeline // Shutdown one of the nodes in the pipeline
DataNodeProperties oldGenstampNode = cluster.stopDataNode(0); DataNodeProperties oldGenstampNode = cluster.stopDataNode(0);
@ -197,6 +203,10 @@ public class TestRBWBlockInvalidation {
out.close(); out.close();
} }
for (Path path : testPaths) {
DFSTestUtil.waitReplication(cluster.getFileSystem(), path, (short)1);
}
// Upon restart, there will be two replicas, one with an old genstamp // Upon restart, there will be two replicas, one with an old genstamp
// and one current copy. This test wants to ensure that the old genstamp // and one current copy. This test wants to ensure that the old genstamp
// copy is the one that is deleted. // copy is the one that is deleted.
@ -220,6 +230,7 @@ public class TestRBWBlockInvalidation {
HATestUtil.waitForDNDeletions(cluster); HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports(); cluster.triggerDeletionReports();
waitForNumTotalBlocks(cluster, numFiles);
// Make sure we can still read the blocks. // Make sure we can still read the blocks.
for (Path path : testPaths) { for (Path path : testPaths) {
String ret = DFSTestUtil.readFile(cluster.getFileSystem(), path); String ret = DFSTestUtil.readFile(cluster.getFileSystem(), path);
@ -233,4 +244,26 @@ public class TestRBWBlockInvalidation {
} }
} }
/**
 * Polls the NameNode until its reported total block count equals
 * {@code numTotalBlocks}, triggering DN block reports on each attempt.
 * Fails the test (via GenericTestUtils.waitFor timeout) after 60 seconds.
 */
private void waitForNumTotalBlocks(final MiniDFSCluster cluster,
    final int numTotalBlocks) throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        cluster.triggerBlockReports();
        // Done once the namesystem reflects the expected block total.
        return cluster.getNamesystem().getBlocksTotal() == numTotalBlocks;
      } catch (Exception ignored) {
        // Best-effort poll: swallow transient failures and retry
        // on the next waitFor iteration.
        return false;
      }
    }
  }, 1000, 60000);
}
} }