HBASE-14648 Reenable TestWALProcedureStoreOnHDFS#testWalRollOnLowReplication (Heng Chen)

stack 2015-10-24 20:55:37 -07:00
parent 40887c94b7
commit d76dbb4f84
1 changed file with 80 additions and 89 deletions
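In outline, the change drops the JUnit @Before/@After hooks in favor of explicit per-test calls, so each test can adjust the HDFS configuration before its own mini cluster starts. A minimal sketch of that per-test pattern, using only the method names visible in the diff below (the test name and body are placeholders, not part of the commit):

    // Sketch of the per-test lifecycle used in this change; testExample is a placeholder name.
    @Test(timeout = 60000)
    public void testExample() throws Exception {
      initConfig(UTIL.getConfiguration());  // tune dfs.* settings before the cluster starts
      setup();                              // start the MiniDFSCluster and the WAL procedure store
      try {
        // exercise the store, e.g. store.insert(new TestProcedure(1, -1), null);
      } finally {
        tearDown();                         // stop the store and delete its log directory
      }
    }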

@@ -83,7 +83,7 @@ public class TestWALProcedureStoreOnHDFS {
}
};
private static void setupConf(Configuration conf) {
private static void initConfig(Configuration conf) {
conf.setInt("dfs.replication", 3);
conf.setInt("dfs.namenode.replication.min", 3);
@@ -93,20 +93,16 @@ public class TestWALProcedureStoreOnHDFS {
conf.setInt("hbase.procedure.store.wal.sync.failure.roll.max", 10);
}
@Before
public void setup() throws Exception {
setupConf(UTIL.getConfiguration());
MiniDFSCluster dfs = UTIL.startMiniDFSCluster(3);
Path logDir = new Path(new Path(dfs.getFileSystem().getUri()), "/test-logs");
store = ProcedureTestingUtility.createWalStore(
UTIL.getConfiguration(), dfs.getFileSystem(), logDir);
store = ProcedureTestingUtility.createWalStore(UTIL.getConfiguration(), dfs.getFileSystem(), logDir);
store.registerListener(stopProcedureListener);
store.start(8);
store.recoverLease();
}
@After
public void tearDown() throws Exception {
store.stop(false);
UTIL.getDFSCluster().getFileSystem().delete(store.getLogDir(), true);
@@ -120,6 +116,9 @@ public class TestWALProcedureStoreOnHDFS {
@Test(timeout=60000, expected=RuntimeException.class)
public void testWalAbortOnLowReplication() throws Exception {
initConfig(UTIL.getConfiguration());
setup();
try {
assertEquals(3, UTIL.getDFSCluster().getDataNodes().size());
LOG.info("Stop DataNode");
@@ -134,10 +133,16 @@ public class TestWALProcedureStoreOnHDFS {
}
assertFalse(store.isRunning());
fail("The store.insert() should throw an exeption");
} finally {
tearDown();
}
}
@Test(timeout=60000)
public void testWalAbortOnLowReplicationWithQueuedWriters() throws Exception {
initConfig(UTIL.getConfiguration());
setup();
try {
assertEquals(3, UTIL.getDFSCluster().getDataNodes().size());
store.registerListener(new ProcedureStore.ProcedureStoreListener() {
@Override
@@ -180,39 +185,22 @@ public class TestWALProcedureStoreOnHDFS {
assertFalse(store.isRunning());
assertTrue(reCount.toString(), reCount.get() >= store.getNumThreads() &&
reCount.get() < thread.length);
} finally {
tearDown();
}
}
@Ignore ("Needs work") @Test(timeout=60000)
@Test(timeout=60000)
public void testWalRollOnLowReplication() throws Exception {
store.unregisterListener(stopProcedureListener);
store.registerListener(new ProcedureStore.ProcedureStoreListener() {
@Override
public void postSync() {}
@Override
public void abortProcess() {
LOG.info("Aborted!!!!");
}
});
initConfig(UTIL.getConfiguration());
UTIL.getConfiguration().setInt("dfs.namenode.replication.min", 1);
setup();
try {
int dnCount = 0;
store.insert(new TestProcedure(1, -1), null);
UTIL.getDFSCluster().restartDataNode(dnCount);
for (long i = 2; i < 100; ++i) {
try {
store.insert(new TestProcedure(i, -1), null);
} catch (RuntimeException re) {
String msg = re.getMessage();
// We could get a sync failed here...if the test cluster is crawling such that DN recovery
// is taking a long time. If we've done enough passes, just finish up the test as a 'pass'
if (msg != null && msg.toLowerCase().contains("sync aborted")) {
LOG.info("i=" + i, re);
if (i > 50) {
LOG.info("Returning early... i=" + i + "...We ran enough of this test", re);
return;
}
}
throw re;
}
waitForNumReplicas(3);
Thread.sleep(100);
if ((i % 30) == 0) {
@@ -221,6 +209,9 @@ public class TestWALProcedureStoreOnHDFS {
}
}
assertTrue(store.isRunning());
} finally {
tearDown();
}
}
public void waitForNumReplicas(int numReplicas) throws Exception {
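The last hunk stops at the declaration of waitForNumReplicas(int numReplicas); its body lies outside the diff context. Purely as a hypothetical illustration of the polling it is used for above, and not the commit's actual implementation, a helper built only from calls that already appear in this test could look like:

    // Hypothetical sketch, not the method body from this commit:
    // poll the MiniDFSCluster until at least numReplicas DataNodes are registered.
    // The enclosing @Test(timeout=60000) bounds how long this loop can spin.
    public void waitForNumReplicas(int numReplicas) throws Exception {
      while (UTIL.getDFSCluster().getDataNodes().size() < numReplicas) {
        Thread.sleep(100);
      }
    }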