HBASE-4545 TestHLog doesn't clean up after itself
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1179511 13f79535-47bb-0310-9956-ffa450edef68
commit da04d73e70
parent 3d156a5a24

@@ -19,6 +19,7 @@ Release 0.93.0 - Unreleased
   TESTS
    HBASE-4534  A new unit test for lazy seek and StoreScanner in general
                (mikhail via jgray)
+   HBASE-4545  TestHLog doesn't clean up after itself


 Release 0.92.0 - Unreleased

@@ -1366,6 +1366,13 @@ public class HBaseTestingUtility {
     return dfsCluster;
   }

+  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
+    if (dfsCluster != null && dfsCluster.isClusterUp()) {
+      throw new IOException("DFSCluster is already running! Shut it down first.");
+    }
+    this.dfsCluster = cluster;
+  }
+
   public FileSystem getTestFileSystem() throws IOException {
     return FileSystem.get(conf);
   }

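For orientation (not part of the patch): a minimal sketch of how a caller might use the new setter, mirroring the TestHLog hunk further down. The MiniDFSCluster constructor arguments are copied from that hunk, and namenodePort is assumed to be the port of the cluster that was previously shut down.

    // Sketch only: restart HDFS and hand the new cluster back to the test utility.
    TEST_UTIL.shutdownMiniDFSCluster();   // setDFSCluster() refuses to replace a running cluster
    MiniDFSCluster cluster =
        new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
    TEST_UTIL.setDFSCluster(cluster);     // re-register the restarted cluster with the utility
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
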
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.log4j.Level;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;

@@ -110,16 +111,21 @@ public class TestHLog {
         "ipc.client.connection.maxidletime", 500);
     TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
         SampleRegionWALObserver.class.getName());
-    TEST_UTIL.startMiniCluster(3);
+    TEST_UTIL.startMiniDFSCluster(3);

     conf = TEST_UTIL.getConfiguration();
     cluster = TEST_UTIL.getDFSCluster();
     fs = cluster.getFileSystem();

-    hbaseDir = new Path(TEST_UTIL.getConfiguration().get("hbase.rootdir"));
+    hbaseDir = TEST_UTIL.createRootDir();
     oldLogDir = new Path(hbaseDir, ".oldlogs");
     dir = new Path(hbaseDir, getName());
   }
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniDFSCluster();
+  }
+
   private static String getName() {
     // TODO Auto-generated method stub
     return "TestHLog";

@@ -205,7 +211,9 @@ public class TestHLog {
     Path subdir = new Path(dir, "hlogdir");
     HLog wal = new HLog(fs, subdir, oldLogDir, conf);
     final int total = 20;
+    HLog.Reader reader = null;

+    try {
       HRegionInfo info = new HRegionInfo(bytes,
                   null,null, false);
       HTableDescriptor htd = new HTableDescriptor();

@@ -221,7 +229,7 @@ public class TestHLog {
       wal.sync();
       // Open a Reader.
       Path walPath = wal.computeFilename();
-      HLog.Reader reader = HLog.getReader(fs, walPath, conf);
+      reader = HLog.getReader(fs, walPath, conf);
       int count = 0;
       HLog.Entry entry = new HLog.Entry();
       while ((entry = reader.next(entry)) != null) count++;

@@ -267,6 +275,10 @@ public class TestHLog {
       while((entry = reader.next(entry)) != null) count++;
       assertEquals(total * 3, count);
       reader.close();
+    } finally {
+      if (wal != null) wal.closeAndDelete();
+      if (reader != null) reader.close();
+    }
   }

   /**

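The same cleanup pattern repeats through the remaining hunks: the HLog (and, where used, its Reader) is created inside a try block and released in a finally block, so a failed assertion no longer leaves WAL files behind. A condensed sketch of the pattern, using only names that appear in this diff (the write/read body is elided):

    // Sketch of the cleanup pattern applied throughout TestHLog.
    HLog wal = null;
    HLog.Reader reader = null;
    try {
      wal = new HLog(fs, subdir, oldLogDir, conf);
      // ... append edits, sync, and read them back ...
    } finally {
      if (reader != null) reader.close();      // release the reader
      if (wal != null) wal.closeAndDelete();   // close the WAL and delete its files
    }
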
@@ -342,6 +354,7 @@ public class TestHLog {
     Path archdir = new Path(dir, "hlogdir_archive");
     HLog wal = new HLog(fs, subdir, archdir, conf);
     final int total = 20;
+
     HTableDescriptor htd = new HTableDescriptor();
     htd.addFamily(new HColumnDescriptor(tableName));


@@ -383,6 +396,7 @@ public class TestHLog {
     Thread.sleep(2000);

     cluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
+    TEST_UTIL.setDFSCluster(cluster);
     cluster.waitActive();
     fs = cluster.getFileSystem();
     LOG.info("START second instance.");

@@ -454,8 +468,9 @@ public class TestHLog {
     final byte [] tableName = Bytes.toBytes("tablename");
     final byte [] row = Bytes.toBytes("row");
     HLog.Reader reader = null;
-    HLog log = new HLog(fs, dir, oldLogDir, conf);
+    HLog log = null;
     try {
+      log = new HLog(fs, dir, oldLogDir, conf);
       // Write columns named 1, 2, 3, etc. and then values of single byte
       // 1, 2, 3...
       long timestamp = System.currentTimeMillis();

@@ -595,6 +610,7 @@ public class TestHLog {
     final byte [] tableName = Bytes.toBytes("tablename");
     final byte [] row = Bytes.toBytes("row");
     HLog log = new HLog(fs, dir, oldLogDir, conf);
+    try {
       DumbWALActionsListener visitor = new DumbWALActionsListener();
       log.registerWALActionsListener(visitor);
       long timestamp = System.currentTimeMillis();

@@ -618,6 +634,9 @@ public class TestHLog {
           timestamp, new byte[]{(byte) (11 + '0')}));
       log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
       assertEquals(COL_COUNT, visitor.increments);
+    } finally {
+      if (log != null) log.closeAndDelete();
+    }
   }

   @Test

@@ -627,6 +646,7 @@ public class TestHLog {
     final byte [] tableName2 = Bytes.toBytes("testLogCleaning2");

     HLog log = new HLog(fs, dir, oldLogDir, conf);
+    try {
       HRegionInfo hri = new HRegionInfo(tableName,
           HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
       HRegionInfo hri2 = new HRegionInfo(tableName2,

@@ -665,6 +685,9 @@ public class TestHLog {
       log.completeCacheFlush(hri2.getEncodedNameAsBytes(), tableName2, seqId, false);
       log.rollWriter();
       assertEquals(0, log.getNumLogFiles());
+    } finally {
+      if (log != null) log.closeAndDelete();
+    }
   }

   /**

@@ -674,9 +697,13 @@ public class TestHLog {
   public void testWALCoprocessorLoaded() throws Exception {
     // test to see whether the coprocessor is loaded or not.
     HLog log = new HLog(fs, dir, oldLogDir, conf);
+    try {
       WALCoprocessorHost host = log.getCoprocessorHost();
       Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
       assertNotNull(c);
+    } finally {
+      if (log != null) log.closeAndDelete();
+    }
   }

   private void addEdits(HLog log, HRegionInfo hri, byte [] tableName,