HBASE-2924 TestLogRolling doesn't use the right HLog half the time

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@987310 13f79535-47bb-0310-9956-ffa450edef68
Author: Jean-Daniel Cryans   2010-08-19 20:57:05 +00:00
parent 5e9f6d1528
commit 831afacddf
4 changed files with 41 additions and 21 deletions

CHANGES.txt

@@ -482,6 +482,7 @@ Release 0.21.0 - Unreleased
HBASE-2927 BaseScanner gets stale HRegionInfo in some race cases
HBASE-2928 Fault in logic in BinaryPrefixComparator leads to
ArrayIndexOutOfBoundsException (pranav via jgray)
HBASE-2924 TestLogRolling doesn't use the right HLog half the time
IMPROVEMENTS
HBASE-1760 Cleanup TODOs in HTable

HBaseTestingUtility.java

@@ -675,6 +675,26 @@ public class HBaseTestingUtility {
return rows;
}
/**
* Tool to get a reference to the region server object that holds the
* first region of the specified user table.
* It first searches the meta rows for the region of the specified table,
* then finds the index of the region server hosting that region in the
* mini cluster, and finally returns a reference to that server.
* @param tableName user table to look up in .META.
* @return region server that holds it, null if the row doesn't exist
* @throws IOException
*/
public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
throws IOException {
List<byte[]> metaRows = getMetaTableRows(tableName);
if (metaRows == null || metaRows.size() == 0) {
return null;
}
int index = hbaseCluster.getServerWith(metaRows.get(0));
return hbaseCluster.getRegionServerThreads().get(index).getRegionServer();
}
/**
* Starts a <code>MiniMRCluster</code> with a default number of
* <code>TaskTracker</code>'s.

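The new helper lets a test act on the server that actually hosts its table instead of assuming region server 0 does. A minimal usage sketch, not part of the commit; it assumes TEST_UTIL is an HBaseTestingUtility backed by a running mini cluster and TABLE_NAME names an already-created user table:

  // Sketch only: TEST_UTIL and TABLE_NAME are assumed test fixtures, e.g.
  //   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  //   private static final byte[] TABLE_NAME = Bytes.toBytes("testtable");
  HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
  if (rs != null) {
    HLog log = rs.getLog();  // the HLog that really serves the table's first region
    // ... roll or inspect that log, or abort rs, as the tests below do
  }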
TestScannerTimeout.java

@@ -24,6 +24,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
@@ -114,9 +115,7 @@ public class TestScannerTimeout {
*/
@Test
public void test2772() throws Exception {
int rs = TEST_UTIL.getHBaseCluster().getServerWith(
TEST_UTIL.getHBaseCluster().getRegions(
TABLE_NAME).get(0).getRegionName());
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
Scan scan = new Scan();
// Set a very high timeout, we want to test what happens when a RS
// fails but the region is recovered before the lease times out.
@@ -128,7 +127,7 @@ public class TestScannerTimeout {
HTable higherScanTimeoutTable = new HTable(conf, TABLE_NAME);
ResultScanner r = higherScanTimeoutTable.getScanner(scan);
// This takes way less than SCANNER_TIMEOUT*100
TEST_UTIL.getHBaseCluster().getRegionServer(rs).abort("die!");
rs.abort("die!");
Result[] results = r.next(NB_ROWS);
assertEquals(NB_ROWS, results.length);
r.close();

TestLogRolling.java

@@ -154,14 +154,15 @@ public class TestLogRolling {
private void startAndWriteData() throws IOException {
// When the META table can be opened, the region servers are running
new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
this.server = cluster.getRegionServerThreads().get(0).getRegionServer();
this.log = server.getLog();
// Create the test table and open it
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
admin.createTable(desc);
HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
server = TEST_UTIL.getRSForFirstRegionInTable(Bytes.toBytes(tableName));
this.log = server.getLog();
for (int i = 1; i <= 256; i++) { // 256 writes should cause 8 log rolls
Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", i)));
put.add(HConstants.CATALOG_FAMILY, null, value);
@@ -228,8 +229,6 @@ public class TestLogRolling {
@SuppressWarnings("null")
DatanodeInfo[] getPipeline(HLog log) throws IllegalArgumentException,
IllegalAccessException, InvocationTargetException {
// kill a datanode in the pipeline to force a log roll on the next sync()
OutputStream stm = log.getOutputStream();
Method getPipeline = null;
for (Method m : stm.getClass().getDeclaredMethods()) {
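The hunk above only shows the start of the reflection lookup; the idea is to pull the (non-public) write pipeline off the HLog's underlying DFS output stream, since older Hadoop releases do not expose it through a public API. A hedged sketch of how such a lookup might complete; the method-name match, setAccessible call, and reflective invocation are assumptions based on the loop shown above:

  // Sketch: find a declared method named like "getPipeline" on the stream,
  // make it accessible, and invoke it reflectively to obtain the datanodes.
  Method getPipeline = null;
  for (Method m : stm.getClass().getDeclaredMethods()) {
    if (m.getName().endsWith("getPipeline")) {
      getPipeline = m;
      getPipeline.setAccessible(true);
      break;
    }
  }
  assertTrue("Need an accessible getPipeline (HDFS-826)", getPipeline != null);
  return (DatanodeInfo[]) getPipeline.invoke(stm, new Object[] {});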
@@ -263,7 +262,19 @@ public class TestLogRolling {
.getDefaultReplication() > 1);
// When the META table can be opened, the region servers are running
new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
this.server = cluster.getRegionServer(0);
// Create the test table and open it
String tableName = getName();
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
if (admin.tableExists(tableName)) {
admin.disableTable(tableName);
admin.deleteTable(tableName);
}
admin.createTable(desc);
HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
server = TEST_UTIL.getRSForFirstRegionInTable(Bytes.toBytes(tableName));
this.log = server.getLog();
assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
@@ -277,18 +288,6 @@ public class TestLogRolling {
dfsCluster.waitActive();
assertTrue(dfsCluster.getDataNodes().size() >= fs.getDefaultReplication() + 1);
// Create the test table and open it
String tableName = getName();
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
if (admin.tableExists(tableName)) {
admin.disableTable(tableName);
admin.deleteTable(tableName);
}
admin.createTable(desc);
HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
writeData(table, 2);
table.setAutoFlush(true);
@@ -303,6 +302,7 @@ public class TestLogRolling {
DatanodeInfo[] pipeline = getPipeline(log);
assertTrue(pipeline.length == fs.getDefaultReplication());
// kill a datanode in the pipeline to force a log roll on the next sync()
assertTrue(dfsCluster.stopDataNode(pipeline[0].getName()) != null);
Thread.sleep(10000);
// this write should succeed, but trigger a log roll
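
Taken together, the reworked low-replication test creates the table first and only then resolves the server and its log from where the table actually landed. A condensed sketch of the fixed setup, reusing only identifiers that appear in the diff above (waits, replication assertions, and the final write elided):

  // Drop any leftover table from a previous run, then create and open it.
  String tableName = getName();
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  admin.createTable(desc);
  HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);

  // The fix: bind server and log to the region server hosting the new table,
  // rather than blindly taking cluster.getRegionServer(0).
  server = TEST_UTIL.getRSForFirstRegionInTable(Bytes.toBytes(tableName));
  this.log = server.getLog();

  // Kill a datanode in that log's pipeline; the next sync() should force a roll.
  DatanodeInfo[] pipeline = getPipeline(log);
  assertTrue(dfsCluster.stopDataNode(pipeline[0].getName()) != null);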