HBASE-2868 Do some small cleanups in org.apache.hadoop.hbase.regionserver.wal

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@980250 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2010-07-28 22:21:13 +00:00
parent 8f5e996795
commit 1280130acc
3 changed files with 57 additions and 32 deletions

View File: CHANGES.txt

@@ -811,6 +811,8 @@ Release 0.21.0 - Unreleased
    HBASE-2879  Offer ZK CLI outside of HBase Shell
               (Nicolas Spiegelberg via Stack)
    HBASE-2886  Add search box to site (Alex Baranau via Stack)
+   HBASE-2868  Do some small cleanups in org.apache.hadoop.hbase.regionserver.wal
+              (Alex Newman via Stack)
 
   NEW FEATURES
    HBASE-1961  HBase EC2 scripts

View File: …/org/apache/hadoop/hbase/regionserver/wal/TestLogActionsListener.java

@@ -58,9 +58,9 @@ public class TestLogActionsListener {
     conf = TEST_UTIL.getConfiguration();
     conf.setInt("hbase.regionserver.maxlogs", 5);
     fs = FileSystem.get(conf);
-    oldLogDir = new Path(TEST_UTIL.getTestDir(),
+    oldLogDir = new Path(HBaseTestingUtility.getTestDir(),
         HConstants.HREGION_OLDLOGDIR_NAME);
-    logDir = new Path(TEST_UTIL.getTestDir(),
+    logDir = new Path(HBaseTestingUtility.getTestDir(),
         HConstants.HREGION_LOGDIR_NAME);
   }
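
Both changed lines above are the same cleanup: getTestDir() is a static method, so it is now called through HBaseTestingUtility rather than through the TEST_UTIL instance. A minimal sketch of the resulting setup, assuming only the imports the test already has:

    // Static call through the class name; routing it through an instance
    // reference compiles but obscures that no instance state is involved.
    Path oldLogDir = new Path(HBaseTestingUtility.getTestDir(),
        HConstants.HREGION_OLDLOGDIR_NAME);
    Path logDir = new Path(HBaseTestingUtility.getTestDir(),
        HConstants.HREGION_LOGDIR_NAME);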

View File: …/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java

@@ -27,10 +27,14 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -39,22 +43,31 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.log4j.Level;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Test log deletion as logs are rolled.
  */
-public class TestLogRolling extends HBaseClusterTestCase {
+public class TestLogRolling {
   private static final Log LOG = LogFactory.getLog(TestLogRolling.class);
   private HRegionServer server;
   private HLog log;
   private String tableName;
   private byte[] value;
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static Configuration conf;
+  private static FileSystem fs;
+  private static MiniHBaseCluster cluster;
 
   // verbose logging on classes that are touched in these tests
   {
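
The hunk above is the core of the cleanup: TestLogRolling drops the JUnit 3 HBaseClusterTestCase base class for annotation-driven JUnit 4. A hedged skeleton of the shape the class takes after this hunk (member names from the diff; bodies elided):

    public class TestLogRolling {
      private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
      private static Configuration conf;
      private static FileSystem fs;
      private static MiniHBaseCluster cluster;

      // Runs once for the whole class, replacing the inherited setUp();
      // JUnit 4 requires it to be public static void.
      @BeforeClass
      public static void setUpBeforeClass() throws Exception { /* ... */ }

      // Tests are found by annotation, not by the "test" name prefix.
      @Test
      public void testLogRolling() throws Exception { /* ... */ }
    }

A consequence of @BeforeClass is that the mini cluster is started once and shared by every test method, which is why the cluster handles become static fields.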
@@ -95,40 +108,43 @@ public class TestLogRolling extends HBaseClusterTestCase {
   // Need to override this setup so we can edit the config before it gets sent
   // to the HDFS & HBase cluster startup.
-  @Override
-  protected void setUp() throws Exception {
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
     /**** configuration for testLogRolling ****/
     // Force a region split after every 768KB
-    conf.setLong("hbase.hregion.max.filesize", 768L * 1024L);
+    TEST_UTIL.getConfiguration().setLong("hbase.hregion.max.filesize", 768L * 1024L);
     // We roll the log after every 32 writes
-    conf.setInt("hbase.regionserver.maxlogentries", 32);
+    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.maxlogentries", 32);
     // For less frequently updated regions flush after every 2 flushes
-    conf.setInt("hbase.hregion.memstore.optionalflushcount", 2);
+    TEST_UTIL.getConfiguration().setInt("hbase.hregion.memstore.optionalflushcount", 2);
     // We flush the cache after every 8192 bytes
-    conf.setInt("hbase.hregion.memstore.flush.size", 8192);
+    TEST_UTIL.getConfiguration().setInt("hbase.hregion.memstore.flush.size", 8192);
     // Increase the amount of time between client retries
-    conf.setLong("hbase.client.pause", 15 * 1000);
+    TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 15 * 1000);
     // Reduce thread wake frequency so that other threads can get
     // a chance to run.
-    conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
+    TEST_UTIL.getConfiguration().setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
     /**** configuration for testLogRollOnDatanodeDeath ****/
     // make sure log.hflush() calls syncFs() to open a pipeline
-    conf.setBoolean("dfs.support.append", true);
-    // lower the namenode & datanode heartbeat so the namenode
+    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
+    // lower the namenode & datanode heartbeat so the namenode
     // quickly detects datanode failures
-    conf.setInt("heartbeat.recheck.interval", 5000);
-    conf.setInt("dfs.heartbeat.interval", 1);
-    // the namenode might still try to choose the recently-dead datanode
+    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
+    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
+    // the namenode might still try to choose the recently-dead datanode
     // for a pipeline, so try to a new pipeline multiple times
-    conf.setInt("dfs.client.block.write.retries", 30);
-    super.setUp();
+    TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 30);
+    TEST_UTIL.startMiniCluster(3);
+
+    conf = TEST_UTIL.getConfiguration();
+    cluster = TEST_UTIL.getHBaseCluster();
+    fs = TEST_UTIL.getDFSCluster().getFileSystem();
   }
 
   private void startAndWriteData() throws Exception {
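
Note the ordering the new setUpBeforeClass() enforces: every property is written to TEST_UTIL.getConfiguration() before startMiniCluster(3) runs, because the HDFS and HBase daemons read the configuration as they boot; the static conf, cluster, and fs handles are only captured afterwards. A condensed sketch of that ordering, using values from the hunk above:

    Configuration c = TEST_UTIL.getConfiguration();
    c.setLong("hbase.hregion.max.filesize", 768L * 1024L); // split after ~768KB
    c.setInt("hbase.regionserver.maxlogentries", 32);      // roll the log every 32 writes
    c.setBoolean("dfs.support.append", true);              // let hflush() reach syncFs()

    TEST_UTIL.startMiniCluster(3);                         // boots HDFS + HBase with 3 slaves

    // Only valid once the mini cluster is up:
    FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();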
@@ -163,6 +179,7 @@ public class TestLogRolling extends HBaseClusterTestCase {
    *
    * @throws Exception
    */
+  @Test
   public void testLogRolling() throws Exception {
     this.tableName = getName();
     try {
@@ -190,6 +207,11 @@ public class TestLogRolling extends HBaseClusterTestCase {
     }
   }
 
+  private static String getName() {
+    // TODO Auto-generated method stub
+    return "TestLogRolling";
+  }
+
   void writeData(HTable table, int rownum) throws Exception {
     Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", rownum)));
     put.add(HConstants.CATALOG_FAMILY, null, value);
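
A small detail in writeData() worth calling out: String.format("%1$04d", rownum) zero-pads the row number so the byte-wise row ordering HBase uses matches numeric order ("row0002" sorts before "row0010", whereas "row10" would sort before "row2"). For example:

    // "row" + String.format("%1$04d", 2)  -> "row0002"
    // "row" + String.format("%1$04d", 10) -> "row0010"
    Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", rownum)));
    put.add(HConstants.CATALOG_FAMILY, null, value);  // null qualifier, as in the diff
    table.put(put);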
@@ -202,30 +224,31 @@ public class TestLogRolling extends HBaseClusterTestCase {
       // continue
     }
   }
 
   /**
    * Tests that logs are rolled upon detecting datanode death
    * Requires an HDFS jar with HDFS-826 & syncFs() support (HDFS-200)
-   *
+   *
    * @throws Exception
    */
+  @Test
   public void testLogRollOnDatanodeDeath() throws Exception {
-    assertTrue("This test requires HLog file replication.",
+    assertTrue("This test requires HLog file replication.",
         fs.getDefaultReplication() > 1);
 
     // When the META table can be opened, the region servers are running
     new HTable(conf, HConstants.META_TABLE_NAME);
     this.server = cluster.getRegionServer(0);
     this.log = server.getLog();
 
     assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
     // don't run this test without append support (HDFS-200 & HDFS-142)
     assertTrue("Need append support for this test", FSUtils.isAppendSupported(conf));
 
     // add up the datanode count, to ensure proper replication when we kill 1
-    dfsCluster.startDataNodes(conf, 1, true, null, null);
-    dfsCluster.waitActive();
-    assertTrue(dfsCluster.getDataNodes().size() >=
+    TEST_UTIL.getDFSCluster().startDataNodes(conf, 1, true, null, null);
+    TEST_UTIL.getDFSCluster().waitActive();
+    assertTrue(TEST_UTIL.getDFSCluster().getDataNodes().size() >=
         fs.getDefaultReplication() + 1);
 
     // Create the test table and open it
@@ -239,12 +262,12 @@ public class TestLogRolling extends HBaseClusterTestCase {
     long curTime = System.currentTimeMillis();
     long oldFilenum = log.getFilenum();
-    assertTrue("Log should have a timestamp older than now",
+    assertTrue("Log should have a timestamp older than now",
         curTime > oldFilenum && oldFilenum != -1);
 
     // normal write
     writeData(table, 1);
-    assertTrue("The log shouldn't have rolled yet",
+    assertTrue("The log shouldn't have rolled yet",
         oldFilenum == log.getFilenum());
 
     // kill a datanode in the pipeline to force a log roll on the next sync()
@@ -257,12 +280,12 @@ public class TestLogRolling extends HBaseClusterTestCase {
         break;
       }
     }
-    assertTrue("Need DFSOutputStream.getPipeline() for this test",
+    assertTrue("Need DFSOutputStream.getPipeline() for this test",
         getPipeline != null);
     Object repl = getPipeline.invoke(stm, new Object []{} /*NO_ARGS*/);
     DatanodeInfo[] pipeline = (DatanodeInfo[]) repl;
     assertTrue(pipeline.length == fs.getDefaultReplication());
-    DataNodeProperties dnprop = dfsCluster.stopDataNode(pipeline[0].getName());
+    DataNodeProperties dnprop = TEST_UTIL.getDFSCluster().stopDataNode(pipeline[0].getName());
     assertTrue(dnprop != null);
 
     // this write should succeed, but trigger a log roll
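
For context, the getPipeline handle used above is obtained reflectively, because DFSOutputStream.getPipeline() (added by HDFS-826) is not public in the HDFS versions this test targets. The loop whose tail appears at the top of this hunk looks roughly like the following reconstruction (hedged; only its last lines are shown in the diff, and stm is the log writer's output stream as in the invoke() call above):

    // Reconstruction: scan the write stream's declared methods for
    // getPipeline, make it callable, then ask which datanodes form the
    // current write pipeline.
    Method getPipeline = null;
    for (Method m : stm.getClass().getDeclaredMethods()) {
      if (m.getName().endsWith("getPipeline")) {
        getPipeline = m;
        getPipeline.setAccessible(true);
        break;
      }
    }
    assertTrue("Need DFSOutputStream.getPipeline() for this test",
        getPipeline != null);
    DatanodeInfo[] pipeline =
        (DatanodeInfo[]) getPipeline.invoke(stm, new Object[] {} /* NO_ARGS */);

Stopping pipeline[0] then forces the next sync() to see a dead datanode, and the test asserts that the log rolls in response.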