HBASE-1971 Unit test the full WAL replay cycle

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@880956 13f79535-47bb-0310-9956-ffa450edef68
Jean-Daniel Cryans 2009-11-16 21:08:04 +00:00
parent fe4b257051
commit 8dd763874c
6 changed files with 427 additions and 73 deletions

View File

@@ -193,6 +193,7 @@ Release 0.21.0 - Unreleased
previous versions of the specified column
(Jeremiah Jacquet via Stack)
HBASE-1961 HBase EC2 scripts
HBASE-1971 Unit test the full WAL replay cycle
OPTIMIZATIONS
HBASE-410 [testing] Speed up the test suite

View File

@@ -0,0 +1,33 @@
/**
* Copyright 2009 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.WatchedEvent;
/**
* Class used as an empty watcher for the tests
*/
public class EmptyWatcher implements Watcher {
public static EmptyWatcher instance = new EmptyWatcher();
private EmptyWatcher() {}
public void process(WatchedEvent event) {}
}
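EmptyWatcher gives tests a no-op callback wherever the ZooKeeper client API demands a Watcher but the events are irrelevant. A minimal usage sketch (the quorum string and the standalone class are assumptions for illustration, not part of this commit):

import org.apache.hadoop.hbase.EmptyWatcher;
import org.apache.zookeeper.ZooKeeper;

public class EmptyWatcherUsage {
  public static void main(String[] args) throws Exception {
    // Connect a raw ZooKeeper client; all watch events are silently dropped.
    ZooKeeper zk = new ZooKeeper("localhost:2181", 5000, EmptyWatcher.instance);
    try {
      System.out.println("session: 0x" + Long.toHexString(zk.getSessionId()));
    } finally {
      zk.close();
    }
  }
}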

View File

@@ -44,8 +44,12 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.zookeeper.ZooKeeper;
/**
* Facility for testing HBase. Added as a tool to abet JUnit4 testing. Replaces
@@ -377,8 +381,7 @@ public class HBaseTestingUtility {
meta.delete(new Delete(row));
}
// flush cache of regions
HBaseAdmin admin = new HBaseAdmin(getConfiguration());
HConnection conn = admin.getConnection();
HConnection conn = table.getConnection();
conn.clearRegionCache();
}
@@ -470,4 +473,48 @@ public class HBaseTestingUtility {
((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
}
}
}
/**
* Expire the Master's session
* @throws Exception
*/
public void expireMasterSession() throws Exception {
HMaster master = hbaseCluster.getMaster();
expireSession(master.getZooKeeperWrapper());
}
/**
* Expire a region server's session
* @param index which RS
* @throws Exception
*/
public void expireRegionServerSession(int index) throws Exception {
HRegionServer rs = hbaseCluster.getRegionServer(index);
expireSession(rs.getZooKeeperWrapper());
}
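/**
* Expire a node's ZooKeeper session, simulating the scenario described in
* HBASE-1232 and http://wiki.apache.org/hadoop/ZooKeeper/FAQ#4
* @param nodeZK wrapper around the session to expire
* @throws Exception
*/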
public void expireSession(ZooKeeperWrapper nodeZK) throws Exception {
ZooKeeperWrapper zkw = new ZooKeeperWrapper(conf, EmptyWatcher.instance);
String quorumServers = zkw.getQuorumServers();
int sessionTimeout = 5 * 1000; // 5 seconds
byte[] password = nodeZK.getSessionPassword();
long sessionID = nodeZK.getSessionID();
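// Open a second handle bound to the same session (same id and password)
// and close it; per the ZooKeeper FAQ this expires the session for the
// original holder as well.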
ZooKeeper zk = new ZooKeeper(quorumServers,
sessionTimeout, EmptyWatcher.instance, sessionID, password);
zk.close();
Thread.sleep(sessionTimeout * 5L);
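// Opening a table against .META. forces a fresh connection, verifying
// that the cluster recovered from the expiration.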
new HTable(conf, HConstants.META_TABLE_NAME);
}
/**
* Get the HBase cluster
* @return hbase cluster
*/
public MiniHBaseCluster getHbaseCluster() {
return hbaseCluster;
}
}
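A sketch of how these helpers are meant to be driven (hypothetical standalone framing; the tests in this commit use JUnit fixtures instead of main):

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class SessionExpiryExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(2);
    try {
      // Expire region server 0's ZooKeeper session and let the cluster
      // settle before exercising it further.
      util.expireRegionServerSession(0);
    } finally {
      util.shutdownMiniCluster();
    }
  }
}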

View File

@@ -0,0 +1,127 @@
/**
* Copyright 2009 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.*;
import static org.junit.Assert.assertEquals;
public class TestFullLogReconstruction {
private final static HBaseTestingUtility
TEST_UTIL = new HBaseTestingUtility();
private final static byte[] TABLE_NAME = Bytes.toBytes("tabletest");
private final static byte[] FAMILY = Bytes.toBytes("family");
private HBaseConfiguration conf;
/**
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.getConfiguration().
setInt("hbase.regionserver.flushlogentries", 1);
TEST_UTIL.startMiniCluster(2);
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@Before
public void setUp() throws Exception {
conf = TEST_UTIL.getConfiguration();
}
/**
* @throws java.lang.Exception
*/
@After
public void tearDown() throws Exception {
}
/**
* Test the whole reconstruction loop. Build a table with regions aaa to zzz,
* load every one of them multiple times with the same data, and flush at
* some point. Kill one of the region servers and scan the table. We should
* see all the rows.
* @throws Exception
*/
@Test
public void testReconstruction() throws Exception {
TEST_UTIL.createTable(TABLE_NAME, FAMILY);
HTable table = new HTable(TABLE_NAME);
TEST_UTIL.createMultiRegions(table, FAMILY);
// Load up the table with simple rows and count them
int initialCount = TEST_UTIL.loadTable(table, FAMILY);
Scan scan = new Scan();
ResultScanner results = table.getScanner(scan);
int count = 0;
for (Result res : results) {
count++;
}
results.close();
assertEquals(initialCount, count);
for (int i = 0; i < 4; i++) {
TEST_UTIL.loadTable(table, FAMILY);
if (i == 2) {
TEST_UTIL.flush();
}
}
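// Kill region server 0's ZooKeeper session: its WAL still holds edits
// that were not flushed, so they must be split and replayed before the
// scan below can see them.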
TEST_UTIL.expireRegionServerSession(0);
scan = new Scan();
results = table.getScanner(scan);
int newCount = 0;
for (Result res : results) {
newCount++;
}
assertEquals(count, newCount);
}
}
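The test counts rows with two identical scanner loops; a small helper like this (hypothetical, not part of the commit) captures the pattern:

import java.io.IOException;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;

public final class ScanCounter {
  /** Count the rows returned by a full scan of the table. */
  public static int countRows(HTable table) throws IOException {
    ResultScanner scanner = table.getScanner(new Scan());
    try {
      int count = 0;
      for (Result ignored : scanner) {
        count++;
      }
      return count;
    } finally {
      scanner.close();
    }
  }
}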

View File

@@ -19,63 +19,63 @@
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.junit.*;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
public class TestZooKeeper extends HBaseClusterTestCase {
private static class EmptyWatcher implements Watcher {
public static EmptyWatcher instance = new EmptyWatcher();
private EmptyWatcher() {}
public void process(WatchedEvent event) {}
}
import java.io.IOException;
@Override
protected void setUp() throws Exception {
setOpenMetaTable(false);
super.setUp();
}
public class TestZooKeeper {
/**
* @throws IOException
private final static HBaseTestingUtility
TEST_UTIL = new HBaseTestingUtility();
private HBaseConfiguration conf;
private MiniHBaseCluster cluster;
/**
* @throws java.lang.Exception
*/
public void testWritesRootRegionLocation() throws IOException {
ZooKeeperWrapper zooKeeper = new ZooKeeperWrapper(conf, EmptyWatcher.instance);
HServerAddress zooKeeperRootAddress = zooKeeper.readRootRegionLocation();
assertNull(zooKeeperRootAddress);
HMaster master = cluster.getMaster();
HServerAddress masterRootAddress = master.getRegionManager().getRootRegionLocation();
assertNull(masterRootAddress);
new HTable(conf, HConstants.META_TABLE_NAME);
zooKeeperRootAddress = zooKeeper.readRootRegionLocation();
assertNotNull(zooKeeperRootAddress);
masterRootAddress = master.getRegionManager().getRootRegionLocation();
assertEquals(masterRootAddress, zooKeeperRootAddress);
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster(1);
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@Before
public void setUp() throws Exception {
conf = TEST_UTIL.getConfiguration();
cluster = TEST_UTIL.getHbaseCluster();
}
/**
* @throws java.lang.Exception
*/
@After
public void tearDown() throws Exception {}
/**
* See HBASE-1232 and http://wiki.apache.org/hadoop/ZooKeeper/FAQ#4.
* @throws IOException
* @throws InterruptedException
*/
public void testClientSessionExpired() throws IOException, InterruptedException {
@Test
public void testClientSessionExpired()
throws IOException, InterruptedException {
new HTable(conf, HConstants.META_TABLE_NAME);
ZooKeeperWrapper zkw = new ZooKeeperWrapper(conf, EmptyWatcher.instance);
@@ -86,7 +86,8 @@ public class TestZooKeeper extends HBaseClusterTestCase {
long sessionID = connectionZK.getSessionID();
byte[] password = connectionZK.getSessionPassword();
ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout, EmptyWatcher.instance, sessionID, password);
ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout,
EmptyWatcher.instance, sessionID, password);
zk.close();
Thread.sleep(sessionTimeout * 3L);
@@ -94,56 +95,50 @@ public class TestZooKeeper extends HBaseClusterTestCase {
System.err.println("ZooKeeper should have timed out");
connection.relocateRegion(HConstants.ROOT_TABLE_NAME, HConstants.EMPTY_BYTE_ARRAY);
}
@Test
public void testRegionServerSessionExpired() throws Exception {
this.conf.setBoolean("hbase.regionserver.restart.on.zk.expire", true);
new HTable(conf, HConstants.META_TABLE_NAME);
HRegionServer rs = cluster.getRegionServer(0);
sessionExpirationHelper(rs.getZooKeeperWrapper());
TEST_UTIL.expireRegionServerSession(0);
testSanity();
}
@Test
public void testMasterSessionExpired() throws Exception {
new HTable(conf, HConstants.META_TABLE_NAME);
HMaster master = cluster.getMaster();
sessionExpirationHelper(master.getZooKeeperWrapper());
TEST_UTIL.expireMasterSession();
testSanity();
}
public void sessionExpirationHelper(ZooKeeperWrapper nodeZK) throws Exception{
ZooKeeperWrapper zkw = new ZooKeeperWrapper(conf, EmptyWatcher.instance);
String quorumServers = zkw.getQuorumServers();
int sessionTimeout = 5 * 1000; // 5 seconds
byte[] password = nodeZK.getSessionPassword();
long sessionID = nodeZK.getSessionID();
ZooKeeper zk = new ZooKeeper(quorumServers,
sessionTimeout, EmptyWatcher.instance, sessionID, password);
zk.close();
Thread.sleep(sessionTimeout * 3L);
new HTable(conf, HConstants.META_TABLE_NAME);
/**
* Make sure we can use the cluster
* @throws Exception
*/
public void testSanity() throws Exception {
HBaseAdmin admin = new HBaseAdmin(conf);
HTableDescriptor desc = new HTableDescriptor("test");
String tableName = "test"+System.currentTimeMillis();
HTableDescriptor desc =
new HTableDescriptor(tableName);
HColumnDescriptor family = new HColumnDescriptor("fam");
desc.addFamily(family);
admin.createTable(desc);
HTable table = new HTable("test");
HTable table = new HTable(tableName);
Put put = new Put(Bytes.toBytes("testrow"));
put.add(Bytes.toBytes("fam"), Bytes.toBytes("col"), Bytes.toBytes("testdata"));
put.add(Bytes.toBytes("fam"),
Bytes.toBytes("col"), Bytes.toBytes("testdata"));
table.put(put);
}
@Test
public void testMultipleZK() {
try {
HTable localMeta = new HTable(conf, HConstants.META_TABLE_NAME);
HBaseConfiguration otherConf = new HBaseConfiguration(conf);
otherConf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
HTable ipMeta = new HTable(otherConf, HConstants.META_TABLE_NAME);
// dummy, just to open the connection
localMeta.exists(new Get(HConstants.LAST_ROW));
ipMeta.exists(new Get(HConstants.LAST_ROW));

View File

@@ -0,0 +1,151 @@
/**
* Copyright 2009 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableSet;
import java.util.concurrent.ConcurrentSkipListSet;
public class TestStoreReconstruction {
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private Path dir;
private MiniDFSCluster cluster;
private final String TABLE = "testtable";
private final int TOTAL_EDITS = 10000;
private HBaseConfiguration conf;
/**
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception { }
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception { }
/**
* @throws java.lang.Exception
*/
@Before
public void setUp() throws Exception {
conf = TEST_UTIL.getConfiguration();
cluster = new MiniDFSCluster(conf, 3, true, (String[])null);
// Set the hbase.rootdir to be the home directory in mini dfs.
TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR,
this.cluster.getFileSystem().getHomeDirectory().toString());
this.dir = new Path("/hbase", TABLE);
conf.setInt("hbase.regionserver.flushlogentries", 1);
if (cluster.getFileSystem().exists(dir)) {
cluster.getFileSystem().delete(dir, true);
}
}
/**
* @throws java.lang.Exception
*/
@After
public void tearDown() throws Exception {}
/**
* Create a Store from the result of an HLog split and verify that we only
* see the good edits
* @throws Exception
*/
@Test
public void runReconstructionLog() throws Exception {
byte[] family = Bytes.toBytes("column");
HColumnDescriptor hcd = new HColumnDescriptor(family);
HTableDescriptor htd = new HTableDescriptor(TABLE);
htd.addFamily(hcd);
HRegionInfo info = new HRegionInfo(htd, null, null, false);
HLog log = new HLog(cluster.getFileSystem(), this.dir, conf, null);
HRegion region = new HRegion(dir, log,
cluster.getFileSystem(), conf, info, null);
List<KeyValue> result = new ArrayList<KeyValue>();
// Empty set to get all columns
NavigableSet<byte[]> qualifiers =
new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
final byte[] tableName = Bytes.toBytes(TABLE);
final byte[] rowName = tableName;
final byte[] regionName = info.getRegionName();
// Add 10 000 edits to HLog on the good family
List<KeyValue> edit = new ArrayList<KeyValue>();
for (int j = 0; j < TOTAL_EDITS; j++) {
byte[] qualifier = Bytes.toBytes(Integer.toString(j));
byte[] column = Bytes.toBytes("column:" + Integer.toString(j));
edit.add(new KeyValue(rowName, family, qualifier,
System.currentTimeMillis(), column));
log.append(regionName, tableName, edit,
System.currentTimeMillis());
edit.clear();
}
// Add a cache flush, shouldn't have any effect
long logSeqId = log.startCacheFlush();
log.completeCacheFlush(regionName, tableName, logSeqId);
// Add an edit to another family, should be skipped.
edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName,
System.currentTimeMillis(), rowName));
log.append(regionName, tableName, edit,
System.currentTimeMillis());
log.sync();
List<Path> splits =
HLog.splitLog(new Path(conf.get(HConstants.HBASE_DIR)),
this.dir, cluster.getFileSystem(), conf);
// Split should generate only 1 file since there's only 1 region
assertEquals(1, splits.size());
// Make sure the file exists
assertTrue(cluster.getFileSystem().exists(splits.get(0)));
// This will run the log reconstruction
Store store = new Store(dir, region, hcd, cluster.getFileSystem(),
splits.get(0), conf, null);
Get get = new Get(rowName);
store.get(get, qualifiers, result);
// Make sure we only see the good edits
assertEquals(TOTAL_EDITS, result.size());
}
}
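The two negative cases above (the cache-flush marker and the edit on "another family") pin down the rule the replay applies: an edit is reconstructed only if it targets this store's family and is newer than anything the store has already persisted. A hypothetical distillation of that rule (illustrative only, not the actual Store code):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public final class ReplayFilter {
  /**
   * Whether a WAL edit should be replayed into a store's memstore.
   * Illustrative only: the real check lives inside Store's
   * reconstruction-log handling.
   */
  public static boolean shouldReplay(KeyValue kv, byte[] family,
      long editSeqId, long maxSeqIdInStoreFiles) {
    return Bytes.equals(kv.getFamily(), family)
        && editSeqId > maxSeqIdInStoreFiles;
  }
}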