HBASE-4313 Refactor TestHBaseFsck to make adding individual hbck tests easier
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1167003 13f79535-47bb-0310-9956-ffa450edef68
Parent: 34f318b625
Commit: 9675cda5d1
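The point of this change is that each hbck scenario becomes its own small JUnit test built from shared helpers. As a rough sketch of that pattern (helper names are taken from the diff below; the test name, table name, and corruption step are made up for illustration), an individual test inside TestHBaseFsck looks roughly like:

  // Sketch only: shape of one self-contained hbck test under this refactor.
  @Test
  public void testSomeNewInconsistency() throws Exception {
    String table = "tableSomeNewInconsistency";   // hypothetical table name
    try {
      setupTable(table);                  // clean table with split keys A/B/C
      assertNoErrors(doFsck(false));      // baseline: hbck reports nothing

      // ... corrupt META / region assignments for this one scenario ...

      // hbck should now report exactly the expected error codes
      assertErrors(doFsck(false),
          new ERROR_CODE[] { ERROR_CODE.HOLE_IN_REGION_CHAIN });
    } finally {
      deleteTable(table);                 // leave the cluster clean for the next test
    }
  }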
CHANGES.txt
@@ -570,6 +570,8 @@ Release 0.90.5 - Unreleased
               META/ROOT regions (todd)
   HBASE-4323  Add debug logging when AssignmentManager can't make a plan
               for a region (todd)
+  HBASE-4313  Refactor TestHBaseFsck to make adding individual hbck tests
+              easier (Jonathan Hsieh)

 Release 0.90.4 - August 10, 2011

TestHBaseFsck.java
@@ -20,72 +20,76 @@
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
import org.apache.zookeeper.KeeperException;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

/**
 * This tests HBaseFsck's ability to detect reasons for inconsistent tables.
 */
public class TestHBaseFsck {

  final Log LOG = LogFactory.getLog(getClass());
  private final static HBaseTestingUtility TEST_UTIL =
      new HBaseTestingUtility();
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private final static Configuration conf = TEST_UTIL.getConfiguration();
  private final static byte[] TABLE = Bytes.toBytes("table");
  private final static byte[] FAM = Bytes.toBytes("fam");

  // for the instance, reset every test run
  private HTable tbl;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(3);
  }

  private List doFsck(boolean fix) throws Exception {
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  private List<ERROR_CODE> doFsck(boolean fix) throws Exception {
    HBaseFsck fsck = new HBaseFsck(conf);
    fsck.displayFullReport(); // i.e. -details
    fsck.setTimeLag(0);
    fsck.setFixErrors(fix);
    fsck.doWork();
    return fsck.getErrors().getErrorList();
  }

  private void assertNoErrors(List errs) throws Exception {
  private void assertNoErrors(List<ERROR_CODE> errs) throws Exception {
    assertEquals(0, errs.size());
  }

  private void assertErrors(List errs, ERROR_CODE[] expectedErrors) {
  private void assertErrors(List<ERROR_CODE> errs, ERROR_CODE[] expectedErrors) {
    assertEquals(Arrays.asList(expectedErrors), errs);
  }

  @Test
  public void testHBaseFsck() throws Exception {
    assertNoErrors(doFsck(false));

    TEST_UTIL.createTable(TABLE, FAM);
    String table = "tableBadMetaAssign";
    TEST_UTIL.createTable(Bytes.toBytes(table), FAM);

    // We created 1 table, should be fine
    assertNoErrors(doFsck(false));

@@ -127,7 +131,7 @@ public class TestHBaseFsck {
    assertNoErrors(doFsck(false));

    // comment needed - what is the purpose of this line
    new HTable(conf, TABLE).getScanner(new Scan());
    new HTable(conf, Bytes.toBytes(table)).getScanner(new Scan());;
  }

  private HRegionInfo createRegion(Configuration conf, final HTableDescriptor

@@ -142,76 +146,122 @@ public class TestHBaseFsck {
    return hri;
  }

  @Test
  /**
   * Tests for inconsistencies in the META data (duplicate start keys, or holes)
   * Setup a clean table before we start mucking with it.
   *
   * @throws IOException
   * @throws InterruptedException
   * @throws KeeperException
   */
  public void testHBaseFsckMeta() throws Exception {
  void setupTable(String tablename) throws Exception {
    byte[][] startKeys = new byte[][] { Bytes.toBytes("A"), Bytes.toBytes("B"),
        Bytes.toBytes("C") };
    HTableDescriptor desc = new HTableDescriptor(tablename);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
    desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
    TEST_UTIL.getHBaseAdmin().createTable(desc, startKeys);
    tbl = new HTable(TEST_UTIL.getConfiguration(), tablename);
  }

  /**
   * delete table in preparation for next test
   *
   * @param tablename
   * @throws IOException
   */
  void deleteTable(String tablename) throws IOException {
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    byte[] tbytes = Bytes.toBytes(tablename);
    admin.disableTable(tbytes);
    admin.deleteTable(tbytes);
  }


  /**
   * This creates a clean table and confirms that the table is clean.
   */
  @Test
  public void testHBaseFsckClean() throws Exception {
    assertNoErrors(doFsck(false));

    HTable tbl = TEST_UTIL.createTable(Bytes.toBytes("table2"), FAM);

    Map<HRegionInfo, HServerAddress> hris = tbl.getRegionsInfo();
    HRegionInfo hriOrig = hris.keySet().iterator().next();
    Map<HRegionInfo, ServerName> locations = tbl.getRegionLocations();
    ServerName rsAddressOrig = locations.get(hriOrig);

    byte[][] startKeys = new byte[][]{
        HConstants.EMPTY_BYTE_ARRAY,
        Bytes.toBytes("A"),
        Bytes.toBytes("B"),
        Bytes.toBytes("C")
    };
    TEST_UTIL.createMultiRegions(conf, tbl, FAM, startKeys);
    Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
    FileSystem fs = rootDir.getFileSystem(conf);
    Path p = new Path(rootDir + "/table2", hriOrig.getEncodedName());
    fs.delete(p, true);

    Thread.sleep(1 * 1000);
    ArrayList servers = new ArrayList();
    servers.add(rsAddressOrig);
    String table = "tableClean";
    try {
      HBaseFsckRepair.fixDupeAssignment(TEST_UTIL.getHBaseAdmin(), hriOrig, servers);
    } catch (IOException ex) {
      ex = RemoteExceptionHandler.checkIOException(ex);
      if (!(ex instanceof UnknownRegionException)) {
        fail("Unexpected exception: " + ex);
      }
      setupTable(table);

      // We created 1 table, should be fine
      assertNoErrors(doFsck(false));
    } finally {
      deleteTable(table);
    }
  }

    // We created 1 table, should be fine
    assertNoErrors(doFsck(false));
  /**
   * This creates a bad table with regions that have a duplicate start key
   */
  @Test
  public void testDupeStartKey() throws Exception {
    String table = "tableDupeStartKey";
    try {
      setupTable(table);
      assertNoErrors(doFsck(false));

      // Now let's mess it up, by adding a region with a duplicate startkey
      HRegionInfo hriDupe = createRegion(conf, tbl.getTableDescriptor(),
          Bytes.toBytes("A"), Bytes.toBytes("A2"));
      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe);
      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .waitForAssignment(hriDupe);
      assertErrors(doFsck(false), new ERROR_CODE[]{ERROR_CODE.DUPE_STARTKEYS});
      // Now let's mess it up, by adding a region with a duplicate startkey
      HRegionInfo hriDupe = createRegion(conf, tbl.getTableDescriptor(),
          Bytes.toBytes("A"), Bytes.toBytes("A2"));
      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe);
      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .waitForAssignment(hriDupe);

      // Mess it up by creating an overlap in the metadata
      HRegionInfo hriOverlap = createRegion(conf, tbl.getTableDescriptor(),
          Bytes.toBytes("A2"), Bytes.toBytes("B2"));
      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap);
      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .waitForAssignment(hriOverlap);
      assertErrors(doFsck(false), new ERROR_CODE[]{
          ERROR_CODE.DUPE_STARTKEYS, ERROR_CODE.OVERLAP_IN_REGION_CHAIN,
          ERROR_CODE.OVERLAP_IN_REGION_CHAIN});
      assertErrors(doFsck(false),
          new ERROR_CODE[] { ERROR_CODE.DUPE_STARTKEYS });
    } finally {
      deleteTable(table);
    }
  }

      // Mess it up by leaving a hole in the meta data
      HRegionInfo hriHole = createRegion(conf, tbl.getTableDescriptor(),
          Bytes.toBytes("D"), Bytes.toBytes("E"));
      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriHole);
      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .waitForAssignment(hriHole);
      // assertError(doFsck(false), ERROR_CODE.OVERLAP_IN_REGION_CHAIN);
      assertErrors(doFsck(false), new ERROR_CODE[]{ ERROR_CODE.DUPE_STARTKEYS,
          ERROR_CODE.OVERLAP_IN_REGION_CHAIN, ERROR_CODE.OVERLAP_IN_REGION_CHAIN,
          ERROR_CODE.HOLE_IN_REGION_CHAIN });
  /**
   * This creates a bad table where a start key contained in another region.
   */
  @Test
  public void testCoveredStartKey() throws Exception {
    String table = "tableCoveredStartKey";
    try {
      setupTable(table);

      // Mess it up by creating an overlap in the metadata
      HRegionInfo hriOverlap = createRegion(conf, tbl.getTableDescriptor(),
          Bytes.toBytes("A2"), Bytes.toBytes("B2"));
      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap);
      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .waitForAssignment(hriOverlap);
      assertErrors(doFsck(false), new ERROR_CODE[] {
          ERROR_CODE.OVERLAP_IN_REGION_CHAIN,
          ERROR_CODE.OVERLAP_IN_REGION_CHAIN });
    } finally {
      deleteTable(table);
    }
  }

  /**
   * This creates a bad table with a hole in meta.
   */
  @Test
  public void testMetaHole() throws Exception {
    String table = "tableMetaHole";
    try {
      setupTable(table);

      // Mess it up by leaving a hole in the meta data
      HRegionInfo hriHole = createRegion(conf, tbl.getTableDescriptor(),
          Bytes.toBytes("D"), Bytes.toBytes("E"));
      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriHole);
      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .waitForAssignment(hriHole);
      assertErrors(doFsck(false),
          new ERROR_CODE[] { ERROR_CODE.HOLE_IN_REGION_CHAIN });
    } finally {
      deleteTable(table);
    }
  }

}
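The doFsck helper above also doubles as a recipe for running hbck programmatically in report-only mode. A minimal sketch, assuming only the HBaseFsck calls shown in this diff plus HBaseConfiguration.create() for the configuration (the method name reportOnlyHbck is hypothetical):

  // Sketch only: report-only hbck pass mirroring doFsck(false) above.
  static List<ERROR_CODE> reportOnlyHbck() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseFsck fsck = new HBaseFsck(conf);
    fsck.displayFullReport();    // equivalent of the -details command-line flag
    fsck.setTimeLag(0);          // 0 = don't exclude recently modified entries from checking
    fsck.setFixErrors(false);    // report inconsistencies, do not attempt repairs
    fsck.doWork();
    return fsck.getErrors().getErrorList();
  }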