diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 1b362af0466..e507df4e172 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.util; +import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintWriter; @@ -184,7 +185,7 @@ import com.google.protobuf.ServiceException; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving -public class HBaseFsck extends Configured { +public class HBaseFsck extends Configured implements Closeable { public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000; private static final int MAX_NUM_THREADS = 50; // #threads to contact regions @@ -395,7 +396,8 @@ public class HBaseFsck extends Configured { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { - unlockHbck(); + IOUtils.closeStream(HBaseFsck.this); + unlockHbck(); } }); LOG.debug("Launching hbck"); @@ -611,6 +613,11 @@ public class HBaseFsck extends Configured { return result; } + @Override + public void close() throws IOException { + IOUtils.cleanup(null, admin, meta, connection); + } + private static class RegionBoundariesInformation { public byte [] regionName; public byte [] metaFirstKey; @@ -4082,6 +4089,7 @@ public class HBaseFsck extends Configured { public int run(String[] args) throws Exception { HBaseFsck hbck = new HBaseFsck(getConf()); hbck.exec(hbck.executor, args); + hbck.close(); return hbck.getRetCode(); } }; @@ -4300,7 +4308,7 @@ public class HBaseFsck extends Configured { setRetCode(code); } } finally { - IOUtils.cleanup(null, connection, meta, admin); + IOUtils.cleanup(null, this); } return this; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index b5740b26b2c..666040869fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -210,7 +210,7 @@ public class HBaseFsckRepair { HRegion region = HRegion.createHRegion(hri, root, conf, htd, null); // Close the new region to flush to disk. Close log file too. 
- region.close(); + HRegion.closeHRegion(region); return region; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index e59e07b759c..183201e6926 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -37,7 +37,6 @@ import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.NavigableMap; import java.util.Set; import java.util.concurrent.Callable; @@ -78,7 +77,6 @@ import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.client.Put; @@ -132,7 +130,10 @@ public class TestHBaseFsck { private final static byte[] FAM = Bytes.toBytes(FAM_STR); private final static int REGION_ONLINE_TIMEOUT = 800; private static RegionStates regionStates; - private static ExecutorService executorService; + private static ExecutorService tableExecutorService; + private static ScheduledThreadPoolExecutor hbfsckExecutorService; + private static ClusterConnection connection; + private static Admin admin; // for the instance, reset every test run private HTable tbl; @@ -145,21 +146,36 @@ public class TestHBaseFsck { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt("hbase.regionserver.handler.count", 2); - TEST_UTIL.getConfiguration().setInt("hbase.regionserver.metahandler.count", 2); + conf.setInt("hbase.regionserver.handler.count", 2); + conf.setInt("hbase.regionserver.metahandler.count", 2); + + final int poolSize = 7; + + conf.setInt("hbase.htable.threads.max", poolSize); + conf.setInt("hbase.hconnection.threads.max", 2 * poolSize); + conf.setInt("hbase.hconnection.threads.core", poolSize); TEST_UTIL.startMiniCluster(3); - executorService = new ThreadPoolExecutor(1, Integer.MAX_VALUE, 60, TimeUnit.SECONDS, + tableExecutorService = new ThreadPoolExecutor(1, poolSize, 60, TimeUnit.SECONDS, new SynchronousQueue(), Threads.newDaemonThreadFactory("testhbck")); + hbfsckExecutorService = new ScheduledThreadPoolExecutor(poolSize); + AssignmentManager assignmentManager = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager(); regionStates = assignmentManager.getRegionStates(); - TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, true); + + connection = (ClusterConnection) TEST_UTIL.getConnection(); + + admin = connection.getAdmin(); + admin.setBalancerRunning(false, true); } @AfterClass public static void tearDownAfterClass() throws Exception { + tableExecutorService.shutdown(); + hbfsckExecutorService.shutdown(); + admin.close(); TEST_UTIL.shutdownMiniCluster(); } @@ -174,8 +190,7 @@ public class TestHBaseFsck { // Now let's mess it up and change the assignment in hbase:meta to // point to a different region server - Table meta = new HTable(conf, TableName.META_TABLE_NAME, - executorService); + Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService); Scan scan = new Scan(); scan.setStartRow(Bytes.toBytes(table+",,")); ResultScanner scanner = meta.getScanner(scan); @@ -203,7 +218,7 @@ public class 
TestHBaseFsck { put.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn.getStartcode())); meta.put(put); - hri = HRegionInfo.getHRegionInfo(res); + hri = MetaTableAccessor.getHRegionInfo(res); break; } } @@ -219,7 +234,7 @@ public class TestHBaseFsck { assertNoErrors(doFsck(conf, false)); // comment needed - what is the purpose of this line - Table t = new HTable(conf, table, executorService); + Table t = connection.getTable(table, tableExecutorService); ResultScanner s = t.getScanner(new Scan()); s.close(); t.close(); @@ -231,11 +246,7 @@ public class TestHBaseFsck { @Test(timeout=180000) public void testFixAssignmentsWhenMETAinTransition() throws Exception { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { - try (Admin admin = connection.getAdmin()) { - admin.closeRegion(cluster.getServerHoldingMeta(), HRegionInfo.FIRST_META_REGIONINFO); - } - } + admin.closeRegion(cluster.getServerHoldingMeta(), HRegionInfo.FIRST_META_REGIONINFO); regionStates.regionOffline(HRegionInfo.FIRST_META_REGIONINFO); new MetaTableLocator().deleteMetaLocation(cluster.getMaster().getZooKeeper()); assertFalse(regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO)); @@ -248,10 +259,10 @@ public class TestHBaseFsck { /** * Create a new region in META. */ - private HRegionInfo createRegion(Configuration conf, final HTableDescriptor + private HRegionInfo createRegion(final HTableDescriptor htd, byte[] startKey, byte[] endKey) throws IOException { - Table meta = new HTable(conf, TableName.META_TABLE_NAME, executorService); + Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService); HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey); MetaTableAccessor.addRegionToMeta(meta, hri); meta.close(); @@ -272,12 +283,12 @@ public class TestHBaseFsck { * This method is used to undeploy a region -- close it and attempt to * remove its state from the Master. 
*/ - private void undeployRegion(HConnection conn, ServerName sn, + private void undeployRegion(Connection conn, ServerName sn, HRegionInfo hri) throws IOException, InterruptedException { try { - HBaseFsckRepair.closeRegionSilentlyAndWait(conn, sn, hri); + HBaseFsckRepair.closeRegionSilentlyAndWait((HConnection) conn, sn, hri); if (!hri.isMetaTable()) { - conn.getAdmin().offline(hri.getRegionName()); + admin.offline(hri.getRegionName()); } } catch (IOException ioe) { LOG.warn("Got exception when attempting to offline region " @@ -311,11 +322,10 @@ public class TestHBaseFsck { LOG.info("** Before delete:"); dumpMeta(htd.getTableName()); - Map hris = tbl.getRegionLocations(); - ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf); - for (Entry e: hris.entrySet()) { - HRegionInfo hri = e.getKey(); - ServerName hsa = e.getValue(); + List locations = tbl.getAllRegionLocations(); + for (HRegionLocation location : locations) { + HRegionInfo hri = location.getRegionInfo(); + ServerName hsa = location.getServerName(); if (Bytes.compareTo(hri.getStartKey(), startKey) == 0 && Bytes.compareTo(hri.getEndKey(), endKey) == 0 && hri.getReplicaId() == replicaId) { @@ -325,7 +335,7 @@ public class TestHBaseFsck { if (unassign) { LOG.info("Undeploying region " + hri + " from server " + hsa); - undeployRegion(conn, hsa, hri); + undeployRegion(connection, hsa, hri); } if (regionInfoOnly) { @@ -351,7 +361,7 @@ public class TestHBaseFsck { } if (metaRow) { - try (Table meta = conn.getTable(TableName.META_TABLE_NAME, executorService)) { + try (Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService)) { Delete delete = new Delete(deleteRow); meta.delete(delete); } @@ -363,34 +373,37 @@ public class TestHBaseFsck { TEST_UTIL.getMetaTableRows(htd.getTableName()); LOG.info("*** After delete:"); dumpMeta(htd.getTableName()); - conn.close(); } /** * Setup a clean table before we start mucking with it. 
* + * Sets up {@code tbl}, which must be closed after the test. + * + * @throws IOException + * @throws InterruptedException + * @throws KeeperException */ - Table setupTable(TableName tablename) throws Exception { - return setupTableWithRegionReplica(tablename, 1); + void setupTable(TableName tablename) throws Exception { + setupTableWithRegionReplica(tablename, 1); } /** * Setup a clean table with a certain region_replica count + * + * Sets up {@code tbl}, which must be closed after the test. + * * @param tableName * @param replicaCount - * @return * @throws Exception */ - Table setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception { + void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception { HTableDescriptor desc = new HTableDescriptor(tablename); desc.setRegionReplication(replicaCount); HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM)); desc.addFamily(hcd); // If a table has no CF's it doesn't get checked - TEST_UTIL.getHBaseAdmin().createTable(desc, SPLITS); - tbl = (HTable)TEST_UTIL.getConnection().getTable(tablename, executorService); + admin.createTable(desc, SPLITS); + tbl = (HTable) connection.getTable(tablename, tableExecutorService); List puts = new ArrayList(); for (byte[] row : ROWKEYS) { Put p = new Put(row); @@ -399,7 +412,6 @@ public class TestHBaseFsck { } tbl.put(puts); tbl.flushCommits(); - return tbl; } /** @@ -421,31 +433,15 @@ public class TestHBaseFsck { * @param tablename * @throws IOException */ - void deleteTable(TableName tablename) throws IOException { - HConnection conn = (HConnection) ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin(); - conn.clearRegionCache(); - if (admin.isTableEnabled(tablename)) { - admin.disableTableAsync(tablename); + void cleanupTable(TableName tablename) throws IOException { + if (tbl != null) { + tbl.close(); + tbl = null; } - long totalWait = 0; - long maxWait = 30*1000; - long sleepTime = 250; - while (!admin.isTableDisabled(tablename)) { - try { - Thread.sleep(sleepTime); - totalWait += sleepTime; - if (totalWait >= maxWait) { - fail("Waited too long for table to be disabled + " + tablename); - } - } catch (InterruptedException e) { - e.printStackTrace(); - fail("Interrupted when trying to disable table " + tablename); - } - } - admin.deleteTable(tablename); - admin.close(); - conn.close(); + + ((ClusterConnection) connection).clearRegionCache(); + TEST_UTIL.deleteTable(tablename); + } /** @@ -468,7 +464,7 @@ public class TestHBaseFsck { assertEquals(0, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -490,7 +486,7 @@ public class TestHBaseFsck { // We should pass without triggering a RejectedExecutionException } finally { - deleteTable(table); + cleanupTable(table); } } @@ -501,7 +497,6 @@ public class TestHBaseFsck { Path tableinfo = null; try { setupTable(table); - Admin admin = TEST_UTIL.getHBaseAdmin(); Path hbaseTableDir = FSUtils.getTableDir( FSUtils.getRootDir(conf), table); @@ -532,14 +527,13 @@ public class TestHBaseFsck { htd = admin.getTableDescriptor(table); // warms up cached htd on master hbck = doFsck(conf, true); assertNoErrors(hbck); - status = null; status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir); assertNotNull(status); htd = admin.getTableDescriptor(table); assertEquals(htd.getValue("NOT_DEFAULT"), "true"); } finally { fs.rename(new Path("/.tableinfo"), tableinfo); - deleteTable(table); + cleanupTable(table); } } @@
-601,8 +595,8 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // Now let's mess it up, by adding a region with a duplicate startkey - HRegionInfo hriDupe = createRegion(conf, tbl.getTableDescriptor(), - Bytes.toBytes("A"), Bytes.toBytes("A2")); + HRegionInfo hriDupe = + createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("A2")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriDupe); @@ -624,7 +618,7 @@ public class TestHBaseFsck { assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -638,10 +632,10 @@ public class TestHBaseFsck { TableName.valueOf("testHbckWithRegionReplica"); try { setupTableWithRegionReplica(table, 2); - TEST_UTIL.getHBaseAdmin().flush(table); + admin.flush(table); assertNoErrors(doFsck(conf, false)); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -651,7 +645,7 @@ public class TestHBaseFsck { TableName.valueOf("testHbckWithFewerReplica"); try { setupTableWithRegionReplica(table, 2); - TEST_UTIL.getHBaseAdmin().flush(table); + admin.flush(table); assertNoErrors(doFsck(conf, false)); assertEquals(ROWKEYS.length, countRows()); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), @@ -665,7 +659,7 @@ public class TestHBaseFsck { hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[]{}); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -675,15 +669,15 @@ public class TestHBaseFsck { TableName.valueOf("testHbckWithExcessReplica"); try { setupTableWithRegionReplica(table, 2); - TEST_UTIL.getHBaseAdmin().flush(table); + admin.flush(table); assertNoErrors(doFsck(conf, false)); assertEquals(ROWKEYS.length, countRows()); // the next few lines inject a location in meta for a replica, and then // asks the master to assign the replica (the meta needs to be injected // for the master to treat the request for assignment as valid; the master // checks the region is valid either from its memory or meta) - Table meta = new HTable(conf, TableName.META_TABLE_NAME); - List regions = TEST_UTIL.getHBaseAdmin().getTableRegions(table); + Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService); + List regions = admin.getTableRegions(table); byte[] startKey = Bytes.toBytes("B"); byte[] endKey = Bytes.toBytes("C"); byte[] metaKey = null; @@ -699,20 +693,20 @@ public class TestHBaseFsck { } } Put put = new Put(metaKey); - Collection var = TEST_UTIL.getHBaseAdmin().getClusterStatus().getServers(); + Collection var = admin.getClusterStatus().getServers(); ServerName sn = var.toArray(new ServerName[var.size()])[0]; //add a location with replicaId as 2 (since we already have replicas with replicaid 0 and 1) MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), 2); meta.put(put); meta.flushCommits(); // assign the new replica - HBaseFsckRepair.fixUnassigned(TEST_UTIL.getHBaseAdmin(), newHri); - HBaseFsckRepair.waitUntilAssigned(TEST_UTIL.getHBaseAdmin(), newHri); + HBaseFsckRepair.fixUnassigned(admin, newHri); + HBaseFsckRepair.waitUntilAssigned(admin, newHri); // now reset the meta row to its original value Delete delete = new Delete(metaKey); - delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(2)); - delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(2)); - delete.deleteColumns(HConstants.CATALOG_FAMILY, 
MetaTableAccessor.getSeqNumColumn(2)); + delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(2)); + delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(2)); + delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(2)); meta.delete(delete); meta.flushCommits(); meta.close(); @@ -725,7 +719,7 @@ public class TestHBaseFsck { hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[]{}); } finally { - deleteTable(table); + cleanupTable(table); } } /** @@ -736,9 +730,8 @@ public class TestHBaseFsck { Collection regionServers = status.getServers(); Map> mm = new HashMap>(); - HConnection connection = admin.getConnection(); for (ServerName hsi : regionServers) { - AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi); + AdminProtos.AdminService.BlockingInterface server = ((HConnection) connection).getAdmin(hsi); // list all online regions from this region server List regions = ProtobufUtil.getOnlineRegions(server); @@ -777,8 +770,8 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // Now let's mess it up, by adding a region with a duplicate startkey - HRegionInfo hriDupe = createRegion(conf, tbl.getTableDescriptor(), - Bytes.toBytes("A"), Bytes.toBytes("B")); + HRegionInfo hriDupe = + createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() @@ -790,8 +783,7 @@ public class TestHBaseFsck { // different regions with the same start/endkeys since it doesn't // differentiate on ts/regionId! We actually need to recheck // deployments! - HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); - while (findDeployedHSI(getDeployedHRIs(admin), hriDupe) == null) { + while (findDeployedHSI(getDeployedHRIs((HBaseAdmin) admin), hriDupe) == null) { Thread.sleep(250); } @@ -813,7 +805,7 @@ public class TestHBaseFsck { assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -829,8 +821,8 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // Now let's mess it up, by adding a region with a duplicate startkey - HRegionInfo hriDupe = createRegion(conf, tbl.getTableDescriptor(), - Bytes.toBytes("B"), Bytes.toBytes("B")); + HRegionInfo hriDupe = + createRegion(tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriDupe); @@ -838,8 +830,8 @@ public class TestHBaseFsck { TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT); HBaseFsck hbck = doFsck(conf,false); - assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DEGENERATE_REGION, - ERROR_CODE.DUPE_STARTKEYS, ERROR_CODE.DUPE_STARTKEYS}); + assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DEGENERATE_REGION, ERROR_CODE.DUPE_STARTKEYS, + ERROR_CODE.DUPE_STARTKEYS }); assertEquals(2, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); @@ -852,7 +844,7 @@ public class TestHBaseFsck { assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -869,8 +861,8 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // Mess it up by creating an overlap in the 
metadata - HRegionInfo hriOverlap = createRegion(conf, tbl.getTableDescriptor(), - Bytes.toBytes("A2"), Bytes.toBytes("B")); + HRegionInfo hriOverlap = + createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriOverlap); @@ -892,7 +884,7 @@ public class TestHBaseFsck { assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -906,19 +898,19 @@ public class TestHBaseFsck { public void testSidelineOverlapRegion() throws Exception { TableName table = TableName.valueOf("testSidelineOverlapRegion"); - try (HConnection conn = (HConnection) ConnectionFactory.createConnection(conf)){ + try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); // Mess it up by creating an overlap MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); HMaster master = cluster.getMaster(); - HRegionInfo hriOverlap1 = createRegion(conf, tbl.getTableDescriptor(), - Bytes.toBytes("A"), Bytes.toBytes("AB")); + HRegionInfo hriOverlap1 = + createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("AB")); master.assignRegion(hriOverlap1); master.getAssignmentManager().waitForAssignment(hriOverlap1); - HRegionInfo hriOverlap2 = createRegion(conf, tbl.getTableDescriptor(), - Bytes.toBytes("AB"), Bytes.toBytes("B")); + HRegionInfo hriOverlap2 = + createRegion(tbl.getTableDescriptor(), Bytes.toBytes("AB"), Bytes.toBytes("B")); master.assignRegion(hriOverlap2); master.getAssignmentManager().waitForAssignment(hriOverlap2); @@ -947,9 +939,8 @@ public class TestHBaseFsck { } } - HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); - HBaseFsckRepair.closeRegionSilentlyAndWait(conn, - cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI()); + HBaseFsckRepair.closeRegionSilentlyAndWait((HConnection) connection, + cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI()); admin.offline(regionName); break; } @@ -957,14 +948,15 @@ public class TestHBaseFsck { assertNotNull(regionName); assertNotNull(serverName); - Table meta = conn.getTable(TableName.META_TABLE_NAME, executorService); - Put put = new Put(regionName); - put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(serverName.getHostAndPort())); - meta.put(put); + try (Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService)) { + Put put = new Put(regionName); + put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, + Bytes.toBytes(serverName.getHostAndPort())); + meta.put(put); + } // fix the problem. - HBaseFsck fsck = new HBaseFsck(conf); + HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); fsck.setDisplayFullReport(); // i.e. -details fsck.setTimeLag(0); @@ -977,6 +969,7 @@ public class TestHBaseFsck { fsck.setSidelineBigOverlaps(true); fsck.setMaxMerge(2); fsck.onlineHbck(); + fsck.close(); // verify that overlaps are fixed, and there are less rows // since one region is sidelined. 
@@ -985,7 +978,7 @@ public class TestHBaseFsck { assertEquals(0, hbck2.getOverlapGroups(table).size()); assertTrue(ROWKEYS.length > countRows()); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1002,13 +995,13 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // Mess it up by creating an overlap in the metadata - TEST_UTIL.getHBaseAdmin().disableTable(table); + admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"), true, true, false, true, HRegionInfo.DEFAULT_REPLICA_ID); - TEST_UTIL.getHBaseAdmin().enableTable(table); + admin.enableTable(table); - HRegionInfo hriOverlap = createRegion(conf, tbl.getTableDescriptor(), - Bytes.toBytes("A2"), Bytes.toBytes("B")); + HRegionInfo hriOverlap = + createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriOverlap); @@ -1029,7 +1022,7 @@ public class TestHBaseFsck { assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1047,8 +1040,8 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // Mess it up by creating an overlap in the metadata - HRegionInfo hriOverlap = createRegion(conf, tbl.getTableDescriptor(), - Bytes.toBytes("A2"), Bytes.toBytes("B2")); + HRegionInfo hriOverlap = + createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B2")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriOverlap); @@ -1056,8 +1049,7 @@ public class TestHBaseFsck { TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT); HBaseFsck hbck = doFsck(conf, false); - assertErrors(hbck, new ERROR_CODE[] { - ERROR_CODE.OVERLAP_IN_REGION_CHAIN, + assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.OVERLAP_IN_REGION_CHAIN, ERROR_CODE.OVERLAP_IN_REGION_CHAIN }); assertEquals(3, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); @@ -1071,7 +1063,7 @@ public class TestHBaseFsck { assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1088,10 +1080,10 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // Mess it up by leaving a hole in the assignment, meta, and hdfs data - TEST_UTIL.getHBaseAdmin().disableTable(table); + admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true, true, true); - TEST_UTIL.getHBaseAdmin().enableTable(table); + admin.enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { @@ -1106,7 +1098,7 @@ public class TestHBaseFsck { assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length - 2 , countRows()); // lost a region so lost a row } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1116,17 +1108,16 @@ public class TestHBaseFsck { */ @Test (timeout=180000) public void testHDFSRegioninfoMissing() throws Exception { - TableName table = - TableName.valueOf("tableHDFSRegioininfoMissing"); + TableName table = TableName.valueOf("tableHDFSRegioninfoMissing"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); // Mess it up by leaving a 
hole in the meta data - TEST_UTIL.getHBaseAdmin().disableTable(table); + admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true, true, false, true, HRegionInfo.DEFAULT_REPLICA_ID); - TEST_UTIL.getHBaseAdmin().enableTable(table); + admin.enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { @@ -1143,7 +1134,7 @@ public class TestHBaseFsck { assertNoErrors(doFsck(conf, false)); assertEquals(ROWKEYS.length, countRows()); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1160,10 +1151,10 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // Mess it up by leaving a hole in the meta data - TEST_UTIL.getHBaseAdmin().disableTable(table); + admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true, true, false); // don't rm from fs - TEST_UTIL.getHBaseAdmin().enableTable(table); + admin.enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { @@ -1179,7 +1170,7 @@ public class TestHBaseFsck { assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length, countRows()); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1195,10 +1186,10 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // Mess it up by leaving a hole in the meta data - TEST_UTIL.getHBaseAdmin().disableTable(table); + admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false, true, false); // don't rm from fs - TEST_UTIL.getHBaseAdmin().enableTable(table); + admin.enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { @@ -1214,7 +1205,7 @@ public class TestHBaseFsck { assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length, countRows()); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1231,7 +1222,7 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // make sure data in regions, if in wal only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table); + admin.flush(table); // Mess it up by leaving a hole in the hdfs data deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), @@ -1249,7 +1240,7 @@ public class TestHBaseFsck { assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length - 2, countRows()); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1261,7 +1252,6 @@ public class TestHBaseFsck { public void testNotInHdfsWithReplicas() throws Exception { TableName table = TableName.valueOf("tableNotInHdfs"); - Admin admin = new HBaseAdmin(conf); try { HRegionInfo[] oldHris = new HRegionInfo[2]; setupTableWithRegionReplica(table, 2); @@ -1279,7 +1269,7 @@ public class TestHBaseFsck { } } // make sure data in regions - TEST_UTIL.getHBaseAdmin().flush(table); + admin.flush(table); // Mess it up by leaving a hole in the hdfs data deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), @@ -1321,7 +1311,7 @@ public class TestHBaseFsck { // the set didn't change) assertFalse(onlineRegions.removeAll(Arrays.asList(oldHris))); } finally { - deleteTable(table); + cleanupTable(table); admin.close(); } } @@ -1338,7 +1328,7 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // make sure data in regions, if in wal only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table); + admin.flush(table); // Mess it up by deleting hdfs dirs deleteRegion(conf, 
tbl.getTableDescriptor(), Bytes.toBytes(""), @@ -1365,8 +1355,7 @@ public class TestHBaseFsck { // check that hole fixed assertNoErrors(doFsck(conf,false)); - assertFalse("Table "+ table + " should have been deleted", - TEST_UTIL.getHBaseAdmin().tableExists(table)); + assertFalse("Table " + table + " should have been deleted", admin.tableExists(table)); } public void deleteTableDir(TableName table) throws IOException { @@ -1420,18 +1409,18 @@ public class TestHBaseFsck { // Write the .tableinfo FSTableDescriptors fstd = new FSTableDescriptors(conf); fstd.createTableDescriptor(htdDisabled); - List disabledRegions = TEST_UTIL.createMultiRegionsInMeta( - TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS); + List disabledRegions = + TEST_UTIL.createMultiRegionsInMeta(conf, htdDisabled, SPLIT_KEYS); // Let's just assign everything to first RS HRegionServer hrs = cluster.getRegionServer(0); // Create region files. - TEST_UTIL.getHBaseAdmin().disableTable(table); - TEST_UTIL.getHBaseAdmin().enableTable(table); + admin.disableTable(table); + admin.enableTable(table); // Disable the table and close its regions - TEST_UTIL.getHBaseAdmin().disableTable(table); + admin.disableTable(table); HRegionInfo region = disabledRegions.remove(0); byte[] regionName = region.getRegionName(); @@ -1455,8 +1444,8 @@ public class TestHBaseFsck { // check result assertNoErrors(doFsck(conf, false)); } finally { - TEST_UTIL.getHBaseAdmin().enableTable(table); - deleteTable(table); + admin.enableTable(table); + cleanupTable(table); } } @@ -1472,14 +1461,14 @@ public class TestHBaseFsck { try { setupTable(table1); // make sure data in regions, if in wal only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table1); + admin.flush(table1); // Mess them up by leaving a hole in the hdfs data deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false, false, true); // don't rm meta setupTable(table2); // make sure data in regions, if in wal only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table2); + admin.flush(table2); // Mess them up by leaving a hole in the hdfs data deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false, false, true); // don't rm meta @@ -1502,8 +1491,8 @@ public class TestHBaseFsck { assertNoErrors(doFsck(conf, false)); assertEquals(ROWKEYS.length - 2, countRows()); } finally { - deleteTable(table1); - deleteTable(table2); + cleanupTable(table1); + cleanupTable(table2); } } /** @@ -1519,7 +1508,7 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // make sure data in regions, if in wal only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table); + admin.flush(table); HRegionLocation location = tbl.getRegionLocation("B"); // Delete one region from meta, but not hdfs, unassign it. @@ -1527,8 +1516,7 @@ public class TestHBaseFsck { Bytes.toBytes("C"), true, true, false); // Create a new meta entry to fake it as a split parent. 
- meta = new HTable(conf, TableName.META_TABLE_NAME, - executorService); + meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService); HRegionInfo hri = location.getRegionInfo(); HRegionInfo a = new HRegionInfo(tbl.getName(), @@ -1541,7 +1529,8 @@ public class TestHBaseFsck { MetaTableAccessor.addRegionToMeta(meta, hri, a, b); meta.flushCommits(); - TEST_UTIL.getHBaseAdmin().flush(TableName.META_TABLE_NAME); + meta.close(); + admin.flush(TableName.META_TABLE_NAME); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { @@ -1550,20 +1539,21 @@ public class TestHBaseFsck { // regular repair cannot fix lingering split parent hbck = doFsck(conf, true); assertErrors(hbck, new ERROR_CODE[] { - ERROR_CODE.LINGERING_SPLIT_PARENT, ERROR_CODE.HOLE_IN_REGION_CHAIN}); + ERROR_CODE.LINGERING_SPLIT_PARENT, ERROR_CODE.HOLE_IN_REGION_CHAIN }); assertFalse(hbck.shouldRerun()); hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.LINGERING_SPLIT_PARENT, ERROR_CODE.HOLE_IN_REGION_CHAIN}); // fix lingering split parent - hbck = new HBaseFsck(conf); + hbck = new HBaseFsck(conf, hbfsckExecutorService); hbck.connect(); hbck.setDisplayFullReport(); // i.e. -details hbck.setTimeLag(0); hbck.setFixSplitParents(true); hbck.onlineHbck(); assertTrue(hbck.shouldRerun()); + hbck.close(); Get get = new Get(hri.getRegionName()); Result result = meta.get(get); @@ -1571,7 +1561,7 @@ public class TestHBaseFsck { HConstants.SPLITA_QUALIFIER).isEmpty()); assertTrue(result.getColumnCells(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER).isEmpty()); - TEST_UTIL.getHBaseAdmin().flush(TableName.META_TABLE_NAME); + admin.flush(TableName.META_TABLE_NAME); // fix other issues doFsck(conf, true); @@ -1580,7 +1570,7 @@ public class TestHBaseFsck { assertNoErrors(doFsck(conf, false)); assertEquals(ROWKEYS.length, countRows()); } finally { - deleteTable(table); + cleanupTable(table); IOUtils.closeQuietly(meta); } } @@ -1599,18 +1589,16 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // make sure data in regions, if in wal only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table); - HRegionLocation location = tbl.getRegionLocation("B"); + admin.flush(table); + HRegionLocation location = tbl.getRegionLocation(Bytes.toBytes("B")); - meta = new HTable(conf, TableName.META_TABLE_NAME); + meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService); HRegionInfo hri = location.getRegionInfo(); // do a regular split - Admin admin = TEST_UTIL.getHBaseAdmin(); byte[] regionName = location.getRegionInfo().getRegionName(); admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM")); - TestEndToEndSplitTransaction.blockUntilRegionSplit( - TEST_UTIL.getConfiguration(), 60000, regionName, true); + TestEndToEndSplitTransaction.blockUntilRegionSplit(conf, 60000, regionName, true); // TODO: fixHdfsHoles does not work against splits, since the parent dir lingers on // for some time until children references are deleted. HBCK erroneously sees this as @@ -1622,7 +1610,7 @@ public class TestHBaseFsck { Get get = new Get(hri.getRegionName()); Result result = meta.get(get); assertNotNull(result); - assertNotNull(HRegionInfo.getHRegionInfo(result)); + assertNotNull(MetaTableAccessor.getHRegionInfo(result)); assertEquals(ROWKEYS.length, countRows()); @@ -1630,7 +1618,7 @@ public class TestHBaseFsck { assertEquals(tbl.getStartKeys().length, SPLITS.length + 1 + 1); //SPLITS + 1 is # regions pre-split. 
assertNoErrors(doFsck(conf, false)); } finally { - deleteTable(table); + cleanupTable(table); IOUtils.closeQuietly(meta); } } @@ -1641,31 +1629,30 @@ public class TestHBaseFsck { */ @Test(timeout=75000) public void testSplitDaughtersNotInMeta() throws Exception { - TableName table = - TableName.valueOf("testSplitdaughtersNotInMeta"); - try (HConnection conn = (HConnection) ConnectionFactory.createConnection(conf); - Table meta = conn.getTable(TableName.META_TABLE_NAME)){ + TableName table = TableName.valueOf("testSplitdaughtersNotInMeta"); + Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService); + try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); // make sure data in regions, if in wal only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table); - HRegionLocation location = tbl.getRegionLocation("B"); + admin.flush(table); + HRegionLocation location = tbl.getRegionLocation(Bytes.toBytes("B")); HRegionInfo hri = location.getRegionInfo(); // do a regular split byte[] regionName = location.getRegionInfo().getRegionName(); - conn.getAdmin().splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM")); - TestEndToEndSplitTransaction.blockUntilRegionSplit( - TEST_UTIL.getConfiguration(), 60000, regionName, true); + admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM")); + TestEndToEndSplitTransaction.blockUntilRegionSplit(conf, 60000, regionName, true); - PairOfSameType daughters = HRegionInfo.getDaughterRegions(meta.get(new Get(regionName))); + PairOfSameType daughters = + MetaTableAccessor.getDaughterRegions(meta.get(new Get(regionName))); // Delete daughter regions from meta, but not hdfs, unassign it. Map hris = tbl.getRegionLocations(); - undeployRegion(conn, hris.get(daughters.getFirst()), daughters.getFirst()); - undeployRegion(conn, hris.get(daughters.getSecond()), daughters.getSecond()); + undeployRegion(connection, hris.get(daughters.getFirst()), daughters.getFirst()); + undeployRegion(connection, hris.get(daughters.getSecond()), daughters.getSecond()); meta.delete(new Delete(daughters.getFirst().getRegionName())); meta.delete(new Delete(daughters.getSecond().getRegionName())); @@ -1673,24 +1660,26 @@ public class TestHBaseFsck { // Remove daughters from regionStates RegionStates regionStates = TEST_UTIL.getMiniHBaseCluster().getMaster(). - getAssignmentManager().getRegionStates(); + getAssignmentManager().getRegionStates(); regionStates.deleteRegion(daughters.getFirst()); regionStates.deleteRegion(daughters.getSecond()); HBaseFsck hbck = doFsck(conf, false); - assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_META_OR_DEPLOYED, - ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); //no LINGERING_SPLIT_PARENT + assertErrors(hbck, + new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + ERROR_CODE.HOLE_IN_REGION_CHAIN }); //no LINGERING_SPLIT_PARENT // now fix it. The fix should not revert the region split, but add daughters to META hbck = doFsck(conf, true, true, false, false, false, false, false, false, false, false, null); - assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_META_OR_DEPLOYED, - ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); + assertErrors(hbck, + new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + ERROR_CODE.HOLE_IN_REGION_CHAIN }); // assert that the split hbase:meta entry is still there. 
Get get = new Get(hri.getRegionName()); Result result = meta.get(get); assertNotNull(result); - assertNotNull(HRegionInfo.getHRegionInfo(result)); + assertNotNull(MetaTableAccessor.getHRegionInfo(result)); assertEquals(ROWKEYS.length, countRows()); @@ -1698,7 +1687,8 @@ public class TestHBaseFsck { assertEquals(tbl.getStartKeys().length, SPLITS.length + 1 + 1); //SPLITS + 1 is # regions pre-split. assertNoErrors(doFsck(conf, false)); //should be fixed by now } finally { - deleteTable(table); + meta.close(); + cleanupTable(table); } } @@ -1714,10 +1704,10 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // Mess it up by leaving a hole in the assignment, meta, and hdfs data - TEST_UTIL.getHBaseAdmin().disableTable(table); + admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes(""), Bytes.toBytes("A"), true, true, true); - TEST_UTIL.getHBaseAdmin().enableTable(table); + admin.enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY }); @@ -1726,7 +1716,7 @@ public class TestHBaseFsck { // check that hole fixed assertNoErrors(doFsck(conf, false)); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1740,7 +1730,7 @@ public class TestHBaseFsck { TableName.valueOf("testSingleRegionDeployedNotInHdfs"); try { setupTable(table); - TEST_UTIL.getHBaseAdmin().flush(table); + admin.flush(table); // Mess it up by deleting region dir deleteRegion(conf, tbl.getTableDescriptor(), @@ -1754,7 +1744,7 @@ public class TestHBaseFsck { // check that hole fixed assertNoErrors(doFsck(conf, false)); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1771,10 +1761,10 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // Mess it up by leaving a hole in the assignment, meta, and hdfs data - TEST_UTIL.getHBaseAdmin().disableTable(table); + admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("C"), Bytes.toBytes(""), true, true, true); - TEST_UTIL.getHBaseAdmin().enableTable(table); + admin.enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY }); @@ -1783,7 +1773,7 @@ public class TestHBaseFsck { // check that hole fixed assertNoErrors(doFsck(conf, false)); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1808,7 +1798,7 @@ public class TestHBaseFsck { ERROR_CODE.NOT_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); // verify that noHdfsChecking report the same errors - HBaseFsck fsck = new HBaseFsck(conf); + HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); fsck.setDisplayFullReport(); // i.e. -details fsck.setTimeLag(0); @@ -1816,9 +1806,10 @@ public class TestHBaseFsck { fsck.onlineHbck(); assertErrors(fsck, new ERROR_CODE[] { ERROR_CODE.NOT_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); + fsck.close(); // verify that fixAssignments works fine with noHdfsChecking - fsck = new HBaseFsck(conf); + fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); fsck.setDisplayFullReport(); // i.e. 
-details fsck.setTimeLag(0); @@ -1830,8 +1821,10 @@ public class TestHBaseFsck { assertNoErrors(fsck); assertEquals(ROWKEYS.length, countRows()); + + fsck.close(); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1854,21 +1847,22 @@ public class TestHBaseFsck { // verify there is no other errors HBaseFsck hbck = doFsck(conf, false); - assertErrors(hbck, new ERROR_CODE[] { - ERROR_CODE.NOT_IN_META, ERROR_CODE.HOLE_IN_REGION_CHAIN}); + assertErrors(hbck, + new ERROR_CODE[] { ERROR_CODE.NOT_IN_META, ERROR_CODE.HOLE_IN_REGION_CHAIN }); // verify that noHdfsChecking report the same errors - HBaseFsck fsck = new HBaseFsck(conf); + HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); fsck.setDisplayFullReport(); // i.e. -details fsck.setTimeLag(0); fsck.setCheckHdfs(false); fsck.onlineHbck(); - assertErrors(fsck, new ERROR_CODE[] { - ERROR_CODE.NOT_IN_META, ERROR_CODE.HOLE_IN_REGION_CHAIN}); + assertErrors(fsck, + new ERROR_CODE[] { ERROR_CODE.NOT_IN_META, ERROR_CODE.HOLE_IN_REGION_CHAIN }); + fsck.close(); // verify that fixMeta doesn't work with noHdfsChecking - fsck = new HBaseFsck(conf); + fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); fsck.setDisplayFullReport(); // i.e. -details fsck.setTimeLag(0); @@ -1877,8 +1871,9 @@ public class TestHBaseFsck { fsck.setFixMeta(true); fsck.onlineHbck(); assertFalse(fsck.shouldRerun()); - assertErrors(fsck, new ERROR_CODE[] { - ERROR_CODE.NOT_IN_META, ERROR_CODE.HOLE_IN_REGION_CHAIN}); + assertErrors(fsck, + new ERROR_CODE[] { ERROR_CODE.NOT_IN_META, ERROR_CODE.HOLE_IN_REGION_CHAIN }); + fsck.close(); // fix the cluster so other tests won't be impacted fsck = doFsck(conf, true); @@ -1886,7 +1881,7 @@ public class TestHBaseFsck { fsck = doFsck(conf, true); assertNoErrors(fsck); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -1903,13 +1898,13 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // Mess it up by creating an overlap in the metadata - TEST_UTIL.getHBaseAdmin().disableTable(table); + admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"), true, true, false, true, HRegionInfo.DEFAULT_REPLICA_ID); - TEST_UTIL.getHBaseAdmin().enableTable(table); + admin.enableTable(table); - HRegionInfo hriOverlap = createRegion(conf, tbl.getTableDescriptor(), - Bytes.toBytes("A2"), Bytes.toBytes("B")); + HRegionInfo hriOverlap = + createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriOverlap); @@ -1922,7 +1917,7 @@ public class TestHBaseFsck { ERROR_CODE.HOLE_IN_REGION_CHAIN}); // verify that noHdfsChecking can't detect ORPHAN_HDFS_REGION - HBaseFsck fsck = new HBaseFsck(conf); + HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); fsck.setDisplayFullReport(); // i.e. -details fsck.setTimeLag(0); @@ -1930,9 +1925,10 @@ public class TestHBaseFsck { fsck.onlineHbck(); assertErrors(fsck, new ERROR_CODE[] { ERROR_CODE.HOLE_IN_REGION_CHAIN}); + fsck.close(); // verify that fixHdfsHoles doesn't work with noHdfsChecking - fsck = new HBaseFsck(conf); + fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); fsck.setDisplayFullReport(); // i.e. 
-details fsck.setTimeLag(0); @@ -1942,13 +1938,13 @@ public class TestHBaseFsck { fsck.setFixHdfsOrphans(true); fsck.onlineHbck(); assertFalse(fsck.shouldRerun()); - assertErrors(fsck, new ERROR_CODE[] { - ERROR_CODE.HOLE_IN_REGION_CHAIN}); + assertErrors(fsck, new ERROR_CODE[] { ERROR_CODE.HOLE_IN_REGION_CHAIN}); + fsck.close(); } finally { - if (TEST_UTIL.getHBaseAdmin().isTableDisabled(table)) { - TEST_UTIL.getHBaseAdmin().enableTable(table); + if (admin.isTableDisabled(table)) { + admin.enableTable(table); } - deleteTable(table); + cleanupTable(table); } } @@ -1988,13 +1984,13 @@ public class TestHBaseFsck { try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); - TEST_UTIL.getHBaseAdmin().flush(table); // flush is async. + admin.flush(table); // flush is async. FileSystem fs = FileSystem.get(conf); Path hfile = getFlushedHFile(fs, table); // Mess it up by leaving a hole in the assignment, meta, and hdfs data - TEST_UTIL.getHBaseAdmin().disableTable(table); + admin.disableTable(table); // create new corrupt file called deadbeef (valid hfile name) Path corrupt = new Path(hfile.getParent(), "deadbeef"); @@ -2013,29 +2009,28 @@ public class TestHBaseFsck { assertEquals(hfcc.getMissing().size(), 0); // Its been fixed, verify that we can enable. - TEST_UTIL.getHBaseAdmin().enableTable(table); + admin.enableTable(table); } finally { - deleteTable(table); + cleanupTable(table); } } /** - * Test that use this should have a timeout, because this method could potentially wait forever. + * Test that use this should have a timeout, because this method could potentially wait forever. */ private void doQuarantineTest(TableName table, HBaseFsck hbck, int check, int corrupt, int fail, int quar, int missing) throws Exception { try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); - TEST_UTIL.getHBaseAdmin().flush(table); // flush is async. + admin.flush(table); // flush is async. // Mess it up by leaving a hole in the assignment, meta, and hdfs data - TEST_UTIL.getHBaseAdmin().disableTable(table); + admin.disableTable(table); String[] args = {"-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission", table.getNameAsString()}; - ExecutorService exec = new ScheduledThreadPoolExecutor(10); - HBaseFsck res = hbck.exec(exec, args); + HBaseFsck res = hbck.exec(hbfsckExecutorService, args); HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker(); assertEquals(hfcc.getHFilesChecked(), check); @@ -2045,7 +2040,6 @@ public class TestHBaseFsck { assertEquals(hfcc.getMissing().size(), missing); // its been fixed, verify that we can enable - Admin admin = TEST_UTIL.getHBaseAdmin(); admin.enableTableAsync(table); while (!admin.isTableEnabled(table)) { try { @@ -2056,7 +2050,7 @@ public class TestHBaseFsck { } } } finally { - deleteTable(table); + cleanupTable(table); } } @@ -2067,10 +2061,10 @@ public class TestHBaseFsck { @Test(timeout=180000) public void testQuarantineMissingHFile() throws Exception { TableName table = TableName.valueOf(name.getMethodName()); - ExecutorService exec = new ScheduledThreadPoolExecutor(10); + // inject a fault in the hfcc created. 
final FileSystem fs = FileSystem.get(conf); - HBaseFsck hbck = new HBaseFsck(conf, exec) { + HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) { @Override public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException { return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) { @@ -2086,6 +2080,7 @@ public class TestHBaseFsck { } }; doQuarantineTest(table, hbck, 4, 0, 0, 0, 1); // 4 attempted, but 1 missing. + hbck.close(); } /** @@ -2097,10 +2092,9 @@ public class TestHBaseFsck { @Ignore @Test(timeout=180000) public void testQuarantineMissingFamdir() throws Exception { TableName table = TableName.valueOf(name.getMethodName()); - ExecutorService exec = new ScheduledThreadPoolExecutor(10); // inject a fault in the hfcc created. final FileSystem fs = FileSystem.get(conf); - HBaseFsck hbck = new HBaseFsck(conf, exec) { + HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) { @Override public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException { return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) { @@ -2116,6 +2110,7 @@ public class TestHBaseFsck { } }; doQuarantineTest(table, hbck, 3, 0, 0, 0, 1); + hbck.close(); } /** @@ -2125,10 +2120,9 @@ public class TestHBaseFsck { @Test(timeout=180000) public void testQuarantineMissingRegionDir() throws Exception { TableName table = TableName.valueOf(name.getMethodName()); - ExecutorService exec = new ScheduledThreadPoolExecutor(10); // inject a fault in the hfcc created. final FileSystem fs = FileSystem.get(conf); - HBaseFsck hbck = new HBaseFsck(conf, exec) { + HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) { @Override public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException { @@ -2145,6 +2139,7 @@ public class TestHBaseFsck { } }; doQuarantineTest(table, hbck, 3, 0, 0, 0, 1); + hbck.close(); } /** @@ -2173,7 +2168,7 @@ public class TestHBaseFsck { // check that reference file fixed assertNoErrors(doFsck(conf, false)); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -2189,15 +2184,15 @@ public class TestHBaseFsck { // Mess it up by removing the RegionInfo for one region. final List deletes = new LinkedList(); - Table meta = connection.getTable(TableName.META_TABLE_NAME); + Table meta = connection.getTable(TableName.META_TABLE_NAME, hbfsckExecutorService); MetaScanner.metaScan(connection, new MetaScanner.MetaScannerVisitor() { @Override public boolean processRow(Result rowResult) throws IOException { - HRegionInfo hri = MetaScanner.getHRegionInfo(rowResult); + HRegionInfo hri = MetaTableAccessor.getHRegionInfo(rowResult); if (hri != null && !hri.getTable().isSystemTable()) { Delete delete = new Delete(rowResult.getRow()); - delete.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + delete.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); deletes.add(delete); } return true; @@ -2225,12 +2220,11 @@ public class TestHBaseFsck { // check that reference file fixed assertFalse(hbck.getErrors().getErrorList().contains(ERROR_CODE.EMPTY_META_CELL)); } finally { - deleteTable(table); + cleanupTable(table); } connection.close(); } - /** * Test pluggable error reporter. It can be plugged in * from system property or configuration. 
@@ -2417,15 +2411,13 @@ public class TestHBaseFsck { private void deleteMetaRegion(Configuration conf, boolean unassign, boolean hdfs, boolean regionInfoOnly) throws IOException, InterruptedException { - HConnection connection = HConnectionManager.getConnection(conf); - HRegionLocation metaLocation = connection.locateRegion(TableName.META_TABLE_NAME, - HConstants.EMPTY_START_ROW); + HRegionLocation metaLocation = connection.getRegionLocator(TableName.META_TABLE_NAME) + .getRegionLocation(HConstants.EMPTY_START_ROW); ServerName hsa = metaLocation.getServerName(); HRegionInfo hri = metaLocation.getRegionInfo(); if (unassign) { LOG.info("Undeploying meta region " + hri + " from server " + hsa); - try (HConnection unmanagedConnection = - (HConnection) ConnectionFactory.createConnection(conf)) { + try (Connection unmanagedConnection = ConnectionFactory.createConnection(conf)) { undeployRegion(unmanagedConnection, hsa, hri); } } @@ -2464,12 +2456,12 @@ public class TestHBaseFsck { HTableDescriptor desc = new HTableDescriptor(table); HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM)); desc.addFamily(hcd); // If a table has no CF's it doesn't get checked - TEST_UTIL.getHBaseAdmin().createTable(desc); - tbl = new HTable(TEST_UTIL.getConfiguration(), table, executorService); + admin.createTable(desc); + tbl = (HTable) connection.getTable(table, tableExecutorService); // Mess it up by leaving a hole in the assignment, meta, and hdfs data - deleteRegion(conf, tbl.getTableDescriptor(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, - false, true); + deleteRegion(conf, tbl.getTableDescriptor(), HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, false, false, true); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_HDFS }); @@ -2482,7 +2474,7 @@ public class TestHBaseFsck { // check that hole fixed assertNoErrors(doFsck(conf, false)); } finally { - deleteTable(table); + cleanupTable(table); } } @@ -2498,16 +2490,15 @@ public class TestHBaseFsck { assertEquals(ROWKEYS.length, countRows()); // make sure data in regions, if in wal only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table); - HRegionInfo region1 = tbl.getRegionLocation("A").getRegionInfo(); - HRegionInfo region2 = tbl.getRegionLocation("B").getRegionInfo(); + admin.flush(table); + HRegionInfo region1 = tbl.getRegionLocation(Bytes.toBytes("A")).getRegionInfo(); + HRegionInfo region2 = tbl.getRegionLocation(Bytes.toBytes("B")).getRegionInfo(); int regionCountBeforeMerge = tbl.getRegionLocations().size(); assertNotEquals(region1, region2); // do a region merge - Admin admin = TEST_UTIL.getHBaseAdmin(); admin.mergeRegions(region1.getEncodedNameAsBytes(), region2.getEncodedNameAsBytes(), false); @@ -2530,12 +2521,12 @@ public class TestHBaseFsck { } finally { TEST_UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(true); - deleteTable(table); + cleanupTable(table); IOUtils.closeQuietly(meta); } } - @Test (timeout=180000) + @Test (timeout = 180000) public void testRegionBoundariesCheck() throws Exception { HBaseFsck hbck = doFsck(conf, false); assertNoErrors(hbck); // no errors diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java index 1f6ec70d585..217f60b88d8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java @@ -40,7 +40,7 @@ public class HbckTestingUtil { public static HBaseFsck doFsck( Configuration conf, boolean fix, TableName table) throws Exception { - return doFsck(conf, fix, fix, fix, fix,fix, fix, fix, fix, fix, fix, table); + return doFsck(conf, fix, fix, fix, fix, fix, fix, fix, fix, fix, fix, table); } public static HBaseFsck doFsck(Configuration conf, boolean fixAssignments, @@ -66,6 +66,7 @@ public class HbckTestingUtil { fsck.includeTable(table); } fsck.onlineHbck(); + fsck.close(); return fsck; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java index 610de2be688..6320b93c471 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java @@ -25,9 +25,6 @@ import static org.junit.Assert.assertFalse; import java.util.Arrays; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.HBaseFsck; @@ -67,6 +64,7 @@ public class TestOfflineMetaRebuildHole extends OfflineMetaRebuildTestCore { // attempt to rebuild meta table from scratch HBaseFsck fsck = new HBaseFsck(conf); assertFalse(fsck.rebuildMeta(false)); + fsck.close(); // bring up the minicluster TEST_UTIL.startMiniZKCluster(); // tables seem enabled by default
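
Note on the HBaseFsck change above: because the class now implements Closeable and close() delegates to IOUtils.cleanup(null, admin, meta, connection), callers can scope an hbck run with try-with-resources instead of cleaning up individual fields. The sketch below is illustrative only and not part of the patch; it assumes a reachable cluster and a configuration obtained via HBaseConfiguration.create(), and it uses only methods the patch itself exercises (connect, setDisplayFullReport, setTimeLag, onlineHbck, close).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.HBaseFsck;

public class HbckCloseableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumed; the patch does not show config setup
    // HBaseFsck is now Closeable, so try-with-resources releases the admin,
    // meta table and connection that close() cleans up.
    try (HBaseFsck fsck = new HBaseFsck(conf)) {
      fsck.connect();
      fsck.setDisplayFullReport(); // i.e. -details
      fsck.setTimeLag(0);
      fsck.onlineHbck();
    }
  }
}
```

This mirrors the explicit hbck.close() added in run() and the IOUtils.closeStream(HBaseFsck.this) call the shutdown hook now makes before unlockHbck().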
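Note on the test refactor: TestHBaseFsck replaces ad-hoc new HTable(conf, ...) instances with one shared connection, a bounded executor for table threads, and short-lived Table handles. A minimal standalone sketch of that pattern follows; the pool size, thread-name prefix, and the HBaseConfiguration.create() call are illustrative assumptions rather than values required by the patch.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Threads;

public class SharedConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumed
    // One bounded pool shared by every Table handle, as in setUpBeforeClass().
    ExecutorService tableExecutorService = new ThreadPoolExecutor(1, 7, 60, TimeUnit.SECONDS,
        new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("sketch"));
    // One connection for the whole run instead of a new HTable(conf, ...) per call site.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
         ResultScanner scanner = meta.getScanner(new Scan())) {
      scanner.next(); // touch hbase:meta through the shared connection
    } finally {
      tableExecutorService.shutdown();
    }
  }
}
```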
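Note on the fsck executor sharing: the tests now hand one ScheduledThreadPoolExecutor to every HBaseFsck they construct and close each instance once its run finishes, which is also what HbckTestingUtil.doFsck does after onlineHbck(). A hedged sketch of that call pattern is below; the pool size and the particular fix flag are arbitrary choices for illustration, and only methods already used elsewhere in the patch appear.

```java
import java.util.concurrent.ScheduledThreadPoolExecutor;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.HBaseFsck;

public class SharedFsckExecutorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumed
    // One pool reused across HBaseFsck instances, owned by the caller.
    ScheduledThreadPoolExecutor hbfsckExecutorService = new ScheduledThreadPoolExecutor(7);
    HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService);
    try {
      fsck.connect();
      fsck.setDisplayFullReport(); // i.e. -details
      fsck.setTimeLag(0);
      fsck.setFixAssignments(true);
      fsck.onlineHbck();
    } finally {
      fsck.close(); // releases admin, meta and connection; the pool stays up for reuse
    }
    hbfsckExecutorService.shutdown(); // shut the shared pool down once, as in tearDownAfterClass()
  }
}
```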