From d64d015f5138b5496f20875cfc108f64b904ce55 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 12 Apr 2019 15:08:11 +0800 Subject: [PATCH] HBASE-21718 Implement Admin based on AsyncAdmin --- .../hadoop/hbase/backup/util/RestoreTool.java | 2 +- .../hadoop/hbase/backup/TestBackupBase.java | 6 +- .../hbase/backup/TestBackupDeleteRestore.java | 4 +- .../hadoop/hbase/backup/TestBackupMerge.java | 4 +- .../backup/TestBackupMultipleDeletes.java | 5 +- .../hbase/backup/TestBackupSystemTable.java | 2 +- .../hbase/backup/TestFullBackupSet.java | 4 +- .../backup/TestFullBackupSetRestoreSet.java | 6 +- .../hadoop/hbase/backup/TestFullRestore.java | 16 +- .../hbase/backup/TestIncrementalBackup.java | 7 +- .../TestIncrementalBackupDeleteTable.java | 7 +- ...estIncrementalBackupMergeWithFailures.java | 4 +- .../TestIncrementalBackupWithBulkLoad.java | 6 +- .../TestIncrementalBackupWithFailures.java | 5 +- .../hadoop/hbase/backup/TestRemoteBackup.java | 4 +- .../hbase/backup/TestRemoteRestore.java | 4 +- .../backup/TestRestoreBoundaryTests.java | 6 +- .../hbase/backup/TestSystemTableSnapshot.java | 4 +- .../hadoop/hbase/AsyncMetaTableAccessor.java | 29 +- .../org/apache/hadoop/hbase/client/Admin.java | 31 +- .../hbase/client/AdminOverAsyncAdmin.java | 945 ++++++++++++++++++ .../client/ConnectionOverAsyncConnection.java | 2 +- .../hadoop/hbase/client/ConnectionUtils.java | 18 + .../CoprocessorBlockingRpcCallback.java | 68 ++ .../hadoop/hbase/client/HBaseAdmin.java | 9 +- .../hbase/client/RawAsyncHBaseAdmin.java | 37 +- .../RegionCoprocessorRpcChannelImpl.java | 21 +- .../client/SyncCoprocessorRpcChannel.java | 3 + .../hbase/client/TableOverAsyncTable.java | 51 +- .../hbase/client/TestInterfaceAlign.java | 2 + .../hadoop/hbase/PerformanceEvaluation.java | 6 +- .../hbase/rest/client/TestRemoteTable.java | 4 +- .../hadoop/hbase/HBaseTestingUtility.java | 42 +- .../hadoop/hbase/client/TestAdmin1.java | 41 +- .../hadoop/hbase/client/TestAdmin2.java | 35 +- 
.../hbase/client/TestFromClientSide.java | 7 +- .../hbase/client/TestFromClientSide3.java | 10 +- .../TestSnapshotDFSTemporaryDirectory.java | 5 +- .../TestSnapshotTemporaryDirectory.java | 6 +- .../hbase/client/TestSplitOrMergeStatus.java | 13 +- .../hbase/coprocessor/TestMasterObserver.java | 2 +- .../hadoop/hbase/master/TestMaster.java | 4 +- .../master/TestMasterMetricsWrapper.java | 4 +- .../TestMergeTableRegionsWhileRSCrash.java | 2 +- .../master/TestSplitRegionWhileRSCrash.java | 2 +- .../assignment/TestAssignmentOnRSCrash.java | 2 +- .../TestMasterAbortWhileMergingTable.java | 2 +- .../TestModifyTableWhileMerging.java | 2 +- ...TestCleanupCompactedFileOnRegionClose.java | 6 +- .../regionserver/TestRegionServerAbort.java | 2 +- .../regionserver/TestReplicator.java | 4 +- .../hbase/snapshot/SnapshotTestingUtils.java | 29 - .../snapshot/TestFlushSnapshotFromClient.java | 41 +- .../hadoop/hbase/tool/TestBulkLoadHFiles.java | 6 +- .../thrift2/ThriftHBaseServiceHandler.java | 6 +- .../hbase/thrift2/client/ThriftAdmin.java | 9 +- 56 files changed, 1276 insertions(+), 328 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorBlockingRpcCallback.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java index d2038c74592..ff8d26a53e3 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java @@ -489,7 +489,7 @@ public class RestoreTool { LOG.info("Creating target table '" + targetTableName + "'"); byte[][] keys; if (regionDirList == null || regionDirList.size() == 0) { - admin.createTable(htd, null); + admin.createTable(htd); } else { keys = generateBoundaryKeys(regionDirList); // 
create table using table descriptor and region boundaries diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index e0fca20b54f..64978bcd773 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -26,7 +26,6 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; @@ -53,7 +52,6 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.cleaner.LogCleaner; @@ -342,7 +340,7 @@ public class TestBackupBase { @AfterClass public static void tearDown() throws Exception { try{ - SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin()); + SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin()); } catch (Exception e) { } SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL); @@ -416,7 +414,7 @@ public class TestBackupBase { protected static void createTables() throws Exception { long tid = System.currentTimeMillis(); table1 = TableName.valueOf("test-" + tid); - HBaseAdmin ha = TEST_UTIL.getHBaseAdmin(); + Admin ha = TEST_UTIL.getAdmin(); // Create namespaces NamespaceDescriptor desc1 = NamespaceDescriptor.create("ns1").build(); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java index 
74176e390c6..f649b921b27 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java @@ -24,8 +24,8 @@ import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.util.BackupUtils; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -61,7 +61,7 @@ public class TestBackupDeleteRestore extends TestBackupBase { assertTrue(checkSucceeded(backupId)); LOG.info("backup complete"); int numRows = TEST_UTIL.countRows(table1); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); // delete row try (Table table = TEST_UTIL.getConnection().getTable(table1)) { Delete delete = new Delete(Bytes.toBytes("row0")); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java index beacef3c14e..1a8638c3b7d 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java @@ -24,9 +24,9 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.util.BackupUtils; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Table; import 
org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.Assert; @@ -62,7 +62,7 @@ public class TestBackupMerge extends TestBackupBase { Connection conn = ConnectionFactory.createConnection(conf1); - HBaseAdmin admin = (HBaseAdmin) conn.getAdmin(); + Admin admin = conn.getAdmin(); BackupAdminImpl client = new BackupAdminImpl(conn); BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java index bffa4808171..538488b4c4e 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java @@ -26,9 +26,9 @@ import java.util.Set; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -59,9 +59,8 @@ public class TestBackupMultipleDeletes extends TestBackupBase { // #1 - create full backup for all tables LOG.info("create full backup image for all tables"); List tables = Lists.newArrayList(table1, table2); - HBaseAdmin admin = null; Connection conn = ConnectionFactory.createConnection(conf1); - admin = (HBaseAdmin) conn.getAdmin(); + Admin admin = conn.getAdmin(); BackupAdmin client = new BackupAdminImpl(conn); BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); String backupIdFull = client.backupTables(request); diff --git 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java index aa6e5dd668e..5d48fc52b5d 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java @@ -119,7 +119,7 @@ public class TestBackupSystemTable { } private void cleanBackupTable() throws IOException { - Admin admin = UTIL.getHBaseAdmin(); + Admin admin = UTIL.getAdmin(); admin.disableTable(BackupSystemTable.getTableName(conf)); admin.truncateTable(BackupSystemTable.getTableName(conf), true); if (admin.isTableDisabled(BackupSystemTable.getTableName(conf))) { diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java index 89ff5711e6c..7a3aec46a9a 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java @@ -25,7 +25,7 @@ import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.util.ToolRunner; import org.junit.ClassRule; @@ -80,7 +80,7 @@ public class TestFullBackupSet extends TestBackupBase { // Run backup ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1_restore)); // Verify number of rows in both tables assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore)); diff 
--git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java index ca70f6a75c8..3543133734e 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java @@ -25,7 +25,7 @@ import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.util.ToolRunner; import org.junit.ClassRule; @@ -76,7 +76,7 @@ public class TestFullBackupSetRestoreSet extends TestBackupBase { // Run backup ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1_restore)); // Verify number of rows in both tables assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore)); @@ -118,7 +118,7 @@ public class TestFullBackupSetRestoreSet extends TestBackupBase { // Run backup ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1)); // Verify number of rows in both tables assertEquals(count, TEST_UTIL.countRows(table1)); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java index 2201e2f8cd0..f5ad0d7b827 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java +++ 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java @@ -26,7 +26,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.util.BackupUtils; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.util.ToolRunner; import org.junit.ClassRule; @@ -66,7 +66,7 @@ public class TestFullRestore extends TestBackupBase { BackupAdmin client = getBackupAdmin(); client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, false)); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); hba.close(); @@ -88,7 +88,7 @@ public class TestFullRestore extends TestBackupBase { int ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); hba.close(); @@ -110,7 +110,7 @@ public class TestFullRestore extends TestBackupBase { int ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); //Verify that table has not been restored - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertFalse(hba.tableExists(table1_restore)); } @@ -131,7 +131,7 @@ public class TestFullRestore extends TestBackupBase { BackupAdmin client = getBackupAdmin(); client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset, tablemap, false)); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table2_restore)); assertTrue(hba.tableExists(table3_restore)); 
TEST_UTIL.deleteTable(table2_restore); @@ -162,7 +162,7 @@ public class TestFullRestore extends TestBackupBase { int ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table2_restore)); assertTrue(hba.tableExists(table3_restore)); TEST_UTIL.deleteTable(table2_restore); @@ -210,7 +210,7 @@ public class TestFullRestore extends TestBackupBase { int ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1)); hba.close(); } @@ -256,7 +256,7 @@ public class TestFullRestore extends TestBackupBase { int ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table2)); assertTrue(hba.tableExists(table3)); hba.close(); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java index 35a77eab642..d7c2cd020c0 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java @@ -29,9 +29,9 @@ import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.util.BackupUtils; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.HRegion; @@ 
-93,8 +93,7 @@ public class TestIncrementalBackup extends TestBackupBase { int NB_ROWS_FAM3 = 6; insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close(); insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close(); - HBaseAdmin admin = null; - admin = (HBaseAdmin) conn.getAdmin(); + Admin admin = conn.getAdmin(); BackupAdminImpl client = new BackupAdminImpl(conn); BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); String backupIdFull = client.backupTables(request); @@ -182,7 +181,7 @@ public class TestIncrementalBackup extends TestBackupBase { tablesRestoreFull, tablesMapFull, true)); // #6.1 - check tables for full restore - HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin(); + Admin hAdmin = TEST_UTIL.getAdmin(); assertTrue(hAdmin.tableExists(table1_restore)); assertTrue(hAdmin.tableExists(table2_restore)); hAdmin.close(); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java index 08834f2fae9..837de4dd616 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java @@ -24,9 +24,9 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.util.BackupUtils; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -64,9 +64,8 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase { 
LOG.info("create full backup image for all tables"); List tables = Lists.newArrayList(table1, table2); - HBaseAdmin admin = null; Connection conn = ConnectionFactory.createConnection(conf1); - admin = (HBaseAdmin) conn.getAdmin(); + Admin admin = conn.getAdmin(); BackupAdminImpl client = new BackupAdminImpl(conn); BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); @@ -105,7 +104,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase { tablesRestoreFull, tablesMapFull, false)); // #5.1 - check tables for full restore - HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin(); + Admin hAdmin = TEST_UTIL.getAdmin(); assertTrue(hAdmin.tableExists(table1_restore)); assertTrue(hAdmin.tableExists(table2_restore)); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java index 73512587c40..1bde63ba552 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java @@ -36,9 +36,9 @@ import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob; import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob; import org.apache.hadoop.hbase.backup.util.BackupUtils; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Pair; @@ -235,7 +235,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase { Connection conn = 
ConnectionFactory.createConnection(conf1); - HBaseAdmin admin = (HBaseAdmin) conn.getAdmin(); + Admin admin = conn.getAdmin(); BackupAdminImpl client = new BackupAdminImpl(conn); BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java index 4b020779347..60aa635045a 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java @@ -26,9 +26,9 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.backup.util.BackupUtils; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -70,7 +70,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase { List tables = Lists.newArrayList(table1); Connection conn = ConnectionFactory.createConnection(conf1); - HBaseAdmin admin = (HBaseAdmin) conn.getAdmin(); + Admin admin = conn.getAdmin(); BackupAdminImpl client = new BackupAdminImpl(conn); BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); @@ -119,7 +119,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase { // Delete all data in table1 TEST_UTIL.deleteTableData(table1); // #5.1 - check tables for full restore */ - HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin(); + Admin hAdmin = 
TEST_UTIL.getAdmin(); // #6 - restore incremental backup for table1 TableName[] tablesRestoreIncMultiple = new TableName[] { table1 }; diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java index f6725d9e250..546cf414ebb 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java @@ -32,9 +32,9 @@ import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.backup.impl.TableBackupClient; import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -90,8 +90,7 @@ public class TestIncrementalBackupWithFailures extends TestBackupBase { int NB_ROWS_FAM3 = 6; insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close(); - HBaseAdmin admin = null; - admin = (HBaseAdmin) conn.getAdmin(); + Admin admin = conn.getAdmin(); BackupAdminImpl client = new BackupAdminImpl(conn); BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java index 05826e204bb..2d99e0dd86e 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java +++ 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java @@ -26,9 +26,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.util.BackupUtils; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils; @@ -126,7 +126,7 @@ public class TestRemoteBackup extends TestBackupBase { tablesRestoreFull, tablesMapFull, false)); // check tables for full restore - HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin(); + Admin hAdmin = TEST_UTIL.getAdmin(); assertTrue(hAdmin.tableExists(table1_restore)); // #5.2 - checking row count of tables for full restore diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java index 25ebca2b756..d6701449fb2 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java @@ -22,7 +22,7 @@ import static org.junit.Assert.assertTrue; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.util.BackupUtils; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.ClassRule; import org.junit.Test; @@ -61,7 +61,7 @@ public class TestRemoteRestore extends TestBackupBase { getBackupAdmin().restore( BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tableset, 
tablemap, false)); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); hba.close(); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java index 07f57cc3237..a6808cd69dc 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java @@ -23,7 +23,7 @@ import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.util.BackupUtils; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.ClassRule; import org.junit.Test; @@ -55,7 +55,7 @@ public class TestRestoreBoundaryTests extends TestBackupBase { getBackupAdmin().restore( BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, false)); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); } @@ -76,7 +76,7 @@ public class TestRestoreBoundaryTests extends TestBackupBase { getBackupAdmin().restore( BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset, tablemap, false)); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table2_restore)); assertTrue(hba.tableExists(table3_restore)); TEST_UTIL.deleteTable(table2_restore); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java 
index b93fa77f578..bd295122a28 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java @@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.backup; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.ClassRule; import org.junit.experimental.categories.Category; @@ -47,7 +47,7 @@ public class TestSystemTableSnapshot extends TestBackupBase { TableName backupSystem = BackupSystemTable.getTableName(conf1); - HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + Admin hba = TEST_UTIL.getAdmin(); String snapshotName = "sysTable"; hba.snapshot(snapshotName, backupSystem); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java index 4a886d13e97..d04ea527f74 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.client.Scan.ReadType; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -80,23 +79,17 @@ public class AsyncMetaTableAccessor { TableName tableName) { CompletableFuture> future = new CompletableFuture<>(); Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getStateColumn()); - long 
time = EnvironmentEdgeManager.currentTime(); - try { - get.setTimeRange(0, time); - addListener(metaTable.get(get), (result, error) -> { - if (error != null) { - future.completeExceptionally(error); - return; - } - try { - future.complete(getTableState(result)); - } catch (IOException e) { - future.completeExceptionally(e); - } - }); - } catch (IOException ioe) { - future.completeExceptionally(ioe); - } + addListener(metaTable.get(get), (result, error) -> { + if (error != null) { + future.completeExceptionally(error); + return; + } + try { + future.complete(getTableState(result)); + } catch (IOException e) { + future.completeExceptionally(e); + } + }); return future; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 938b63edcd4..6bf5ae9dc23 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -255,13 +255,14 @@ public interface Admin extends Abortable, Closeable { Future createTableAsync(TableDescriptor desc) throws IOException; /** - * Creates a new table but does not block and wait for it to come online. You can use - * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw - * ExecutionException if there was an error while executing the operation or TimeoutException in - * case the wait timeout was not long enough to allow the operation to complete. - *

- * Throws IllegalArgumentException Bad table name, if the split keys are repeated and if the split - * key has empty byte array. + * Creates a new table but does not block and wait for it to come online. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. + * It may throw ExecutionException if there was an error while executing the operation + * or TimeoutException in case the wait timeout was not long enough to allow the + * operation to complete. + * Throws IllegalArgumentException Bad table name, if the split keys + * are repeated and if the split key has empty byte array. + * * @param desc table descriptor for table * @param splitKeys keys to check if the table has been created with all split keys * @throws IOException if a remote or network exception occurs @@ -723,7 +724,7 @@ public interface Admin extends Abortable, Closeable { } /** - * Move the region rencodedRegionName to destServerName. + * Move the region encodedRegionName to destServerName. * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name * suffix: e.g. if regionname is * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., @@ -922,15 +923,13 @@ public interface Admin extends Abortable, Closeable { /** * Split a table. The method will execute split action for each region in table. - * Asynchronous operation. * @param tableName table to split * @throws IOException if a remote or network exception occurs */ void split(TableName tableName) throws IOException; /** - * Split a table. Asynchronous operation. - * + * Split a table. 
* @deprecated since 3.0.0, will be removed in 4.0.0. This is too low level, please stop using it any
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.apache.hadoop.hbase.client.ConnectionUtils.setCoprocessorError; +import static org.apache.hadoop.hbase.util.FutureUtils.get; + +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcChannel; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; +import java.io.IOException; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Future; +import java.util.regex.Pattern; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CacheEvictionStats; +import org.apache.hadoop.hbase.ClusterMetrics; +import org.apache.hadoop.hbase.ClusterMetrics.Option; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.NamespaceNotFoundException; +import org.apache.hadoop.hbase.RegionMetrics; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import 
org.apache.hadoop.hbase.client.replication.TableCFs; +import org.apache.hadoop.hbase.client.security.SecurityCapability; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.quotas.QuotaFilter; +import org.apache.hadoop.hbase.quotas.QuotaSettings; +import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotView; +import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; +import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; +import org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest; +import org.apache.hadoop.hbase.security.access.Permission; +import org.apache.hadoop.hbase.security.access.UserPermission; +import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; +import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; +import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; +import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * The {@link Admin} implementation which is based on an {@link AsyncAdmin}. 
LOG.warn("Aborting because of {}", why, e);
@Override + public TableDescriptor getDescriptor(TableName tableName) + throws TableNotFoundException, IOException { + return get(admin.getDescriptor(tableName)); + } + + @Override + public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) + throws IOException { + get(admin.createTable(desc, startKey, endKey, numRegions)); + } + + @Override + public Future createTableAsync(TableDescriptor desc) throws IOException { + return admin.createTable(desc); + } + + @Override + public Future createTableAsync(TableDescriptor desc, byte[][] splitKeys) + throws IOException { + return admin.createTable(desc, splitKeys); + } + + @Override + public Future deleteTableAsync(TableName tableName) throws IOException { + return admin.deleteTable(tableName); + } + + @Override + public Future truncateTableAsync(TableName tableName, boolean preserveSplits) + throws IOException { + return admin.truncateTable(tableName, preserveSplits); + } + + @Override + public Future enableTableAsync(TableName tableName) throws IOException { + return admin.enableTable(tableName); + } + + @Override + public Future disableTableAsync(TableName tableName) throws IOException { + return admin.disableTable(tableName); + } + + @Override + public boolean isTableEnabled(TableName tableName) throws IOException { + return get(admin.isTableEnabled(tableName)); + } + + @Override + public boolean isTableDisabled(TableName tableName) throws IOException { + return get(admin.isTableDisabled(tableName)); + } + + @Override + public boolean isTableAvailable(TableName tableName) throws IOException { + return get(admin.isTableAvailable(tableName)); + } + + @Override + public Future addColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily) + throws IOException { + return admin.addColumnFamily(tableName, columnFamily); + } + + @Override + public Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) + throws IOException { + return 
admin.deleteColumnFamily(tableName, columnFamily); + } + + @Override + public Future modifyColumnFamilyAsync(TableName tableName, + ColumnFamilyDescriptor columnFamily) throws IOException { + return admin.modifyColumnFamily(tableName, columnFamily); + } + + @Override + public List getRegions(ServerName serverName) throws IOException { + return get(admin.getRegions(serverName)); + } + + @Override + public void flush(TableName tableName) throws IOException { + get(admin.flush(tableName)); + } + + @Override + public void flushRegion(byte[] regionName) throws IOException { + get(admin.flushRegion(regionName)); + } + + @Override + public void flushRegionServer(ServerName serverName) throws IOException { + get(admin.flushRegionServer(serverName)); + } + + @Override + public void compact(TableName tableName) throws IOException { + get(admin.compact(tableName)); + } + + @Override + public void compactRegion(byte[] regionName) throws IOException { + get(admin.compactRegion(regionName)); + } + + @Override + public void compact(TableName tableName, byte[] columnFamily) throws IOException { + get(admin.compact(tableName, columnFamily)); + } + + @Override + public void compactRegion(byte[] regionName, byte[] columnFamily) throws IOException { + get(admin.compactRegion(regionName, columnFamily)); + } + + @Override + public void compact(TableName tableName, CompactType compactType) + throws IOException, InterruptedException { + get(admin.compact(tableName, compactType)); + } + + @Override + public void compact(TableName tableName, byte[] columnFamily, CompactType compactType) + throws IOException, InterruptedException { + get(admin.compact(tableName, columnFamily, compactType)); + } + + @Override + public void majorCompact(TableName tableName) throws IOException { + get(admin.majorCompact(tableName)); + } + + @Override + public void majorCompactRegion(byte[] regionName) throws IOException { + get(admin.majorCompactRegion(regionName)); + } + + @Override + public void 
majorCompact(TableName tableName, byte[] columnFamily) throws IOException { + get(admin.majorCompact(tableName, columnFamily)); + } + + @Override + public void majorCompactRegion(byte[] regionName, byte[] columnFamily) throws IOException { + get(admin.majorCompactRegion(regionName, columnFamily)); + } + + @Override + public void majorCompact(TableName tableName, CompactType compactType) + throws IOException, InterruptedException { + get(admin.majorCompact(tableName, compactType)); + } + + @Override + public void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType) + throws IOException, InterruptedException { + get(admin.majorCompact(tableName, columnFamily, compactType)); + } + + @Override + public Map compactionSwitch(boolean switchState, + List serverNamesList) throws IOException { + return get(admin.compactionSwitch(switchState, serverNamesList)); + } + + @Override + public void compactRegionServer(ServerName serverName) throws IOException { + get(admin.compactRegionServer(serverName)); + } + + @Override + public void majorCompactRegionServer(ServerName serverName) throws IOException { + get(admin.majorCompactRegionServer(serverName)); + } + + @Override + public void move(byte[] encodedRegionName) throws IOException { + get(admin.move(encodedRegionName)); + } + + @Override + public void move(byte[] encodedRegionName, ServerName destServerName) throws IOException { + get(admin.move(encodedRegionName, destServerName)); + } + + @Override + public void assign(byte[] regionName) throws IOException { + get(admin.assign(regionName)); + } + + @Override + public void unassign(byte[] regionName, boolean force) throws IOException { + get(admin.unassign(regionName, force)); + } + + @Override + public void offline(byte[] regionName) throws IOException { + get(admin.offline(regionName)); + } + + @Override + public boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOException { + return get(admin.balancerSwitch(onOrOff, 
synchronous)); + } + + @Override + public boolean balance() throws IOException { + return get(admin.balance()); + } + + @Override + public boolean balance(boolean force) throws IOException { + return get(admin.balance(force)); + } + + @Override + public boolean isBalancerEnabled() throws IOException { + return get(admin.isBalancerEnabled()); + } + + @Override + public CacheEvictionStats clearBlockCache(TableName tableName) throws IOException { + return get(admin.clearBlockCache(tableName)); + } + + @Override + public boolean normalize() throws IOException { + return get(admin.normalize()); + } + + @Override + public boolean isNormalizerEnabled() throws IOException { + return get(admin.isNormalizerEnabled()); + } + + @Override + public boolean normalizerSwitch(boolean on) throws IOException { + return get(admin.normalizerSwitch(on)); + } + + @Override + public boolean catalogJanitorSwitch(boolean onOrOff) throws IOException { + return get(admin.catalogJanitorSwitch(onOrOff)); + } + + @Override + public int runCatalogJanitor() throws IOException { + return get(admin.runCatalogJanitor()); + } + + @Override + public boolean isCatalogJanitorEnabled() throws IOException { + return get(admin.isCatalogJanitorEnabled()); + } + + @Override + public boolean cleanerChoreSwitch(boolean onOrOff) throws IOException { + return get(admin.cleanerChoreSwitch(onOrOff)); + } + + @Override + public boolean runCleanerChore() throws IOException { + return get(admin.runCleanerChore()); + } + + @Override + public boolean isCleanerChoreEnabled() throws IOException { + return get(admin.isCleanerChoreEnabled()); + } + + @Override + public Future mergeRegionsAsync(byte[][] nameOfRegionsToMerge, boolean forcible) + throws IOException { + return admin.mergeRegions(Arrays.asList(nameOfRegionsToMerge), forcible); + } + + @Override + public void split(TableName tableName) throws IOException { + get(admin.split(tableName)); + } + + @Override + public void split(TableName tableName, byte[] splitPoint) 
throws IOException { + get(admin.split(tableName, splitPoint)); + } + + @Override + public Future splitRegionAsync(byte[] regionName) throws IOException { + return admin.splitRegion(regionName); + } + + @Override + public Future splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException { + return admin.splitRegion(regionName, splitPoint); + } + + @Override + public Future modifyTableAsync(TableDescriptor td) throws IOException { + return admin.modifyTable(td); + } + + @Override + public void shutdown() throws IOException { + get(admin.shutdown()); + } + + @Override + public void stopMaster() throws IOException { + get(admin.stopMaster()); + } + + @Override + public boolean isMasterInMaintenanceMode() throws IOException { + return get(admin.isMasterInMaintenanceMode()); + } + + @Override + public void stopRegionServer(String hostnamePort) throws IOException { + get(admin.stopRegionServer(ServerName.valueOf(hostnamePort, 0))); + } + + @Override + public ClusterMetrics getClusterMetrics(EnumSet