HBASE-21718 Implement Admin based on AsyncAdmin

parent 6b87a4ce98
commit d64d015f51
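The core of this change is a new blocking Admin implementation, AdminOverAsyncAdmin (added at the bottom of this diff), which delegates every call to an AsyncAdmin and waits on the returned CompletableFuture via FutureUtils.get. A minimal sketch of that sync-over-async delegation pattern, with hypothetical SimpleBlockingAdmin/SimpleAsyncAdmin types standing in for the real classes:

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

// Hypothetical stand-in for AdminOverAsyncAdmin: each blocking call just
// waits on the future returned by the async admin.
class SimpleBlockingAdmin {
  private final SimpleAsyncAdmin admin;

  SimpleBlockingAdmin(SimpleAsyncAdmin admin) {
    this.admin = admin;
  }

  boolean tableExists(String tableName) throws IOException {
    return join(admin.tableExists(tableName));
  }

  // Unwrap the async result roughly the way FutureUtils.get does: rethrow
  // the cause as an IOException so callers keep the classic blocking contract.
  private static <T> T join(CompletableFuture<T> future) throws IOException {
    try {
      return future.get();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IOException(e);
    } catch (ExecutionException e) {
      throw new IOException(e.getCause());
    }
  }
}

// Hypothetical stand-in for the AsyncAdmin side.
interface SimpleAsyncAdmin {
  CompletableFuture<Boolean> tableExists(String tableName);
}

Most of the diff below is the mechanical fallout in the hbase-backup tests: HBaseAdmin declarations and (HBaseAdmin) casts are replaced with the Admin interface, since the returned implementation is no longer an HBaseAdmin.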
@@ -489,7 +489,7 @@ public class RestoreTool {
       LOG.info("Creating target table '" + targetTableName + "'");
       byte[][] keys;
       if (regionDirList == null || regionDirList.size() == 0) {
-        admin.createTable(htd, null);
+        admin.createTable(htd);
       } else {
         keys = generateBoundaryKeys(regionDirList);
         // create table using table descriptor and region boundaries

@@ -26,7 +26,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;

@@ -53,7 +52,6 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;

@@ -342,7 +340,7 @@ public class TestBackupBase {
   @AfterClass
   public static void tearDown() throws Exception {
     try{
-      SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
+      SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin());
     } catch (Exception e) {
     }
     SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);

@@ -416,7 +414,7 @@ public class TestBackupBase {
   protected static void createTables() throws Exception {
     long tid = System.currentTimeMillis();
     table1 = TableName.valueOf("test-" + tid);
-    HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();
+    Admin ha = TEST_UTIL.getAdmin();

     // Create namespaces
     NamespaceDescriptor desc1 = NamespaceDescriptor.create("ns1").build();

@@ -24,8 +24,8 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;

@@ -61,7 +61,7 @@ public class TestBackupDeleteRestore extends TestBackupBase {
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     int numRows = TEST_UTIL.countRows(table1);
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     // delete row
     try (Table table = TEST_UTIL.getConnection().getTable(table1)) {
       Delete delete = new Delete(Bytes.toBytes("row0"));

@@ -24,9 +24,9 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.Assert;

@@ -62,7 +62,7 @@ public class TestBackupMerge extends TestBackupBase {

     Connection conn = ConnectionFactory.createConnection(conf1);

-    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);

     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);

@@ -26,9 +26,9 @@ import java.util.Set;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;

@@ -59,9 +59,8 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     // #1 - create full backup for all tables
     LOG.info("create full backup image for all tables");
     List<TableName> tables = Lists.newArrayList(table1, table2);
-    HBaseAdmin admin = null;
     Connection conn = ConnectionFactory.createConnection(conf1);
-    admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdmin client = new BackupAdminImpl(conn);
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
     String backupIdFull = client.backupTables(request);

@@ -119,7 +119,7 @@ public class TestBackupSystemTable {
   }

   private void cleanBackupTable() throws IOException {
-    Admin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getAdmin();
     admin.disableTable(BackupSystemTable.getTableName(conf));
     admin.truncateTable(BackupSystemTable.getTableName(conf), true);
     if (admin.isTableDisabled(BackupSystemTable.getTableName(conf))) {

@@ -25,7 +25,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.ClassRule;

@@ -80,7 +80,7 @@ public class TestFullBackupSet extends TestBackupBase {
     // Run backup
     ret = ToolRunner.run(conf1, new RestoreDriver(), args);
     assertTrue(ret == 0);
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1_restore));
     // Verify number of rows in both tables
     assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore));

@@ -25,7 +25,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.ClassRule;

|
@ -76,7 +76,7 @@ public class TestFullBackupSetRestoreSet extends TestBackupBase {
|
||||||
// Run backup
|
// Run backup
|
||||||
ret = ToolRunner.run(conf1, new RestoreDriver(), args);
|
ret = ToolRunner.run(conf1, new RestoreDriver(), args);
|
||||||
assertTrue(ret == 0);
|
assertTrue(ret == 0);
|
||||||
HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
|
Admin hba = TEST_UTIL.getAdmin();
|
||||||
assertTrue(hba.tableExists(table1_restore));
|
assertTrue(hba.tableExists(table1_restore));
|
||||||
// Verify number of rows in both tables
|
// Verify number of rows in both tables
|
||||||
assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore));
|
assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore));
|
||||||
|
@@ -118,7 +118,7 @@ public class TestFullBackupSetRestoreSet extends TestBackupBase {
     // Run backup
     ret = ToolRunner.run(conf1, new RestoreDriver(), args);
     assertTrue(ret == 0);
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1));
     // Verify number of rows in both tables
     assertEquals(count, TEST_UTIL.countRows(table1));

@@ -26,7 +26,7 @@ import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.ClassRule;

@@ -66,7 +66,7 @@ public class TestFullRestore extends TestBackupBase {
     BackupAdmin client = getBackupAdmin();
     client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
       tableset, tablemap, false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
     hba.close();

@@ -88,7 +88,7 @@ public class TestFullRestore extends TestBackupBase {
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);

     assertTrue(ret == 0);
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
     hba.close();

@@ -110,7 +110,7 @@ public class TestFullRestore extends TestBackupBase {
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
     assertTrue(ret == 0);
     //Verify that table has not been restored
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertFalse(hba.tableExists(table1_restore));
   }

@@ -131,7 +131,7 @@ public class TestFullRestore extends TestBackupBase {
     BackupAdmin client = getBackupAdmin();
     client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
       restore_tableset, tablemap, false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table2_restore));
     assertTrue(hba.tableExists(table3_restore));
     TEST_UTIL.deleteTable(table2_restore);

@@ -162,7 +162,7 @@ public class TestFullRestore extends TestBackupBase {
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);

     assertTrue(ret == 0);
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table2_restore));
     assertTrue(hba.tableExists(table3_restore));
     TEST_UTIL.deleteTable(table2_restore);

@@ -210,7 +210,7 @@ public class TestFullRestore extends TestBackupBase {
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
     assertTrue(ret == 0);

-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1));
     hba.close();
   }

@@ -256,7 +256,7 @@ public class TestFullRestore extends TestBackupBase {
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);

     assertTrue(ret == 0);
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table2));
     assertTrue(hba.tableExists(table3));
     hba.close();

@@ -29,9 +29,9 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegion;

@@ -93,8 +93,7 @@ public class TestIncrementalBackup extends TestBackupBase {
     int NB_ROWS_FAM3 = 6;
     insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
     insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
-    HBaseAdmin admin = null;
-    admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
     String backupIdFull = client.backupTables(request);

@@ -182,7 +181,7 @@ public class TestIncrementalBackup extends TestBackupBase {
       tablesRestoreFull, tablesMapFull, true));

     // #6.1 - check tables for full restore
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+    Admin hAdmin = TEST_UTIL.getAdmin();
     assertTrue(hAdmin.tableExists(table1_restore));
     assertTrue(hAdmin.tableExists(table2_restore));
     hAdmin.close();

@@ -24,9 +24,9 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;

@@ -64,9 +64,8 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     LOG.info("create full backup image for all tables");

     List<TableName> tables = Lists.newArrayList(table1, table2);
-    HBaseAdmin admin = null;
     Connection conn = ConnectionFactory.createConnection(conf1);
-    admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);

     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);

@@ -105,7 +104,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
       tablesRestoreFull, tablesMapFull, false));

     // #5.1 - check tables for full restore
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+    Admin hAdmin = TEST_UTIL.getAdmin();
     assertTrue(hAdmin.tableExists(table1_restore));
     assertTrue(hAdmin.tableExists(table2_restore));

@@ -36,9 +36,9 @@ import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob;
 import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Pair;

@@ -235,7 +235,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {

     Connection conn = ConnectionFactory.createConnection(conf1);

-    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);

     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);

@@ -26,9 +26,9 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;

@@ -70,7 +70,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {

     List<TableName> tables = Lists.newArrayList(table1);
     Connection conn = ConnectionFactory.createConnection(conf1);
-    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);

     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);

@@ -119,7 +119,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     // Delete all data in table1
     TEST_UTIL.deleteTableData(table1);
     // #5.1 - check tables for full restore */
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+    Admin hAdmin = TEST_UTIL.getAdmin();

     // #6 - restore incremental backup for table1
     TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };

@@ -32,9 +32,9 @@ import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
 import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;

@@ -90,8 +90,7 @@ public class TestIncrementalBackupWithFailures extends TestBackupBase {
     int NB_ROWS_FAM3 = 6;
     insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();

-    HBaseAdmin admin = null;
-    admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);

     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);

@@ -26,9 +26,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;

@@ -126,7 +126,7 @@ public class TestRemoteBackup extends TestBackupBase {
       tablesRestoreFull, tablesMapFull, false));

     // check tables for full restore
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+    Admin hAdmin = TEST_UTIL.getAdmin();
     assertTrue(hAdmin.tableExists(table1_restore));

     // #5.2 - checking row count of tables for full restore

@@ -22,7 +22,7 @@ import static org.junit.Assert.assertTrue;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.ClassRule;
 import org.junit.Test;

@@ -61,7 +61,7 @@ public class TestRemoteRestore extends TestBackupBase {
     getBackupAdmin().restore(
       BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tableset,
         tablemap, false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
     hba.close();

@@ -23,7 +23,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.ClassRule;
 import org.junit.Test;

@@ -55,7 +55,7 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
     getBackupAdmin().restore(
       BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap,
         false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
   }

@@ -76,7 +76,7 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
     getBackupAdmin().restore(
       BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset,
         tablemap, false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table2_restore));
     assertTrue(hba.tableExists(table3_restore));
     TEST_UTIL.deleteTable(table2_restore);

@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.backup;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;

@@ -47,7 +47,7 @@ public class TestSystemTableSnapshot extends TestBackupBase {

     TableName backupSystem = BackupSystemTable.getTableName(conf1);

-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     String snapshotName = "sysTable";
     hba.snapshot(snapshotName, backupSystem);

@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;

@@ -80,23 +79,17 @@ public class AsyncMetaTableAccessor {
       TableName tableName) {
     CompletableFuture<Optional<TableState>> future = new CompletableFuture<>();
     Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getStateColumn());
-    long time = EnvironmentEdgeManager.currentTime();
-    try {
-      get.setTimeRange(0, time);
-      addListener(metaTable.get(get), (result, error) -> {
-        if (error != null) {
-          future.completeExceptionally(error);
-          return;
-        }
-        try {
-          future.complete(getTableState(result));
-        } catch (IOException e) {
-          future.completeExceptionally(e);
-        }
-      });
-    } catch (IOException ioe) {
-      future.completeExceptionally(ioe);
-    }
+    addListener(metaTable.get(get), (result, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      try {
+        future.complete(getTableState(result));
+      } catch (IOException e) {
+        future.completeExceptionally(e);
+      }
+    });
     return future;
   }

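The hunk above removes the EnvironmentEdgeManager time-range guard around the meta Get and flattens the nested try into a single listener on the future. The addListener helper used here is, in effect, CompletableFuture.whenComplete with a (result, error) pair; a minimal sketch of the same control flow, assuming that shape for the helper (parse is a hypothetical stand-in for getTableState):

import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

class ListenerSketch {
  // Assumed shape of the addListener helper: forward completion to a callback.
  static <T> void addListener(CompletableFuture<T> future,
      BiConsumer<? super T, ? super Throwable> listener) {
    future.whenComplete(listener);
  }

  // Mirrors the rewritten method: fail fast on error, otherwise complete with
  // the parsed value, converting parse failures into exceptional completion.
  static CompletableFuture<Optional<String>> fetchState(CompletableFuture<String> raw) {
    CompletableFuture<Optional<String>> future = new CompletableFuture<>();
    addListener(raw, (result, error) -> {
      if (error != null) {
        future.completeExceptionally(error);
        return;
      }
      try {
        future.complete(parse(result));
      } catch (IOException e) {
        future.completeExceptionally(e);
      }
    });
    return future;
  }

  private static Optional<String> parse(String result) throws IOException {
    return Optional.ofNullable(result);
  }
}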
@@ -255,13 +255,14 @@ public interface Admin extends Abortable, Closeable {
   Future<Void> createTableAsync(TableDescriptor desc) throws IOException;

   /**
-   * Creates a new table but does not block and wait for it to come online. You can use
-   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
-   * ExecutionException if there was an error while executing the operation or TimeoutException in
-   * case the wait timeout was not long enough to allow the operation to complete.
-   * <p/>
-   * Throws IllegalArgumentException Bad table name, if the split keys are repeated and if the split
-   * key has empty byte array.
+   * Creates a new table but does not block and wait for it to come online.
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
+   * It may throw ExecutionException if there was an error while executing the operation
+   * or TimeoutException in case the wait timeout was not long enough to allow the
+   * operation to complete.
+   * Throws IllegalArgumentException Bad table name, if the split keys
+   * are repeated and if the split key has empty byte array.
+   *
    * @param desc table descriptor for table
    * @param splitKeys keys to check if the table has been created with all split keys
    * @throws IOException if a remote or network exception occurs

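As the reworded javadoc spells out, the async variant hands back a Future that can be bounded with a timeout. A short usage sketch (the AdminLike interface, the loosely typed descriptor, and the 30-second timeout are illustrative, not part of the API):

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

class CreateTableAsyncUsage {
  // Hypothetical stand-in for the relevant slice of Admin.
  interface AdminLike {
    Future<Void> createTableAsync(Object tableDescriptor);
  }

  static void createAndWait(AdminLike admin, Object desc) throws Exception {
    Future<Void> f = admin.createTableAsync(desc);
    try {
      // Bounded wait: a TimeoutException means the wait window elapsed, not
      // that the DDL failed; an ExecutionException wraps a server-side error.
      f.get(30, TimeUnit.SECONDS);
    } catch (ExecutionException e) {
      throw new Exception("create table failed", e.getCause());
    }
  }
}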
@@ -723,7 +724,7 @@ public interface Admin extends Abortable, Closeable {
   }

   /**
-   * Move the region <code>rencodedRegionName</code> to <code>destServerName</code>.
+   * Move the region <code>encodedRegionName</code> to <code>destServerName</code>.
    * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name
    *          suffix: e.g. if regionname is
    *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,

@@ -922,15 +923,13 @@

   /**
    * Split a table. The method will execute split action for each region in table.
-   * Asynchronous operation.
    * @param tableName table to split
    * @throws IOException if a remote or network exception occurs
    */
   void split(TableName tableName) throws IOException;

   /**
-   * Split a table. Asynchronous operation.
-   *
+   * Split a table.
    * @param tableName table to split
    * @param splitPoint the explicit position to split on
    * @throws IOException if a remote or network exception occurs

@@ -1065,9 +1064,7 @@
    * @return a {@link RegionMetrics} list of all regions hosted on a region server
    * @throws IOException if a remote or network exception occurs
    */
-  default List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException {
-    return getRegionMetrics(serverName, null);
-  }
+  List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException;

   /**
    * Get {@link RegionMetrics} of all regions hosted on a regionserver for a table.

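With the default body gone, the one-argument overload becomes abstract, so an implementation backed by an AsyncAdmin can satisfy it with a single blocking delegation instead of the old getRegionMetrics(serverName, null) detour. A sketch under that assumption (AsyncAdminLike and the String placeholders are hypothetical stand-ins for AsyncAdmin, ServerName and RegionMetrics):

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

class RegionMetricsDelegation {
  // Hypothetical stand-in for the async side of the API.
  interface AsyncAdminLike {
    CompletableFuture<List<String>> getRegionMetrics(String serverName);
  }

  private final AsyncAdminLike admin;

  RegionMetricsDelegation(AsyncAdminLike admin) {
    this.admin = admin;
  }

  // One blocking hop to the async admin; the cause is rethrown as IOException.
  List<String> getRegionMetrics(String serverName) throws IOException {
    try {
      return admin.getRegionMetrics(serverName).join();
    } catch (CompletionException e) {
      throw new IOException(e.getCause());
    }
  }
}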
@@ -1660,7 +1657,10 @@
    * </pre></blockquote></div>
    *
    * @return A MasterCoprocessorRpcChannel instance
+   * @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any
+   *             more. Use the coprocessorService methods in {@link AsyncAdmin} instead.
    */
+  @Deprecated
   CoprocessorRpcChannel coprocessorService();

|
@ -1685,7 +1685,10 @@ public interface Admin extends Abortable, Closeable {
|
||||||
*
|
*
|
||||||
* @param serverName the server name to which the endpoint call is made
|
* @param serverName the server name to which the endpoint call is made
|
||||||
* @return A RegionServerCoprocessorRpcChannel instance
|
* @return A RegionServerCoprocessorRpcChannel instance
|
||||||
|
* @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any
|
||||||
|
* more. Use the coprocessorService methods in {@link AsyncAdmin} instead.
|
||||||
*/
|
*/
|
||||||
|
@Deprecated
|
||||||
CoprocessorRpcChannel coprocessorService(ServerName serverName);
|
CoprocessorRpcChannel coprocessorService(ServerName serverName);
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@@ -0,0 +1,945 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.client.ConnectionUtils.setCoprocessorError;
+import static org.apache.hadoop.hbase.util.FutureUtils.get;
+
+import com.google.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcChannel;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Future;
+import java.util.regex.Pattern;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CacheEvictionStats;
+import org.apache.hadoop.hbase.ClusterMetrics;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.replication.TableCFs;
+import org.apache.hadoop.hbase.client.security.SecurityCapability;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.quotas.QuotaFilter;
+import org.apache.hadoop.hbase.quotas.QuotaSettings;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotView;
+import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
+import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.security.access.UserPermission;
+import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The {@link Admin} implementation which is based on an {@link AsyncAdmin}.
+ */
+@InterfaceAudience.Private
+class AdminOverAsyncAdmin implements Admin {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AdminOverAsyncAdmin.class);
+
+  private volatile boolean aborted = false;
+
+  private final Connection conn;
+
+  private final RawAsyncHBaseAdmin admin;
+
+  private final int operationTimeout;
+
+  private final int syncWaitTimeout;
+
+  public AdminOverAsyncAdmin(Connection conn, RawAsyncHBaseAdmin admin) {
+    this.conn = conn;
+    this.admin = admin;
+    this.operationTimeout = conn.getConfiguration().getInt(
+      HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+    this.syncWaitTimeout =
+      conn.getConfiguration().getInt("hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
+  }
+
+  @Override
+  public int getOperationTimeout() {
+    return operationTimeout;
+  }
+
+  @Override
+  public int getSyncWaitTimeout() {
+    return syncWaitTimeout;
+  }
+
+  @Override
+  public void abort(String why, Throwable e) {
+    LOG.warn("Aborting becasue of {}", why, e);
+    this.aborted = true;
+  }
+
+  @Override
+  public boolean isAborted() {
+    return aborted;
+  }
+
+  @Override
+  public Connection getConnection() {
+    return conn;
+  }
+
+  @Override
+  public boolean tableExists(TableName tableName) throws IOException {
+    return get(admin.tableExists(tableName));
+  }
+
+  @Override
+  public List<TableDescriptor> listTableDescriptors() throws IOException {
+    return get(admin.listTableDescriptors());
+  }
+
+  @Override
+  public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
+      throws IOException {
+    return get(admin.listTableDescriptors(pattern, includeSysTables));
+  }
+
+  @Override
+  public TableName[] listTableNames() throws IOException {
+    return get(admin.listTableNames()).toArray(new TableName[0]);
+  }
+
+  @Override
+  public TableName[] listTableNames(Pattern pattern, boolean includeSysTables) throws IOException {
+    return get(admin.listTableNames(pattern, includeSysTables)).toArray(new TableName[0]);
+  }
+
+  @Override
+  public TableDescriptor getDescriptor(TableName tableName)
+      throws TableNotFoundException, IOException {
+    return get(admin.getDescriptor(tableName));
+  }
+
+  @Override
+  public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions)
+      throws IOException {
+    get(admin.createTable(desc, startKey, endKey, numRegions));
+  }
+
+  @Override
+  public Future<Void> createTableAsync(TableDescriptor desc) throws IOException {
+    return admin.createTable(desc);
+  }
+
+  @Override
+  public Future<Void> createTableAsync(TableDescriptor desc, byte[][] splitKeys)
+      throws IOException {
+    return admin.createTable(desc, splitKeys);
+  }
+
+  @Override
+  public Future<Void> deleteTableAsync(TableName tableName) throws IOException {
+    return admin.deleteTable(tableName);
+  }
+
+  @Override
+  public Future<Void> truncateTableAsync(TableName tableName, boolean preserveSplits)
+      throws IOException {
+    return admin.truncateTable(tableName, preserveSplits);
+  }
+
+  @Override
+  public Future<Void> enableTableAsync(TableName tableName) throws IOException {
+    return admin.enableTable(tableName);
+  }
+
+  @Override
+  public Future<Void> disableTableAsync(TableName tableName) throws IOException {
+    return admin.disableTable(tableName);
+  }
+
+  @Override
+  public boolean isTableEnabled(TableName tableName) throws IOException {
+    return get(admin.isTableEnabled(tableName));
+  }
+
+  @Override
+  public boolean isTableDisabled(TableName tableName) throws IOException {
+    return get(admin.isTableDisabled(tableName));
+  }
+
+  @Override
+  public boolean isTableAvailable(TableName tableName) throws IOException {
+    return get(admin.isTableAvailable(tableName));
+  }
+
+  @Override
+  public Future<Void> addColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily)
+      throws IOException {
+    return admin.addColumnFamily(tableName, columnFamily);
+  }
+
+  @Override
+  public Future<Void> deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily)
+      throws IOException {
+    return admin.deleteColumnFamily(tableName, columnFamily);
+  }
+
+  @Override
+  public Future<Void> modifyColumnFamilyAsync(TableName tableName,
+      ColumnFamilyDescriptor columnFamily) throws IOException {
+    return admin.modifyColumnFamily(tableName, columnFamily);
+  }
+
+  @Override
+  public List<RegionInfo> getRegions(ServerName serverName) throws IOException {
+    return get(admin.getRegions(serverName));
+  }
+
+  @Override
+  public void flush(TableName tableName) throws IOException {
+    get(admin.flush(tableName));
+  }
+
+  @Override
+  public void flushRegion(byte[] regionName) throws IOException {
+    get(admin.flushRegion(regionName));
+  }
+
+  @Override
+  public void flushRegionServer(ServerName serverName) throws IOException {
+    get(admin.flushRegionServer(serverName));
+  }
+
+  @Override
+  public void compact(TableName tableName) throws IOException {
+    get(admin.compact(tableName));
+  }
+
+  @Override
+  public void compactRegion(byte[] regionName) throws IOException {
+    get(admin.compactRegion(regionName));
+  }
+
+  @Override
+  public void compact(TableName tableName, byte[] columnFamily) throws IOException {
+    get(admin.compact(tableName, columnFamily));
+  }
+
+  @Override
+  public void compactRegion(byte[] regionName, byte[] columnFamily) throws IOException {
+    get(admin.compactRegion(regionName, columnFamily));
+  }
+
+  @Override
+  public void compact(TableName tableName, CompactType compactType)
+      throws IOException, InterruptedException {
+    get(admin.compact(tableName, compactType));
+  }
+
+  @Override
+  public void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
+      throws IOException, InterruptedException {
+    get(admin.compact(tableName, columnFamily, compactType));
+  }
+
+  @Override
+  public void majorCompact(TableName tableName) throws IOException {
+    get(admin.majorCompact(tableName));
+  }
+
+  @Override
+  public void majorCompactRegion(byte[] regionName) throws IOException {
+    get(admin.majorCompactRegion(regionName));
+  }
+
+  @Override
+  public void majorCompact(TableName tableName, byte[] columnFamily) throws IOException {
+    get(admin.majorCompact(tableName, columnFamily));
+  }
+
+  @Override
+  public void majorCompactRegion(byte[] regionName, byte[] columnFamily) throws IOException {
+    get(admin.majorCompactRegion(regionName, columnFamily));
+  }
+
+  @Override
+  public void majorCompact(TableName tableName, CompactType compactType)
+      throws IOException, InterruptedException {
+    get(admin.majorCompact(tableName, compactType));
+  }
+
+  @Override
+  public void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType)
+      throws IOException, InterruptedException {
+    get(admin.majorCompact(tableName, columnFamily, compactType));
+  }
+
+  @Override
+  public Map<ServerName, Boolean> compactionSwitch(boolean switchState,
+      List<String> serverNamesList) throws IOException {
+    return get(admin.compactionSwitch(switchState, serverNamesList));
+  }
+
+  @Override
+  public void compactRegionServer(ServerName serverName) throws IOException {
+    get(admin.compactRegionServer(serverName));
+  }
+
+  @Override
+  public void majorCompactRegionServer(ServerName serverName) throws IOException {
+    get(admin.majorCompactRegionServer(serverName));
+  }
+
+  @Override
+  public void move(byte[] encodedRegionName) throws IOException {
+    get(admin.move(encodedRegionName));
+  }
+
+  @Override
+  public void move(byte[] encodedRegionName, ServerName destServerName) throws IOException {
+    get(admin.move(encodedRegionName, destServerName));
+  }
+
+  @Override
+  public void assign(byte[] regionName) throws IOException {
+    get(admin.assign(regionName));
+  }
+
+  @Override
+  public void unassign(byte[] regionName, boolean force) throws IOException {
+    get(admin.unassign(regionName, force));
+  }
+
+  @Override
+  public void offline(byte[] regionName) throws IOException {
+    get(admin.offline(regionName));
+  }
+
+  @Override
+  public boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOException {
+    return get(admin.balancerSwitch(onOrOff, synchronous));
+  }
+
+  @Override
+  public boolean balance() throws IOException {
+    return get(admin.balance());
+  }
+
+  @Override
+  public boolean balance(boolean force) throws IOException {
+    return get(admin.balance(force));
+  }
+
+  @Override
+  public boolean isBalancerEnabled() throws IOException {
+    return get(admin.isBalancerEnabled());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CacheEvictionStats clearBlockCache(TableName tableName) throws IOException {
|
||||||
|
return get(admin.clearBlockCache(tableName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean normalize() throws IOException {
|
||||||
|
return get(admin.normalize());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isNormalizerEnabled() throws IOException {
|
||||||
|
return get(admin.isNormalizerEnabled());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean normalizerSwitch(boolean on) throws IOException {
|
||||||
|
return get(admin.normalizerSwitch(on));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean catalogJanitorSwitch(boolean onOrOff) throws IOException {
|
||||||
|
return get(admin.catalogJanitorSwitch(onOrOff));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int runCatalogJanitor() throws IOException {
|
||||||
|
return get(admin.runCatalogJanitor());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isCatalogJanitorEnabled() throws IOException {
|
||||||
|
return get(admin.isCatalogJanitorEnabled());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean cleanerChoreSwitch(boolean onOrOff) throws IOException {
|
||||||
|
return get(admin.cleanerChoreSwitch(onOrOff));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean runCleanerChore() throws IOException {
|
||||||
|
return get(admin.runCleanerChore());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isCleanerChoreEnabled() throws IOException {
|
||||||
|
return get(admin.isCleanerChoreEnabled());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> mergeRegionsAsync(byte[][] nameOfRegionsToMerge, boolean forcible)
|
||||||
|
throws IOException {
|
||||||
|
return admin.mergeRegions(Arrays.asList(nameOfRegionsToMerge), forcible);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void split(TableName tableName) throws IOException {
|
||||||
|
get(admin.split(tableName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void split(TableName tableName, byte[] splitPoint) throws IOException {
|
||||||
|
get(admin.split(tableName, splitPoint));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> splitRegionAsync(byte[] regionName) throws IOException {
|
||||||
|
return admin.splitRegion(regionName);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException {
|
||||||
|
return admin.splitRegion(regionName, splitPoint);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException {
|
||||||
|
return admin.modifyTable(td);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void shutdown() throws IOException {
|
||||||
|
get(admin.shutdown());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void stopMaster() throws IOException {
|
||||||
|
get(admin.stopMaster());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isMasterInMaintenanceMode() throws IOException {
|
||||||
|
return get(admin.isMasterInMaintenanceMode());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void stopRegionServer(String hostnamePort) throws IOException {
|
||||||
|
get(admin.stopRegionServer(ServerName.valueOf(hostnamePort, 0)));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException {
|
||||||
|
return get(admin.getClusterMetrics(options));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException {
|
||||||
|
return get(admin.getRegionMetrics(serverName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<RegionMetrics> getRegionMetrics(ServerName serverName, TableName tableName)
|
||||||
|
throws IOException {
|
||||||
|
return get(admin.getRegionMetrics(serverName, tableName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Configuration getConfiguration() {
|
||||||
|
return conn.getConfiguration();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> createNamespaceAsync(NamespaceDescriptor descriptor) throws IOException {
|
||||||
|
return admin.createNamespace(descriptor);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> modifyNamespaceAsync(NamespaceDescriptor descriptor) throws IOException {
|
||||||
|
return admin.modifyNamespace(descriptor);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> deleteNamespaceAsync(String name) throws IOException {
|
||||||
|
return admin.deleteNamespace(name);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public NamespaceDescriptor getNamespaceDescriptor(String name)
|
||||||
|
throws NamespaceNotFoundException, IOException {
|
||||||
|
return get(admin.getNamespaceDescriptor(name));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String[] listNamespaces() throws IOException {
|
||||||
|
return get(admin.listNamespaces()).toArray(new String[0]);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
|
||||||
|
return get(admin.listNamespaceDescriptors()).toArray(new NamespaceDescriptor[0]);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException {
|
||||||
|
return get(admin.listTableDescriptorsByNamespace(Bytes.toString(name)));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public TableName[] listTableNamesByNamespace(String name) throws IOException {
|
||||||
|
return get(admin.listTableNamesByNamespace(name)).toArray(new TableName[0]);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<RegionInfo> getRegions(TableName tableName) throws IOException {
|
||||||
|
return get(admin.getRegions(tableName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void close() {
|
||||||
|
// do nothing, AsyncAdmin is not a Closeable.
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException {
|
||||||
|
return get(admin.listTableDescriptors(tableNames));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Boolean> abortProcedureAsync(long procId, boolean mayInterruptIfRunning)
|
||||||
|
throws IOException {
|
||||||
|
return admin.abortProcedure(procId, mayInterruptIfRunning);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getProcedures() throws IOException {
|
||||||
|
return get(admin.getProcedures());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getLocks() throws IOException {
|
||||||
|
return get(admin.getLocks());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException {
|
||||||
|
get(admin.rollWALWriter(serverName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CompactionState getCompactionState(TableName tableName) throws IOException {
|
||||||
|
return get(admin.getCompactionState(tableName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CompactionState getCompactionState(TableName tableName, CompactType compactType)
|
||||||
|
throws IOException {
|
||||||
|
return get(admin.getCompactionState(tableName, compactType));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CompactionState getCompactionStateForRegion(byte[] regionName) throws IOException {
|
||||||
|
return get(admin.getCompactionStateForRegion(regionName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long getLastMajorCompactionTimestamp(TableName tableName) throws IOException {
|
||||||
|
return get(admin.getLastMajorCompactionTimestamp(tableName)).orElse(0L);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
|
||||||
|
return get(admin.getLastMajorCompactionTimestampForRegion(regionName)).orElse(0L);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void snapshot(SnapshotDescription snapshot)
|
||||||
|
throws IOException, SnapshotCreationException, IllegalArgumentException {
|
||||||
|
get(admin.snapshot(snapshot));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> snapshotAsync(SnapshotDescription snapshot)
|
||||||
|
throws IOException, SnapshotCreationException {
|
||||||
|
return admin.snapshot(snapshot);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isSnapshotFinished(SnapshotDescription snapshot)
|
||||||
|
throws IOException, HBaseSnapshotException, UnknownSnapshotException {
|
||||||
|
return get(admin.isSnapshotFinished(snapshot));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void restoreSnapshot(String snapshotName) throws IOException, RestoreSnapshotException {
|
||||||
|
get(admin.restoreSnapshot(snapshotName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl)
|
||||||
|
throws IOException, RestoreSnapshotException {
|
||||||
|
get(admin.restoreSnapshot(snapshotName, takeFailSafeSnapshot, restoreAcl));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName,
|
||||||
|
boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException {
|
||||||
|
return admin.cloneSnapshot(snapshotName, tableName, restoreAcl);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void execProcedure(String signature, String instance, Map<String, String> props)
|
||||||
|
throws IOException {
|
||||||
|
get(admin.execProcedure(signature, instance, props));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public byte[] execProcedureWithReturn(String signature, String instance,
|
||||||
|
Map<String, String> props) throws IOException {
|
||||||
|
return get(admin.execProcedureWithReturn(signature, instance, props));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
|
||||||
|
throws IOException {
|
||||||
|
return get(admin.isProcedureFinished(signature, instance, props));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<SnapshotDescription> listSnapshots() throws IOException {
|
||||||
|
return get(admin.listSnapshots());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
|
||||||
|
return get(admin.listSnapshots(pattern));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
|
||||||
|
Pattern snapshotNamePattern) throws IOException {
|
||||||
|
return get(admin.listTableSnapshots(tableNamePattern, snapshotNamePattern));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void deleteSnapshot(String snapshotName) throws IOException {
|
||||||
|
get(admin.deleteSnapshot(snapshotName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void deleteSnapshots(Pattern pattern) throws IOException {
|
||||||
|
get(admin.deleteSnapshots(pattern));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
|
||||||
|
throws IOException {
|
||||||
|
get(admin.deleteTableSnapshots(tableNamePattern, snapshotNamePattern));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void setQuota(QuotaSettings quota) throws IOException {
|
||||||
|
get(admin.setQuota(quota));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<QuotaSettings> getQuota(QuotaFilter filter) throws IOException {
|
||||||
|
return get(admin.getQuota(filter));
|
||||||
|
}
|
||||||
|
|
||||||
|
@SuppressWarnings("deprecation")
|
||||||
|
private static final class SyncCoprocessorRpcChannelOverAsync implements CoprocessorRpcChannel {
|
||||||
|
|
||||||
|
private final RpcChannel delegate;
|
||||||
|
|
||||||
|
public SyncCoprocessorRpcChannelOverAsync(RpcChannel delegate) {
|
||||||
|
this.delegate = delegate;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void callMethod(MethodDescriptor method, RpcController controller, Message request,
|
||||||
|
Message responsePrototype, RpcCallback<Message> done) {
|
||||||
|
ClientCoprocessorRpcController c = new ClientCoprocessorRpcController();
|
||||||
|
CoprocessorBlockingRpcCallback<Message> callback = new CoprocessorBlockingRpcCallback<>();
|
||||||
|
delegate.callMethod(method, c, request, responsePrototype, callback);
|
||||||
|
Message ret;
|
||||||
|
try {
|
||||||
|
ret = callback.get();
|
||||||
|
} catch (IOException e) {
|
||||||
|
setCoprocessorError(controller, e);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (c.failed()) {
|
||||||
|
setCoprocessorError(controller, c.getFailed());
|
||||||
|
}
|
||||||
|
done.run(ret);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Message callBlockingMethod(MethodDescriptor method, RpcController controller,
|
||||||
|
Message request, Message responsePrototype) throws ServiceException {
|
||||||
|
ClientCoprocessorRpcController c = new ClientCoprocessorRpcController();
|
||||||
|
CoprocessorBlockingRpcCallback<Message> done = new CoprocessorBlockingRpcCallback<>();
|
||||||
|
callMethod(method, c, request, responsePrototype, done);
|
||||||
|
Message ret;
|
||||||
|
try {
|
||||||
|
ret = done.get();
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new ServiceException(e);
|
||||||
|
}
|
||||||
|
if (c.failed()) {
|
||||||
|
setCoprocessorError(controller, c.getFailed());
|
||||||
|
throw new ServiceException(c.getFailed());
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@SuppressWarnings("deprecation")
|
||||||
|
@Override
|
||||||
|
public CoprocessorRpcChannel coprocessorService() {
|
||||||
|
return new SyncCoprocessorRpcChannelOverAsync(
|
||||||
|
new MasterCoprocessorRpcChannelImpl(admin.<Message> newMasterCaller()));
|
||||||
|
}
|
||||||
|
|
||||||
|
@SuppressWarnings("deprecation")
|
||||||
|
@Override
|
||||||
|
public CoprocessorRpcChannel coprocessorService(ServerName serverName) {
|
||||||
|
return new SyncCoprocessorRpcChannelOverAsync(new RegionServerCoprocessorRpcChannelImpl(
|
||||||
|
admin.<Message> newServerCaller().serverName(serverName)));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void updateConfiguration(ServerName server) throws IOException {
|
||||||
|
get(admin.updateConfiguration(server));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void updateConfiguration() throws IOException {
|
||||||
|
get(admin.updateConfiguration());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<SecurityCapability> getSecurityCapabilities() throws IOException {
|
||||||
|
return get(admin.getSecurityCapabilities());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean splitSwitch(boolean enabled, boolean synchronous) throws IOException {
|
||||||
|
return get(admin.splitSwitch(enabled, synchronous));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean mergeSwitch(boolean enabled, boolean synchronous) throws IOException {
|
||||||
|
return get(admin.mergeSwitch(enabled, synchronous));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isSplitEnabled() throws IOException {
|
||||||
|
return get(admin.isSplitEnabled());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isMergeEnabled() throws IOException {
|
||||||
|
return get(admin.isMergeEnabled());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig,
|
||||||
|
boolean enabled) throws IOException {
|
||||||
|
return admin.addReplicationPeer(peerId, peerConfig, enabled);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> removeReplicationPeerAsync(String peerId) throws IOException {
|
||||||
|
return admin.removeReplicationPeer(peerId);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> enableReplicationPeerAsync(String peerId) throws IOException {
|
||||||
|
return admin.enableReplicationPeer(peerId);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> disableReplicationPeerAsync(String peerId) throws IOException {
|
||||||
|
return admin.disableReplicationPeer(peerId);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws IOException {
|
||||||
|
return get(admin.getReplicationPeerConfig(peerId));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> updateReplicationPeerConfigAsync(String peerId,
|
||||||
|
ReplicationPeerConfig peerConfig) throws IOException {
|
||||||
|
return admin.updateReplicationPeerConfig(peerId, peerConfig);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<ReplicationPeerDescription> listReplicationPeers() throws IOException {
|
||||||
|
return get(admin.listReplicationPeers());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException {
|
||||||
|
return get(admin.listReplicationPeers(pattern));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Void> transitReplicationPeerSyncReplicationStateAsync(String peerId,
|
||||||
|
SyncReplicationState state) throws IOException {
|
||||||
|
return admin.transitReplicationPeerSyncReplicationState(peerId, state);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void decommissionRegionServers(List<ServerName> servers, boolean offload)
|
||||||
|
throws IOException {
|
||||||
|
get(admin.decommissionRegionServers(servers, offload));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<ServerName> listDecommissionedRegionServers() throws IOException {
|
||||||
|
return get(admin.listDecommissionedRegionServers());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void recommissionRegionServer(ServerName server, List<byte[]> encodedRegionNames)
|
||||||
|
throws IOException {
|
||||||
|
get(admin.recommissionRegionServer(server, encodedRegionNames));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<TableCFs> listReplicatedTableCFs() throws IOException {
|
||||||
|
return get(admin.listReplicatedTableCFs());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void enableTableReplication(TableName tableName) throws IOException {
|
||||||
|
get(admin.enableTableReplication(tableName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void disableTableReplication(TableName tableName) throws IOException {
|
||||||
|
get(admin.disableTableReplication(tableName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void clearCompactionQueues(ServerName serverName, Set<String> queues)
|
||||||
|
throws IOException, InterruptedException {
|
||||||
|
get(admin.clearCompactionQueues(serverName, queues));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<ServerName> clearDeadServers(List<ServerName> servers) throws IOException {
|
||||||
|
return get(admin.clearDeadServers(servers));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void cloneTableSchema(TableName tableName, TableName newTableName, boolean preserveSplits)
|
||||||
|
throws IOException {
|
||||||
|
get(admin.cloneTableSchema(tableName, newTableName, preserveSplits));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean switchRpcThrottle(boolean enable) throws IOException {
|
||||||
|
return get(admin.switchRpcThrottle(enable));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isRpcThrottleEnabled() throws IOException {
|
||||||
|
return get(admin.isRpcThrottleEnabled());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean exceedThrottleQuotaSwitch(boolean enable) throws IOException {
|
||||||
|
return get(admin.exceedThrottleQuotaSwitch(enable));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Map<TableName, Long> getSpaceQuotaTableSizes() throws IOException {
|
||||||
|
return get(admin.getSpaceQuotaTableSizes());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Map<TableName, ? extends SpaceQuotaSnapshotView> getRegionServerSpaceQuotaSnapshots(
|
||||||
|
ServerName serverName) throws IOException {
|
||||||
|
return get(admin.getRegionServerSpaceQuotaSnapshots(serverName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(String namespace) throws IOException {
|
||||||
|
return get(admin.getCurrentSpaceQuotaSnapshot(namespace));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(TableName tableName)
|
||||||
|
throws IOException {
|
||||||
|
return get(admin.getCurrentSpaceQuotaSnapshot(tableName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void grant(UserPermission userPermission, boolean mergeExistingPermissions)
|
||||||
|
throws IOException {
|
||||||
|
get(admin.grant(userPermission, mergeExistingPermissions));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void revoke(UserPermission userPermission) throws IOException {
|
||||||
|
get(admin.revoke(userPermission));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<UserPermission> getUserPermissions(
|
||||||
|
GetUserPermissionsRequest getUserPermissionsRequest) throws IOException {
|
||||||
|
return get(admin.getUserPermissions(getUserPermissionsRequest));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<Boolean> hasUserPermissions(String userName, List<Permission> permissions)
|
||||||
|
throws IOException {
|
||||||
|
return get(admin.hasUserPermissions(userName, permissions));
|
||||||
|
}
|
||||||
|
}
|
|
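Every blocking override above follows the same shape: call the AsyncAdmin method, then block on the returned CompletableFuture through a private get helper. A minimal standalone sketch of that bridge follows; the real helper is expected to delegate to org.apache.hadoop.hbase.util.FutureUtils.get, so the class name BlockingBridgeSketch and this simplified unwrapping are illustrative assumptions, not the commit's code.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

final class BlockingBridgeSketch {
  // Wait on the AsyncAdmin future and surface failures as IOException so the
  // synchronous Admin method signatures can be preserved.
  static <T> T get(CompletableFuture<T> future) throws IOException {
    try {
      return future.get();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw (IOException) new InterruptedIOException().initCause(e);
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      throw cause instanceof IOException ? (IOException) cause : new IOException(cause);
    }
  }
}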
@@ -102,7 +102,7 @@ class ConnectionOverAsyncConnection implements Connection {

   @Override
   public Admin getAdmin() throws IOException {
-    return oldConn.getAdmin();
+    return new AdminOverAsyncAdmin(this, (RawAsyncHBaseAdmin) conn.getAdmin());
   }

   @Override
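A caller-side sketch of what this hunk means in practice: synchronous client code keeps using Connection.getAdmin() exactly as before, and only the object behind the interface switches to the AsyncAdmin-backed one. The table name and the conf variable are illustrative.

// Sketch: existing synchronous callers compile and run unchanged.
try (Connection conn = ConnectionFactory.createConnection(conf);
    Admin admin = conn.getAdmin()) { // now an AdminOverAsyncAdmin under the hood
  boolean exists = admin.tableExists(TableName.valueOf("demo")); // "demo" is illustrative
}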
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
@@ -715,4 +716,21 @@ public final class ConnectionUtils {
       pool.shutdownNow();
     }
   }
+
+  static void setCoprocessorError(com.google.protobuf.RpcController controller, Throwable error) {
+    if (controller == null) {
+      return;
+    }
+    if (controller instanceof ServerRpcController) {
+      if (error instanceof IOException) {
+        ((ServerRpcController) controller).setFailedOn((IOException) error);
+      } else {
+        ((ServerRpcController) controller).setFailedOn(new IOException(error));
+      }
+    } else if (controller instanceof ClientCoprocessorRpcController) {
+      ((ClientCoprocessorRpcController) controller).setFailed(error);
+    } else {
+      controller.setFailed(error.toString());
+    }
+  }
 }
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import com.google.protobuf.RpcCallback;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * For implementing coprocessor related methods in {@link Table} and {@link Admin} interface.
+ * @deprecated since 3.0.0, will be removed in 4.0.0 along with the coprocessor related methods in
+ *             {@link Table} and {@link Admin} interface.
+ */
+@Deprecated
+@InterfaceAudience.Private
+class CoprocessorBlockingRpcCallback<R> implements RpcCallback<R> {
+  private R result;
+  private boolean resultSet = false;
+
+  /**
+   * Called on completion of the RPC call with the response object, or {@code null} in the case of
+   * an error.
+   * @param parameter the response object or {@code null} if an error occurred
+   */
+  @Override
+  public void run(R parameter) {
+    synchronized (this) {
+      result = parameter;
+      resultSet = true;
+      this.notifyAll();
+    }
+  }
+
+  /**
+   * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was
+   * passed. When used asynchronously, this method will block until the {@link #run(Object)} method
+   * has been called.
+   * @return the response object or {@code null} if no response was passed
+   */
+  public synchronized R get() throws IOException {
+    while (!resultSet) {
+      try {
+        this.wait();
+      } catch (InterruptedException ie) {
+        InterruptedIOException exception = new InterruptedIOException(ie.getMessage());
+        exception.initCause(ie);
+        throw exception;
+      }
+    }
+    return result;
+  }
+}
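A usage sketch for the class above, mirroring how the sync coprocessor channels in this commit drive it: hand the callback to an async callMethod, then block on get() until run() delivers the response. The channel, method, controller, request, and responsePrototype variables stand in for real coprocessor plumbing.

// Bridge protobuf's callback-style RpcChannel into a blocking call.
CoprocessorBlockingRpcCallback<Message> callback = new CoprocessorBlockingRpcCallback<>();
channel.callMethod(method, controller, request, responsePrototype, callback);
Message response = callback.get(); // blocks; throws InterruptedIOException if interrupted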
@@ -1156,10 +1156,10 @@ public class HBaseAdmin implements Admin {

   @Override
   public void move(byte[] encodedRegionName) throws IOException {
-    move(encodedRegionName, (ServerName) null);
+    move(encodedRegionName, null);
   }

-  public void move(final byte[] encodedRegionName, ServerName destServerName) throws IOException {
+  public void move(byte[] encodedRegionName, ServerName destServerName) throws IOException {
     executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
       @Override
       protected Void rpcCall() throws Exception {
@@ -3909,6 +3909,11 @@ public class HBaseAdmin implements Admin {
     return splitRegionAsync(regionName, null);
   }

+  @Override
+  public List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException {
+    return getRegionMetrics(serverName, null);
+  }
+
   @Override
   public Future<Void> createTableAsync(TableDescriptor desc) throws IOException {
     return createTableAsync(desc, null);
@@ -360,7 +360,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
     this.ng = connection.getNonceGenerator();
   }

-  private <T> MasterRequestCallerBuilder<T> newMasterCaller() {
+  <T> MasterRequestCallerBuilder<T> newMasterCaller() {
     return this.connection.callerFactory.<T> masterRequest()
       .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
       .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
@@ -702,11 +702,6 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {

   @Override
   public CompletableFuture<Boolean> isTableAvailable(TableName tableName) {
-    return isTableAvailable(tableName, Optional.empty());
-  }
-
-  private CompletableFuture<Boolean> isTableAvailable(TableName tableName,
-      Optional<byte[][]> splitKeys) {
     if (TableName.isMetaTableName(tableName)) {
       return connection.registry.getMetaRegionLocation().thenApply(locs -> Stream
         .of(locs.getRegionLocations()).allMatch(loc -> loc != null && loc.getServerName() != null));
@@ -740,35 +735,13 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
             future.complete(false);
             return;
           }
-          Optional<Boolean> available =
-            splitKeys.map(keys -> compareRegionsWithSplitKeys(locations, keys));
-          future.complete(available.orElse(true));
+          future.complete(true);
         });
       }
     });
     return future;
   }

-  private boolean compareRegionsWithSplitKeys(List<HRegionLocation> locations, byte[][] splitKeys) {
-    int regionCount = 0;
-    for (HRegionLocation location : locations) {
-      RegionInfo info = location.getRegion();
-      if (Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
-        regionCount++;
-        continue;
-      }
-      for (byte[] splitKey : splitKeys) {
-        // Just check if the splitkey is available
-        if (Bytes.equals(info.getStartKey(), splitKey)) {
-          regionCount++;
-          break;
-        }
-      }
-    }
-    return regionCount == splitKeys.length + 1;
-  }
-
   @Override
   public CompletableFuture<Void> addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) {
     return this.<AddColumnRequest, AddColumnResponse> procedureCall(tableName,
@@ -2004,10 +1977,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
                 LOG.error(
                   "Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName,
                   err3);
-                future.completeExceptionally(err3);
-              } else {
-                future.complete(ret3);
               }
+              future.complete(ret3);
             });
           }
         });
@@ -3393,7 +3364,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       .call();
   }

-  private <T> ServerRequestCallerBuilder<T> newServerCaller() {
+  <T> ServerRequestCallerBuilder<T> newServerCaller() {
     return this.connection.callerFactory.<T> serverRequest()
       .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
       .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;

+import static org.apache.hadoop.hbase.client.ConnectionUtils.setCoprocessorError;
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

 import com.google.protobuf.Descriptors.MethodDescriptor;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;

@@ -101,23 +101,6 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel {
     return future;
   }

-  protected final void setError(RpcController controller, Throwable error) {
-    if (controller == null) {
-      return;
-    }
-    if (controller instanceof ServerRpcController) {
-      if (error instanceof IOException) {
-        ((ServerRpcController) controller).setFailedOn((IOException) error);
-      } else {
-        ((ServerRpcController) controller).setFailedOn(new IOException(error));
-      }
-    } else if (controller instanceof ClientCoprocessorRpcController) {
-      ((ClientCoprocessorRpcController) controller).setFailed(error);
-    } else {
-      controller.setFailed(error.toString());
-    }
-  }
-
   @Override
   public void callMethod(MethodDescriptor method, RpcController controller, Message request,
       Message responsePrototype, RpcCallback<Message> done) {
@@ -128,7 +111,7 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel {
         .action((c, l, s) -> rpcCall(method, request, responsePrototype, c, l, s)).call(),
       (r, e) -> {
         if (e != null) {
-          setError(controller, e);
+          setCoprocessorError(controller, e);
         }
         done.run(r);
       });
@@ -36,7 +36,10 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
  * call coprocessor endpoint {@link com.google.protobuf.Service}s.
  * Note that clients should not use this class directly, except through
  * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}.
+ * @deprecated Please stop using this class, as it is too low level and is part of the rpc
+ *             framework for HBase. Will be deleted in 4.0.0.
  */
+@Deprecated
 @InterfaceAudience.Public
 abstract class SyncCoprocessorRpcChannel implements CoprocessorRpcChannel {
   private static final Logger LOG = LoggerFactory.getLogger(SyncCoprocessorRpcChannel.class);
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.client;

+import static org.apache.hadoop.hbase.client.ConnectionUtils.setCoprocessorError;
+
 import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcCallback;
@@ -298,44 +300,7 @@ class TableOverAsyncTable implements Table {
   public void close() {
   }

-  private static final class BlockingRpcCallback<R> implements RpcCallback<R> {
-    private R result;
-    private boolean resultSet = false;
-
-    /**
-     * Called on completion of the RPC call with the response object, or {@code null} in the case of
-     * an error.
-     * @param parameter the response object or {@code null} if an error occurred
-     */
-    @Override
-    public void run(R parameter) {
-      synchronized (this) {
-        result = parameter;
-        resultSet = true;
-        this.notifyAll();
-      }
-    }
-
-    /**
-     * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was
-     * passed. When used asynchronously, this method will block until the {@link #run(Object)}
-     * method has been called.
-     * @return the response object or {@code null} if no response was passed
-     */
-    public synchronized R get() throws IOException {
-      while (!resultSet) {
-        try {
-          this.wait();
-        } catch (InterruptedException ie) {
-          InterruptedIOException exception = new InterruptedIOException(ie.getMessage());
-          exception.initCause(ie);
-          throw exception;
-        }
-      }
-      return result;
-    }
-  }
-
+  @SuppressWarnings("deprecation")
   private static final class RegionCoprocessorRpcChannel extends RegionCoprocessorRpcChannelImpl
       implements CoprocessorRpcChannel {

@@ -348,17 +313,17 @@ class TableOverAsyncTable implements Table {
     public void callMethod(MethodDescriptor method, RpcController controller, Message request,
         Message responsePrototype, RpcCallback<Message> done) {
       ClientCoprocessorRpcController c = new ClientCoprocessorRpcController();
-      BlockingRpcCallback<Message> callback = new BlockingRpcCallback<>();
+      CoprocessorBlockingRpcCallback<Message> callback = new CoprocessorBlockingRpcCallback<>();
       super.callMethod(method, c, request, responsePrototype, callback);
       Message ret;
       try {
         ret = callback.get();
       } catch (IOException e) {
-        setError(controller, e);
+        setCoprocessorError(controller, e);
         return;
       }
       if (c.failed()) {
-        setError(controller, c.getFailed());
+        setCoprocessorError(controller, c.getFailed());
       }
       done.run(ret);
     }
@@ -367,7 +332,7 @@ class TableOverAsyncTable implements Table {
     public Message callBlockingMethod(MethodDescriptor method, RpcController controller,
         Message request, Message responsePrototype) throws ServiceException {
       ClientCoprocessorRpcController c = new ClientCoprocessorRpcController();
-      BlockingRpcCallback<Message> done = new BlockingRpcCallback<>();
+      CoprocessorBlockingRpcCallback<Message> done = new CoprocessorBlockingRpcCallback<>();
       callMethod(method, c, request, responsePrototype, done);
       Message ret;
       try {
@@ -376,7 +341,7 @@ class TableOverAsyncTable implements Table {
         throw new ServiceException(e);
       }
       if (c.failed()) {
-        setError(controller, c.getFailed());
+        setCoprocessorError(controller, c.getFailed());
         throw new ServiceException(c.getFailed());
       }
       return ret;
@@ -57,6 +57,8 @@ public class TestInterfaceAlign {
     adminMethodNames.removeAll(getMethodNames(Abortable.class));
     adminMethodNames.removeAll(getMethodNames(Closeable.class));

+    asyncAdminMethodNames.remove("coprocessorService");
+
     adminMethodNames.forEach(method -> {
       boolean contains = asyncAdminMethodNames.contains(method);
       if (method.endsWith("Async")) {
@@ -383,7 +383,11 @@ public class PerformanceEvaluation extends Configured implements Tool {
         }
       }
     }
-    admin.createTable(desc, splits);
+    if (splits != null) {
+      admin.createTable(desc, splits);
+    } else {
+      admin.createTable(desc);
+    }
     LOG.info("Table " + desc + " created");
   }
   return admin.tableExists(tableName);
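The guard above, repeated at three HBaseTestingUtility call sites later in this commit, exists because the AsyncAdmin-backed createTable no longer accepts a null split-key array as shorthand for an unsplit table (a hedged reading of this change, consistent with the same fix in RestoreTool). A hypothetical helper, not part of the commit, could factor the pattern out:

// Hypothetical helper (not in this commit): pass region boundaries only when
// the caller actually supplied them.
static void createTable(Admin admin, TableDescriptor desc, byte[][] splits) throws IOException {
  if (splits != null) {
    admin.createTable(desc, splits);
  } else {
    admin.createTable(desc);
  }
}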
@@ -596,8 +596,8 @@ public class TestRemoteTable {
     REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());

     // Truncate the test table for inserting test scenarios rows keys
-    TEST_UTIL.getHBaseAdmin().disableTable(TABLE);
-    TEST_UTIL.getHBaseAdmin().truncateTable(TABLE, false);
+    TEST_UTIL.getAdmin().disableTable(TABLE);
+    TEST_UTIL.getAdmin().truncateTable(TABLE, false);

     remoteTable = new RemoteHTable(
       new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())),
@@ -74,7 +74,6 @@ import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Hbck;
 import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
 import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
@@ -1639,7 +1638,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       builder.setColumnFamily(cfdb.build());
     }
     TableDescriptor td = builder.build();
-    getAdmin().createTable(td, splitKeys);
+    if (splitKeys != null) {
+      getAdmin().createTable(td, splitKeys);
+    } else {
+      getAdmin().createTable(td);
+    }
     // HBaseAdmin only waits for regions to appear in hbase:meta
     // we should wait until they are assigned
     waitUntilAllRegionsAssigned(td.getTableName());
@@ -1662,7 +1665,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
           .setNewVersionBehavior(true).build());
       }
     }
-    getAdmin().createTable(builder.build(), splitRows);
+    if (splitRows != null) {
+      getAdmin().createTable(builder.build(), splitRows);
+    } else {
+      getAdmin().createTable(builder.build());
+    }
     // HBaseAdmin only waits for regions to appear in hbase:meta
     // we should wait until they are assigned
     waitUntilAllRegionsAssigned(htd.getTableName());
@@ -1731,7 +1738,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       }
       desc.addFamily(hcd);
     }
-    getAdmin().createTable(desc, splitKeys);
+    if (splitKeys != null) {
+      getAdmin().createTable(desc, splitKeys);
+    } else {
+      getAdmin().createTable(desc);
+    }
     // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
     // assigned
     waitUntilAllRegionsAssigned(tableName);
@@ -3094,37 +3105,18 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     this.asyncConnection = null;
   }

-  /**
-   * Returns a Admin instance.
-   * This instance is shared between HBaseTestingUtility instance users. Closing it has no effect,
-   * it will be closed automatically when the cluster shutdowns
-   *
-   * @return HBaseAdmin instance which is guaranteed to support only {@link Admin} interface.
-   *   Functions in HBaseAdmin not provided by {@link Admin} interface can be changed/deleted
-   *   anytime.
-   * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #getAdmin()} instead.
-   */
-  @Deprecated
-  public synchronized HBaseAdmin getHBaseAdmin()
-  throws IOException {
-    if (hbaseAdmin == null){
-      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
-    }
-    return hbaseAdmin;
-  }
-
   /**
    * Returns an Admin instance which is shared between HBaseTestingUtility instance users.
    * Closing it has no effect, it will be closed automatically when the cluster shutdowns
    */
   public synchronized Admin getAdmin() throws IOException {
     if (hbaseAdmin == null){
-      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
+      this.hbaseAdmin = getConnection().getAdmin();
     }
     return hbaseAdmin;
   }

-  private HBaseAdmin hbaseAdmin = null;
+  private Admin hbaseAdmin = null;

   /**
    * Returns an {@link Hbck} instance. Needs be closed when done.
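A migration sketch for test code that used the removed accessor; the table name is illustrative. The point of the hunk above is that nothing outside HBaseTestingUtility should depend on the concrete HBaseAdmin class any longer.

// Before: HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();
// After: depend only on the Admin interface.
Admin admin = TEST_UTIL.getAdmin();
boolean exists = admin.tableExists(TableName.valueOf("demo")); // "demo" is illustrative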
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.ClassRule;
@@ -273,7 +274,11 @@ public class TestAdmin1 extends TestAdminBase {

     // Split the table
     if (async) {
-      ADMIN.split(tableName, splitPoint);
+      if (splitPoint != null) {
+        ADMIN.split(tableName, splitPoint);
+      } else {
+        ADMIN.split(tableName);
+      }
       final AtomicInteger count = new AtomicInteger(0);
       Thread t = new Thread("CheckForSplit") {
         @Override
@@ -391,7 +396,8 @@ public class TestAdmin1 extends TestAdminBase {
     // the element at index 1 would be a replica (since the metareader gives us ordered
     // regions). Try splitting that region via the split API . Should fail
     try {
-      TEST_UTIL.getAdmin().splitRegionAsync(regions.get(1).getFirst().getRegionName()).get();
+      FutureUtils.get(
+        TEST_UTIL.getAdmin().splitRegionAsync(regions.get(1).getFirst().getRegionName()));
     } catch (IllegalArgumentException ex) {
       gotException = true;
     }
@@ -401,9 +407,9 @@ public class TestAdmin1 extends TestAdminBase {
     // regions). Try splitting that region via a different split API (the difference is
     // this API goes direct to the regionserver skipping any checks in the admin). Should fail
     try {
-      TEST_UTIL.getHBaseAdmin().splitRegionAsync(regions.get(1).getFirst(),
-        new byte[] { (byte) '1' });
-    } catch (IOException ex) {
+      FutureUtils.get(TEST_UTIL.getAdmin().splitRegionAsync(
+        regions.get(1).getFirst().getEncodedNameAsBytes(), new byte[] { (byte) '1' }));
+    } catch (IllegalArgumentException ex) {
       gotException = true;
     }
     assertTrue(gotException);
@ -411,9 +417,8 @@ public class TestAdmin1 extends TestAdminBase {
|
||||||
gotException = false;
|
gotException = false;
|
||||||
// testing Sync split operation
|
// testing Sync split operation
|
||||||
try {
|
try {
|
||||||
TEST_UTIL.getAdmin()
|
FutureUtils.get(TEST_UTIL.getAdmin()
|
||||||
.splitRegionAsync(regions.get(1).getFirst().getRegionName(), new byte[] { (byte) '1' })
|
.splitRegionAsync(regions.get(1).getFirst().getRegionName(), new byte[] { (byte) '1' }));
|
||||||
.get();
|
|
||||||
} catch (IllegalArgumentException ex) {
|
} catch (IllegalArgumentException ex) {
|
||||||
gotException = true;
|
gotException = true;
|
||||||
}
|
}
|
||||||
|
@ -422,8 +427,10 @@ public class TestAdmin1 extends TestAdminBase {
|
||||||
gotException = false;
|
gotException = false;
|
||||||
// Try merging a replica with another. Should fail.
|
// Try merging a replica with another. Should fail.
|
||||||
try {
|
try {
|
||||||
TEST_UTIL.getAdmin().mergeRegionsAsync(regions.get(1).getFirst().getEncodedNameAsBytes(),
|
FutureUtils.get(TEST_UTIL.getAdmin().mergeRegionsAsync(
|
||||||
regions.get(2).getFirst().getEncodedNameAsBytes(), true).get();
|
regions.get(1).getFirst().getEncodedNameAsBytes(),
|
||||||
|
regions.get(2).getFirst().getEncodedNameAsBytes(),
|
||||||
|
true));
|
||||||
} catch (IllegalArgumentException m) {
|
} catch (IllegalArgumentException m) {
|
||||||
gotException = true;
|
gotException = true;
|
||||||
}
|
}
|
||||||
|
@ -435,8 +442,8 @@ public class TestAdmin1 extends TestAdminBase {
|
||||||
nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes();
|
nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes();
|
||||||
MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest(
|
MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest(
|
||||||
nameofRegionsToMerge, true, HConstants.NO_NONCE, HConstants.NO_NONCE);
|
nameofRegionsToMerge, true, HConstants.NO_NONCE, HConstants.NO_NONCE);
|
||||||
((ConnectionImplementation) TEST_UTIL.getAdmin().getConnection()).getMaster()
|
TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().mergeTableRegions(null,
|
||||||
.mergeTableRegions(null, request);
|
request);
|
||||||
} catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException m) {
|
} catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException m) {
|
||||||
Throwable t = m.getCause();
|
Throwable t = m.getCause();
|
||||||
do {
|
do {
|
||||||
|
@ -568,24 +575,24 @@ public class TestAdmin1 extends TestAdminBase {
|
||||||
List<RegionInfo> tableRegions = ADMIN.getRegions(tableName);
|
List<RegionInfo> tableRegions = ADMIN.getRegions(tableName);
|
||||||
// 0
|
// 0
|
||||||
try {
|
try {
|
||||||
ADMIN.mergeRegionsAsync(new byte[0][0], false).get();
|
FutureUtils.get(ADMIN.mergeRegionsAsync(new byte[0][0], false));
|
||||||
fail();
|
fail();
|
||||||
} catch (IllegalArgumentException e) {
|
} catch (IllegalArgumentException e) {
|
||||||
// expected
|
// expected
|
||||||
}
|
}
|
||||||
// 1
|
// 1
|
||||||
try {
|
try {
|
||||||
ADMIN.mergeRegionsAsync(new byte[][] { tableRegions.get(0).getEncodedNameAsBytes() }, false)
|
FutureUtils.get(ADMIN
|
||||||
.get();
|
.mergeRegionsAsync(new byte[][] { tableRegions.get(0).getEncodedNameAsBytes() }, false));
|
||||||
fail();
|
fail();
|
||||||
} catch (IllegalArgumentException e) {
|
} catch (IllegalArgumentException e) {
|
||||||
// expected
|
// expected
|
||||||
}
|
}
|
||||||
// 3
|
// 3
|
||||||
try {
|
try {
|
||||||
ADMIN.mergeRegionsAsync(
|
FutureUtils.get(ADMIN.mergeRegionsAsync(
|
||||||
tableRegions.stream().map(RegionInfo::getEncodedNameAsBytes).toArray(byte[][]::new),
|
tableRegions.stream().map(RegionInfo::getEncodedNameAsBytes).toArray(byte[][]::new),
|
||||||
false).get();
|
false));
|
||||||
fail();
|
fail();
|
||||||
} catch (DoNotRetryIOException e) {
|
} catch (DoNotRetryIOException e) {
|
||||||
// expected
|
// expected
|
||||||
|
|
|
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
@@ -57,7 +56,6 @@ import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.junit.Assert;
 import org.junit.ClassRule;
@@ -169,7 +167,7 @@ public class TestAdmin2 extends TestAdminBase {
     // Use 80 bit numbers to make sure we aren't limited
     byte [] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
     byte [] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
-    Admin hbaseadmin = TEST_UTIL.getHBaseAdmin();
+    Admin hbaseadmin = TEST_UTIL.getAdmin();
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
     hbaseadmin.createTable(htd, startKey, endKey, expectedRegions);
@@ -349,14 +347,14 @@ public class TestAdmin2 extends TestAdminBase {
       isInList);
   }
 
-  private HBaseAdmin createTable(TableName tableName) throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+  private Admin createTable(TableName tableName) throws IOException {
+    Admin admin = TEST_UTIL.getAdmin();
 
     HTableDescriptor htd = new HTableDescriptor(tableName);
     HColumnDescriptor hcd = new HColumnDescriptor("value");
 
     htd.addFamily(hcd);
-    admin.createTable(htd, null);
+    admin.createTable(htd);
     return admin;
   }
 
@@ -369,7 +367,7 @@ public class TestAdmin2 extends TestAdminBase {
     HColumnDescriptor hcd = new HColumnDescriptor("value");
     htd.addFamily(hcd);
 
-    ADMIN.createTable(htd, null);
+    ADMIN.createTable(htd);
   }
 
   /**
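
Several hunks in this commit, here and in the master and Thrift modules further down, replace createTable(htd, null) with createTable(htd): the two-argument overload takes explicit split keys, and the AsyncAdmin-backed implementation no longer tolerates a null split-key array, so callers with no split keys must use the single-argument form. A sketch of the call-site rule, assuming an Admin handle; the helper name is a stand-in, not code from the patch:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public final class CreateTableSketch {
  // Pick the createTable overload explicitly rather than passing a null
  // split-key array and relying on the old HBaseAdmin leniency.
  static void createTable(Admin admin, TableDescriptor desc, byte[][] splitKeys)
      throws IOException {
    if (splitKeys != null) {
      admin.createTable(desc, splitKeys); // pre-split at the given boundaries
    } else {
      admin.createTable(desc); // single-region table
    }
  }
}
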
@@ -545,7 +543,7 @@ public class TestAdmin2 extends TestAdminBase {
       new HTableDescriptor(TableName.valueOf(Bytes.toBytes(name.getMethodName())));
     HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf1"));
     htd.addFamily(hcd);
-    TEST_UTIL.getHBaseAdmin().createTable(htd);
+    TEST_UTIL.getAdmin().createTable(htd);
   }
 
   @Test
@@ -563,27 +561,6 @@ public class TestAdmin2 extends TestAdminBase {
     }
   }
 
-  @Test
-  public void testGetRegion() throws Exception {
-    // We use actual HBaseAdmin instance instead of going via Admin interface in
-    // here because makes use of an internal HBA method (TODO: Fix.).
-    HBaseAdmin rawAdmin = TEST_UTIL.getHBaseAdmin();
-
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    LOG.info("Started " + tableName);
-    Table t = TEST_UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY);
-
-    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
-      HRegionLocation regionLocation = locator.getRegionLocation(Bytes.toBytes("mmm"));
-      RegionInfo region = regionLocation.getRegion();
-      byte[] regionName = region.getRegionName();
-      Pair<RegionInfo, ServerName> pair = rawAdmin.getRegion(regionName);
-      assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
-      pair = rawAdmin.getRegion(region.getEncodedNameAsBytes());
-      assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
-    }
-  }
-
   @Test
   public void testBalancer() throws Exception {
     boolean initialState = ADMIN.isBalancerEnabled();
@@ -4244,8 +4244,7 @@ public class TestFromClientSide {
     final TableName tableName = TableName.valueOf(name.getMethodName());
     TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
     try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
-      try (Table t = conn.getTable(tableName);
-          Admin admin = conn.getAdmin()) {
+      try (Table t = conn.getTable(tableName); Admin admin = conn.getAdmin()) {
         assertTrue(admin.tableExists(tableName));
         assertTrue(t.get(new Get(ROW)).isEmpty());
       }
@@ -4264,8 +4263,8 @@ public class TestFromClientSide {
       boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
       try (Admin admin = conn.getAdmin()) {
         assertTrue(admin.tableExists(tableName));
-        assertTrue(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
-          .getLiveServerMetrics().size() == SLAVES + (tablesOnMaster ? 1 : 0));
+        assertTrue(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics()
+          .size() == SLAVES + (tablesOnMaster ? 1 : 0));
       }
     }
   }
@@ -272,13 +272,13 @@ public class TestFromClientSide3 {
     TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3);
 
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    try (Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10)) {
+    try (Table table = TEST_UTIL.createTable(tableName, FAMILY, 10)) {
       TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS);
       Admin admin = TEST_UTIL.getAdmin();
 
       // Create 3 store files.
       byte[] row = Bytes.toBytes(random.nextInt());
-      performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 100);
+      performMultiplePutAndFlush(admin, table, row, FAMILY, 3, 100);
 
       try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
         // Verify we have multiple store files.
@@ -304,13 +304,13 @@ public class TestFromClientSide3 {
 
       // change the compaction.min config option for this table to 5
       LOG.info("hbase.hstore.compaction.min should now be 5");
-      HTableDescriptor htd = new HTableDescriptor(hTable.getDescriptor());
+      HTableDescriptor htd = new HTableDescriptor(table.getDescriptor());
       htd.setValue("hbase.hstore.compaction.min", String.valueOf(5));
       admin.modifyTable(htd);
       LOG.info("alter status finished");
 
       // Create 3 more store files.
-      performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 10);
+      performMultiplePutAndFlush(admin, table, row, FAMILY, 3, 10);
 
       // Issue a compaction request
       admin.compact(tableName);
@@ -357,7 +357,7 @@ public class TestFromClientSide3 {
       htd.modifyFamily(hcd);
       admin.modifyTable(htd);
       LOG.info("alter status finished");
-      assertNull(hTable.getDescriptor().getColumnFamily(FAMILY)
+      assertNull(table.getDescriptor().getColumnFamily(FAMILY)
         .getValue(Bytes.toBytes("hbase.hstore.compaction.min")));
     }
   }
@@ -50,10 +50,11 @@ public class TestSnapshotDFSTemporaryDirectory
    *
    * @throws Exception on failure
    */
-  @BeforeClass public static void setupCluster() throws Exception {
+  @BeforeClass
+  public static void setupCluster() throws Exception {
     setupConf(UTIL.getConfiguration());
     UTIL.startMiniCluster(NUM_RS);
-    admin = UTIL.getHBaseAdmin();
+    admin = UTIL.getAdmin();
   }
 
   private static void setupConf(Configuration conf) throws IOException {
@@ -104,7 +104,7 @@ public class TestSnapshotTemporaryDirectory {
   public static void setupCluster() throws Exception {
     setupConf(UTIL.getConfiguration());
     UTIL.startMiniCluster(NUM_RS);
-    admin = UTIL.getHBaseAdmin();
+    admin = UTIL.getAdmin();
   }
 
   private static void setupConf(Configuration conf) {
@@ -139,7 +139,7 @@ public class TestSnapshotTemporaryDirectory {
   @After
   public void tearDown() throws Exception {
     UTIL.deleteTable(TABLE_NAME);
-    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getAdmin());
     SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
   }
 
@@ -290,7 +290,7 @@ public class TestSnapshotTemporaryDirectory {
    */
   @Test
   public void testOfflineTableSnapshot() throws Exception {
-    Admin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getAdmin();
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);
 
@@ -55,17 +55,11 @@ public class TestSplitOrMergeStatus {
   @Rule
   public TestName name = new TestName();
 
-  /**
-   * @throws java.lang.Exception
-   */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.startMiniCluster(2);
   }
 
-  /**
-   * @throws java.lang.Exception
-   */
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
@@ -84,7 +78,12 @@ public class TestSplitOrMergeStatus {
     initSwitchStatus(admin);
     boolean result = admin.splitSwitch(false, false);
     assertTrue(result);
-    admin.split(t.getName());
+    try {
+      admin.split(t.getName());
+      fail();
+    } catch (IOException e) {
+      // expected
+    }
     int count = admin.getRegions(tableName).size();
     assertTrue(originalCount == count);
     result = admin.splitSwitch(true, false);
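
The TestSplitOrMergeStatus hunk above records a behavior change: with the split switch turned off, Admin.split on the AsyncAdmin-backed client fails with an IOException instead of returning silently, hence the new try/fail/catch. A self-contained sketch of exercising that behavior, assuming a live Admin and table; the restore-in-finally is this sketch's addition, not part of the patch:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class SplitSwitchSketch {
  // Disable splits, show that split() now fails fast, then restore the switch.
  static void demonstrateDisabledSplit(Admin admin, TableName table) throws IOException {
    boolean wasEnabled = admin.splitSwitch(false, false); // returns the previous state
    try {
      admin.split(table); // rejected while the switch is off
    } catch (IOException expected) {
      // the refusal is surfaced to the caller instead of being swallowed
    } finally {
      admin.splitSwitch(wasEnabled, false); // put the switch back
    }
  }
}
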
@@ -1342,7 +1342,7 @@ public class TestMasterObserver {
       List<HRegionLocation> regions = regionLocator.getAllRegionLocations();
 
       admin.mergeRegionsAsync(regions.get(0).getRegion().getEncodedNameAsBytes(),
-        regions.get(1).getRegion().getEncodedNameAsBytes(), true);
+        regions.get(1).getRegion().getEncodedNameAsBytes(), true).get();
       assertTrue("Coprocessor should have been called on region merge",
         cp.wasMergeRegionsCalled());
 
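
The TestMasterObserver hunk changes only the tail of the call: mergeRegionsAsync returns a future immediately, so without the trailing .get() the assertion on cp.wasMergeRegionsCalled() could run before the merge procedure, and therefore the coprocessor hook, had executed. Blocking on the future makes the test deterministic.
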
@@ -190,7 +190,7 @@ public class TestMaster {
     HColumnDescriptor hcd = new HColumnDescriptor("value");
     htd.addFamily(hcd);
 
-    admin.createTable(htd, null);
+    admin.createTable(htd);
     try {
       RegionInfo hri = RegionInfoBuilder.newBuilder(tableName)
         .setStartKey(Bytes.toBytes("A"))
@@ -213,7 +213,7 @@ public class TestMaster {
     HColumnDescriptor hcd = new HColumnDescriptor("value");
     htd.addFamily(hcd);
 
-    admin.createTable(htd, null);
+    admin.createTable(htd);
     try {
       List<RegionInfo> tableRegions = admin.getRegions(tableName);
 
@@ -126,7 +126,7 @@ public class TestMasterMetricsWrapper {
     HTableDescriptor desc = new HTableDescriptor(table);
     byte[] FAMILY = Bytes.toBytes("FAMILY");
     desc.addFamily(new HColumnDescriptor(FAMILY));
-    TEST_UTIL.getHBaseAdmin().createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5);
+    TEST_UTIL.getAdmin().createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5);
 
     // wait till the table is assigned
     long timeoutTime = System.currentTimeMillis() + 1000;
@@ -148,7 +148,7 @@ public class TestMasterMetricsWrapper {
     assertEquals(5, regionNumberPair.getFirst().intValue());
     assertEquals(0, regionNumberPair.getSecond().intValue());
 
-    TEST_UTIL.getHBaseAdmin().offline(hri.getRegionName());
+    TEST_UTIL.getAdmin().offline(hri.getRegionName());
 
     timeoutTime = System.currentTimeMillis() + 800;
     RegionStates regionStates = master.getAssignmentManager().getRegionStates();
@@ -69,7 +69,7 @@ public class TestMergeTableRegionsWhileRSCrash {
   @BeforeClass
   public static void setupCluster() throws Exception {
     UTIL.startMiniCluster(1);
-    admin = UTIL.getHBaseAdmin();
+    admin = UTIL.getAdmin();
     byte[][] splitKeys = new byte[1][];
     splitKeys[0] = SPLITKEY;
     TABLE = UTIL.createTable(TABLE_NAME, CF, splitKeys);
@@ -66,7 +66,7 @@ public class TestSplitRegionWhileRSCrash {
   @BeforeClass
   public static void setupCluster() throws Exception {
     UTIL.startMiniCluster(1);
-    admin = UTIL.getHBaseAdmin();
+    admin = UTIL.getAdmin();
     TABLE = UTIL.createTable(TABLE_NAME, CF);
     UTIL.waitTableAvailable(TABLE_NAME);
   }
@@ -106,7 +106,7 @@ public class TestAssignmentOnRSCrash {
       throws Exception {
     final int NROWS = 100;
     int nkilled = 0;
-    for (RegionInfo hri: UTIL.getHBaseAdmin().getRegions(TEST_TABLE)) {
+    for (RegionInfo hri: UTIL.getAdmin().getRegions(TEST_TABLE)) {
       ServerName serverName = AssignmentTestingUtil.getServerHoldingRegion(UTIL, hri);
       if (AssignmentTestingUtil.isServerHoldingMeta(UTIL, serverName)) continue;
 
@@ -68,7 +68,7 @@ public class TestMasterAbortWhileMergingTable {
     UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
       MergeRegionObserver.class.getName());
     UTIL.startMiniCluster(3);
-    admin = UTIL.getHBaseAdmin();
+    admin = UTIL.getAdmin();
     byte[][] splitKeys = new byte[1][];
     splitKeys[0] = SPLITKEY;
     UTIL.createTable(TABLE_NAME, CF, splitKeys);
@@ -67,7 +67,7 @@ public class TestModifyTableWhileMerging {
     //Set procedure executor thread to 1, making reproducing this issue of HBASE-20921 easier
     UTIL.getConfiguration().setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
     UTIL.startMiniCluster(1);
-    admin = UTIL.getHBaseAdmin();
+    admin = UTIL.getAdmin();
     byte[][] splitKeys = new byte[1][];
     splitKeys[0] = SPLITKEY;
     client = UTIL.createTable(TABLE_NAME, CF, splitKeys);
@@ -24,13 +24,12 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.util.Collection;
-
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -40,7 +39,6 @@ import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -78,7 +76,7 @@ public class TestCleanupCompactedFileOnRegionClose {
     byte[] familyNameBytes = Bytes.toBytes(familyName);
     util.createTable(tableName, familyName);
 
-    HBaseAdmin hBaseAdmin = util.getHBaseAdmin();
+    Admin hBaseAdmin = util.getAdmin();
     Table table = util.getConnection().getTable(tableName);
 
     HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
@@ -162,7 +162,7 @@ public class TestRegionServerAbort {
    */
   @Test
   public void testStopOverrideFromCoprocessor() throws Exception {
-    Admin admin = testUtil.getHBaseAdmin();
+    Admin admin = testUtil.getAdmin();
     HRegionServer regionserver = cluster.getRegionServer(0);
     admin.stopRegionServer(regionserver.getServerName().getHostAndPort());
 
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -165,7 +165,7 @@ public class TestReplicator extends TestReplicationBase {
   }
 
   private void truncateTable(HBaseTestingUtility util, TableName tablename) throws IOException {
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getAdmin();
     admin.disableTable(tableName);
     admin.truncateTable(tablename, false);
   }
@@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import com.google.protobuf.ServiceException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -74,7 +73,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 
@@ -270,33 +268,6 @@ public final class SnapshotTestingUtils {
     }
   }
 
-  /**
-   * Helper method for testing async snapshot operations. Just waits for the
-   * given snapshot to complete on the server by repeatedly checking the master.
-   *
-   * @param master the master running the snapshot
-   * @param snapshot the snapshot to check
-   * @param sleep amount to sleep between checks to see if the snapshot is done
-   * @throws ServiceException if the snapshot fails
-   * @throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
-   */
-  public static void waitForSnapshotToComplete(HMaster master,
-      SnapshotProtos.SnapshotDescription snapshot, long sleep)
-      throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
-    final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder()
-        .setSnapshot(snapshot).build();
-    IsSnapshotDoneResponse done = IsSnapshotDoneResponse.newBuilder()
-        .buildPartial();
-    while (!done.getDone()) {
-      done = master.getMasterRpcServices().isSnapshotDone(null, request);
-      try {
-        Thread.sleep(sleep);
-      } catch (InterruptedException e) {
-        throw new org.apache.hbase.thirdparty.com.google.protobuf.ServiceException(e);
-      }
-    }
-  }
-
   /*
    * Take snapshot with maximum of numTries attempts, ignoring CorruptedSnapshotException
    * except for the last CorruptedSnapshotException
@@ -28,6 +28,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -57,7 +59,11 @@ import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
 
 /**
@@ -273,6 +279,38 @@ public class TestFlushSnapshotFromClient {
     }
   }
 
+  /**
+   * Helper method for testing async snapshot operations. Just waits for the given snapshot to
+   * complete on the server by repeatedly checking the master.
+   * @param master the master running the snapshot
+   * @param snapshot the snapshot to check
+   * @param sleep amount to sleep between checks to see if the snapshot is done
+   */
+  private static void waitForSnapshotToComplete(HMaster master,
+      SnapshotProtos.SnapshotDescription snapshot, long timeoutNanos) throws Exception {
+    final IsSnapshotDoneRequest request =
+      IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build();
+    long start = System.nanoTime();
+    while (System.nanoTime() - start < timeoutNanos) {
+      try {
+        IsSnapshotDoneResponse done = master.getMasterRpcServices().isSnapshotDone(null, request);
+        if (done.getDone()) {
+          return;
+        }
+      } catch (ServiceException e) {
+        // ignore UnknownSnapshotException, this is possible as for AsyncAdmin, the method will
+        // return immediately after sending out the request, no matter whether the master has
+        // processed the request or not.
+        if (!(e.getCause() instanceof UnknownSnapshotException)) {
+          throw e;
+        }
+      }
+
+      Thread.sleep(200);
+    }
+    throw new TimeoutException("Timeout waiting for snapshot " + snapshot + " to complete");
+  }
+
   @Test
   public void testAsyncFlushSnapshot() throws Exception {
     SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder()
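
The helper added above replaces the one removed from SnapshotTestingUtils earlier in this diff, with two deliberate differences: the wait is bounded by a System.nanoTime() deadline instead of looping indefinitely, and UnknownSnapshotException is tolerated while polling, because an AsyncAdmin-based snapshot call can return before the master has registered the snapshot at all. Note that the third parameter is now a timeout in nanoseconds rather than a sleep interval (the copied "@param sleep" javadoc line is stale), which is why the call site in the next hunk passes TimeUnit.MINUTES.toNanos(1).
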
@@ -285,7 +323,7 @@ public class TestFlushSnapshotFromClient {
 
     // constantly loop, looking for the snapshot to complete
     HMaster master = UTIL.getMiniHBaseCluster().getMaster();
-    SnapshotTestingUtils.waitForSnapshotToComplete(master, snapshot, 200);
+    waitForSnapshotToComplete(master, snapshot, TimeUnit.MINUTES.toNanos(1));
     LOG.info(" === Async Snapshot Completed ===");
     UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
@@ -524,7 +562,6 @@ public class TestFlushSnapshotFromClient {
     SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TABLE_NAME);
   }
 
-
   protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
       long expectedRows) throws IOException {
     SnapshotTestingUtils.verifyRowCount(util, tableName, expectedRows);
@@ -335,7 +335,11 @@ public class TestBulkLoadHFiles {
 
     TableName tableName = htd.getTableName();
     if (!util.getAdmin().tableExists(tableName) && (preCreateTable || map != null)) {
-      util.getAdmin().createTable(htd, tableSplitKeys);
+      if (tableSplitKeys != null) {
+        util.getAdmin().createTable(htd, tableSplitKeys);
+      } else {
+        util.getAdmin().createTable(htd);
+      }
     }
 
     Configuration conf = util.getConfiguration();
@@ -626,7 +626,11 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements THBaseService.Iface {
     try {
       TableDescriptor descriptor = tableDescriptorFromThrift(desc);
       byte[][] split = splitKeyFromThrift(splitKeys);
-      connectionCache.getAdmin().createTable(descriptor, split);
+      if (split != null) {
+        connectionCache.getAdmin().createTable(descriptor, split);
+      } else {
+        connectionCache.getAdmin().createTable(descriptor);
+      }
     } catch (IOException e) {
       throw getTIOError(e);
     }
@@ -719,6 +719,11 @@ public class ThriftAdmin implements Admin {
     throw new NotImplementedException("getClusterMetrics not supported in ThriftAdmin");
   }
 
+  @Override
+  public List<RegionMetrics> getRegionMetrics(ServerName serverName) {
+    throw new NotImplementedException("getRegionMetrics not supported in ThriftAdmin");
+  }
+
   @Override
   public List<RegionMetrics> getRegionMetrics(ServerName serverName, TableName tableName) {
     throw new NotImplementedException("getRegionMetrics not supported in ThriftAdmin");
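
The new getRegionMetrics(ServerName) stub above follows from widening the Admin interface in this commit: ThriftAdmin implements Admin, so every added interface method needs an override, and ThriftAdmin's convention is to throw NotImplementedException for operations the Thrift gateway does not support.
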
@@ -1127,8 +1132,8 @@ public class ThriftAdmin implements Admin {
   }
 
   @Override
-  public List<UserPermission>
-      getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) {
+  public List<UserPermission> getUserPermissions(
+      GetUserPermissionsRequest getUserPermissionsRequest) {
     throw new NotImplementedException("getUserPermissions not supported in ThriftAdmin");
   }
 