HBASE-14767 - Remove deprecated functions from HBaseAdmin

Deprecated in HBASE-12083 (1.0.0, 2.0.0, 0.99.2)
- HBaseAdmin(Configuration)

Deprecated in HBASE-10479 (0.99.0, hbase-10070)
- HBaseAdmin(Connection)

Deprecated in HBASE-11826 (0.99.0, 1.0.0, 2.0.0)
- flush()
- compact()
- majorCompact()
- split()
- getCompactionState()

- Changes many declarations from HBaseAdmin to Admin (Apekshit)

Signed-off-by: stack <stack@apache.org>
This commit is contained in:
parent 9cce912de0
commit 08963189a2
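For code that used the removed constructors, the replacement is the pattern the deprecation notices point to: obtain an Admin from a caller-owned Connection. A minimal sketch against the post-commit client API (the table name is a placeholder):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminMigration {
  public static void main(String[] args) throws Exception {
    // Before this commit: HBaseAdmin admin = new HBaseAdmin(conf);  -- removed.
    // After: the caller owns the Connection and must close it; closing the
    // Admin no longer closes the underlying connection.
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableName tn = TableName.valueOf("someTable"); // placeholder table name
      System.out.println("exists: " + admin.tableExists(tn));
    }
  }
}

This is why the diff below also removes the cleanupConnectionOnClose bookkeeping: with no internally created connection left, close() has nothing of its own to tear down.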
@@ -210,52 +210,17 @@ public class HBaseAdmin implements Admin {
   private final int retryLongerMultiplier;
   private final int syncWaitTimeout;
   private boolean aborted;
-  private boolean cleanupConnectionOnClose = false; // close the connection in close()
   private boolean closed = false;
   private int operationTimeout;

   private RpcRetryingCallerFactory rpcCallerFactory;

   private NonceGenerator ng;

-  /**
-   * Constructor.
-   * See {@link #HBaseAdmin(Connection connection)}
-   *
-   * @param c Configuration object. Copied internally.
-   * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
-   * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
-   */
-  @Deprecated
-  public HBaseAdmin(Configuration c)
-  throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
-    this(ConnectionFactory.createConnection(new Configuration(c)));
-    this.cleanupConnectionOnClose = true;
-  }
-
   @Override
   public int getOperationTimeout() {
     return operationTimeout;
   }

-  /**
-   * Constructor for externally managed Connections.
-   * The connection to master will be created when required by admin functions.
-   *
-   * @param connection The Connection instance to use
-   * @throws MasterNotRunningException
-   * @throws ZooKeeperConnectionException are not
-   *  thrown anymore but kept into the interface for backward api compatibility
-   * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
-   * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
-   */
-  @Deprecated
-  public HBaseAdmin(Connection connection)
-      throws MasterNotRunningException, ZooKeeperConnectionException {
-    this((ClusterConnection)connection);
-  }
-
   HBaseAdmin(ClusterConnection connection) {
     this.conf = connection.getConfiguration();
     this.connection = connection;
@@ -1854,31 +1819,6 @@ public class HBaseAdmin implements Admin {
     flush(regionServerPair.getSecond(), regionServerPair.getFirst());
   }

-  /**
-   * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or {@link #flushRegion
-   * (byte[])} instead.
-   */
-  @Deprecated
-  public void flush(final String tableNameOrRegionName)
-  throws IOException, InterruptedException {
-    flush(Bytes.toBytes(tableNameOrRegionName));
-  }
-
-  /**
-   * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or {@link #flushRegion
-   * (byte[])} instead.
-   */
-  @Deprecated
-  public void flush(final byte[] tableNameOrRegionName)
-  throws IOException, InterruptedException {
-    try {
-      flushRegion(tableNameOrRegionName);
-    } catch (IllegalArgumentException e) {
-      // Unknown region. Try table.
-      flush(TableName.valueOf(tableNameOrRegionName));
-    }
-  }
-
   private void flush(final ServerName sn, final HRegionInfo hri)
   throws IOException {
     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
@@ -1909,30 +1849,6 @@ public class HBaseAdmin implements Admin {
     compactRegion(regionName, null, false);
   }

-  /**
-   * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
-   * (byte[])} instead.
-   */
-  @Deprecated
-  public void compact(final String tableNameOrRegionName)
-  throws IOException {
-    compact(Bytes.toBytes(tableNameOrRegionName));
-  }
-
-  /**
-   * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
-   * (byte[])} instead.
-   */
-  @Deprecated
-  public void compact(final byte[] tableNameOrRegionName)
-  throws IOException {
-    try {
-      compactRegion(tableNameOrRegionName, null, false);
-    } catch (IllegalArgumentException e) {
-      compact(TableName.valueOf(tableNameOrRegionName), null, false, CompactType.NORMAL);
-    }
-  }
-
   /**
    * {@inheritDoc}
    */
@@ -1951,31 +1867,6 @@ public class HBaseAdmin implements Admin {
     compactRegion(regionName, columnFamily, false);
   }

-  /**
-   * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
-   * (byte[], byte[])} instead.
-   */
-  @Deprecated
-  public void compact(String tableOrRegionName, String columnFamily)
-    throws IOException {
-    compact(Bytes.toBytes(tableOrRegionName), Bytes.toBytes(columnFamily));
-  }
-
-  /**
-   * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
-   * (byte[], byte[])} instead.
-   */
-  @Deprecated
-  public void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
-  throws IOException {
-    try {
-      compactRegion(tableNameOrRegionName, columnFamily, false);
-    } catch (IllegalArgumentException e) {
-      // Bad region, try table
-      compact(TableName.valueOf(tableNameOrRegionName), columnFamily, false, CompactType.NORMAL);
-    }
-  }
-
   /**
    * {@inheritDoc}
    */
@@ -2005,31 +1896,6 @@ public class HBaseAdmin implements Admin {
     compactRegion(regionName, null, true);
   }

-  /**
-   * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
-   * #majorCompactRegion(byte[])} instead.
-   */
-  @Deprecated
-  public void majorCompact(final String tableNameOrRegionName)
-  throws IOException {
-    majorCompact(Bytes.toBytes(tableNameOrRegionName));
-  }
-
-  /**
-   * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
-   * #majorCompactRegion(byte[])} instead.
-   */
-  @Deprecated
-  public void majorCompact(final byte[] tableNameOrRegionName)
-  throws IOException {
-    try {
-      compactRegion(tableNameOrRegionName, null, true);
-    } catch (IllegalArgumentException e) {
-      // Invalid region, try table
-      compact(TableName.valueOf(tableNameOrRegionName), null, true, CompactType.NORMAL);
-    }
-  }
-
   /**
    * {@inheritDoc}
    */
@@ -2048,31 +1914,6 @@ public class HBaseAdmin implements Admin {
     compactRegion(regionName, columnFamily, true);
   }

-  /**
-   * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
-   * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
-   */
-  @Deprecated
-  public void majorCompact(final String tableNameOrRegionName, final String columnFamily)
-  throws IOException {
-    majorCompact(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(columnFamily));
-  }
-
-  /**
-   * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
-   * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
-   */
-  @Deprecated
-  public void majorCompact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
-  throws IOException {
-    try {
-      compactRegion(tableNameOrRegionName, columnFamily, true);
-    } catch (IllegalArgumentException e) {
-      // Invalid region, try table
-      compact(TableName.valueOf(tableNameOrRegionName), columnFamily, true, CompactType.NORMAL);
-    }
-  }
-
   /**
    * Compact a table.
    * Asynchronous operation.
@@ -2478,26 +2319,6 @@ public class HBaseAdmin implements Admin {
     splitRegion(regionName, null);
   }

-  /**
-   * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
-   * (byte[])} instead.
-   */
-  @Deprecated
-  public void split(final String tableNameOrRegionName)
-  throws IOException, InterruptedException {
-    split(Bytes.toBytes(tableNameOrRegionName));
-  }
-
-  /**
-   * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
-   * (byte[])} instead.
-   */
-  @Deprecated
-  public void split(final byte[] tableNameOrRegionName)
-  throws IOException, InterruptedException {
-    split(tableNameOrRegionName, null);
-  }
-
   /**
    * {@inheritDoc}
    */
@@ -2555,31 +2376,6 @@ public class HBaseAdmin implements Admin {
     split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
   }

-  /**
-   * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
-   * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
-   */
-  @Deprecated
-  public void split(final String tableNameOrRegionName,
-    final String splitPoint) throws IOException {
-    split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint));
-  }
-
-  /**
-   * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
-   * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
-   */
-  @Deprecated
-  public void split(final byte[] tableNameOrRegionName,
-    final byte [] splitPoint) throws IOException {
-    try {
-      splitRegion(tableNameOrRegionName, splitPoint);
-    } catch (IllegalArgumentException e) {
-      // Bad region, try table
-      split(TableName.valueOf(tableNameOrRegionName), splitPoint);
-    }
-  }
-
   @VisibleForTesting
   public void split(final ServerName sn, final HRegionInfo hri,
       byte[] splitPoint) throws IOException {
@@ -3077,10 +2873,6 @@ public class HBaseAdmin implements Admin {

   @Override
   public synchronized void close() throws IOException {
-    if (cleanupConnectionOnClose && this.connection != null && !this.closed) {
-      this.connection.close();
-      this.closed = true;
-    }
   }

   /**
@@ -3239,31 +3031,6 @@ public class HBaseAdmin implements Admin {
     }
   }

-  /**
-   * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
-   * #getCompactionStateForRegion(byte[])} instead.
-   */
-  @Deprecated
-  public CompactionState getCompactionState(final String tableNameOrRegionName)
-  throws IOException, InterruptedException {
-    return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
-  }
-
-  /**
-   * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
-   * #getCompactionStateForRegion(byte[])} instead.
-   */
-  @Deprecated
-  public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
-  throws IOException, InterruptedException {
-    try {
-      return getCompactionStateForRegion(tableNameOrRegionName);
-    } catch (IllegalArgumentException e) {
-      // Invalid region, try table
-      return getCompactionState(TableName.valueOf(tableNameOrRegionName));
-    }
-  }
-
   /**
    * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
    * taken. If the table is disabled, an offline snapshot is taken.
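The removed overloads above took an ambiguous tableNameOrRegionName argument and guessed which kind it was; the replacements named in the @deprecated tags split that into explicit table and region variants. A sketch of the new call shapes, under the assumption that the caller supplies the admin, table, and regionName values:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Sketch only: the explicit table/region variants that replace the removed
// "tableNameOrRegionName" overloads.
final class AdminCallShapes {
  static void tableOps(Admin admin, TableName table) throws IOException {
    admin.flush(table);               // was flush(tableNameOrRegionName)
    admin.compact(table);             // was compact(tableNameOrRegionName)
    admin.majorCompact(table);        // was majorCompact(tableNameOrRegionName)
    admin.split(table);               // was split(tableNameOrRegionName)
    admin.getCompactionState(table);  // was getCompactionState(tableNameOrRegionName)
  }

  static void regionOps(Admin admin, byte[] regionName) throws IOException {
    admin.flushRegion(regionName);
    admin.compactRegion(regionName);
    admin.majorCompactRegion(regionName);
    admin.splitRegion(regionName);
    admin.getCompactionStateForRegion(regionName);
  }
}

The test changes below are mostly mechanical applications of exactly this split.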
@@ -31,6 +31,9 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -95,7 +98,8 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
     String familyName = args[1];
     TableName tn = TableName.valueOf(tableName);
     HBaseAdmin.checkHBaseAvailable(getConf());
-    HBaseAdmin admin = new HBaseAdmin(getConf());
+    Connection connection = ConnectionFactory.createConnection(getConf());
+    Admin admin = connection.getAdmin();
     try {
       HTableDescriptor htd = admin.getTableDescriptor(tn);
       HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
@@ -114,6 +118,11 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
       } catch (IOException e) {
         LOG.error("Failed to close the HBaseAdmin.", e);
       }
+      try {
+        connection.close();
+      } catch (IOException e) {
+        LOG.error("Failed to close the connection.", e);
+      }
     }
   }

@@ -29,6 +29,9 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.Tool;
@@ -65,7 +68,8 @@ public class Sweeper extends Configured implements Tool {
     Configuration conf = getConf();
     // make sure the target HBase exists.
     HBaseAdmin.checkHBaseAvailable(conf);
-    HBaseAdmin admin = new HBaseAdmin(conf);
+    Connection connection = ConnectionFactory.createConnection(getConf());
+    Admin admin = connection.getAdmin();
     try {
       FileSystem fs = FileSystem.get(conf);
       TableName tn = TableName.valueOf(tableName);
@@ -86,6 +90,11 @@ public class Sweeper extends Configured implements Tool {
       } catch (IOException e) {
         System.out.println("Failed to close the HBaseAdmin: " + e.getMessage());
       }
+      try {
+        connection.close();
+      } catch (IOException e) {
+        System.out.println("Failed to close the connection: " + e.getMessage());
+      }
     }
   }

@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Consistency;
@@ -1176,7 +1177,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
    */
   public void shutdownMiniHBaseCluster() throws IOException {
     if (hbaseAdmin != null) {
-      hbaseAdmin.close0();
+      hbaseAdmin.close();
       hbaseAdmin = null;
     }

@@ -2693,28 +2694,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   public synchronized HBaseAdmin getHBaseAdmin()
   throws IOException {
     if (hbaseAdmin == null){
-      this.hbaseAdmin = new HBaseAdminForTests(getConnection());
+      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
     }
     return hbaseAdmin;
   }

-  private HBaseAdminForTests hbaseAdmin = null;
-  private static class HBaseAdminForTests extends HBaseAdmin {
-    public HBaseAdminForTests(Connection connection) throws MasterNotRunningException,
-      ZooKeeperConnectionException, IOException {
-      super(connection);
-    }
+  private HBaseAdmin hbaseAdmin = null;

-    @Override
-    public synchronized void close() throws IOException {
-      LOG.warn("close() called on HBaseAdmin instance returned from " +
-        "HBaseTestingUtility.getHBaseAdmin()");
-    }
-
-    private synchronized void close0() throws IOException {
-      super.close();
-    }
-  }

   /**
    * Returns a ZooKeeperWatcher instance.

@@ -28,10 +28,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -304,7 +304,7 @@ public class TestAcidGuarantees implements Tool {
     }
     // Add a flusher
     ctx.addThread(new RepeatingTestThread(ctx) {
-      HBaseAdmin admin = util.getHBaseAdmin();
+      Admin admin = util.getHBaseAdmin();
       public void doAnAction() throws Exception {
         try {
           admin.flush(TABLE_NAME);

@@ -307,7 +307,7 @@ public class TestIOFencing {
     assertTrue(compactingRegion.countStoreFiles() > 1);
     final byte REGION_NAME[] = compactingRegion.getRegionInfo().getRegionName();
     LOG.info("Asking for compaction");
-    ((HBaseAdmin)admin).majorCompact(TABLE_NAME.getName());
+    admin.majorCompact(TABLE_NAME);
     LOG.info("Waiting for compaction to be about to start");
     compactingRegion.waitForCompactionToBlock();
     LOG.info("Starting a new server");
@@ -348,7 +348,7 @@ public class TestIOFencing {
     // If we survive the split keep going...
     // Now we make sure that the region isn't totally confused. Load up more rows.
     TEST_UTIL.loadNumericRows(table, FAMILY, FIRST_BATCH_COUNT, FIRST_BATCH_COUNT + SECOND_BATCH_COUNT);
-    ((HBaseAdmin)admin).majorCompact(TABLE_NAME.getName());
+    admin.majorCompact(TABLE_NAME);
     startWaitTime = System.currentTimeMillis();
     while (newRegion.compactCount == 0) {
       Thread.sleep(1000);

@@ -1203,7 +1203,7 @@ public class TestAdmin1 {
     // the element at index 1 would be a replica (since the metareader gives us ordered
     // regions). Try splitting that region via the split API . Should fail
     try {
-      TEST_UTIL.getHBaseAdmin().split(regions.get(1).getFirst().getRegionName());
+      TEST_UTIL.getHBaseAdmin().splitRegion(regions.get(1).getFirst().getRegionName());
     } catch (IllegalArgumentException ex) {
       gotException = true;
     }

@@ -465,10 +465,10 @@ public class TestAdmin2 {
         onlineRegions.contains(info));
   }

-  private HBaseAdmin createTable(byte[] TABLENAME) throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+  private Admin createTable(TableName tableName) throws IOException {
+    Admin admin = TEST_UTIL.getHBaseAdmin();

-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLENAME));
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     HColumnDescriptor hcd = new HColumnDescriptor("value");

     htd.addFamily(hcd);
@@ -517,10 +517,10 @@ public class TestAdmin2 {

   @Test (timeout=300000)
   public void testMoveToPreviouslyAssignedRS() throws IOException, InterruptedException {
-    byte[] tableName = Bytes.toBytes("testMoveToPreviouslyAssignedRS");
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     HMaster master = cluster.getMaster();
-    HBaseAdmin localAdmin = createTable(tableName);
+    TableName tableName = TableName.valueOf("testMoveToPreviouslyAssignedRS");
+    Admin localAdmin = createTable(tableName);
     List<HRegionInfo> tableRegions = localAdmin.getTableRegions(tableName);
     HRegionInfo hri = tableRegions.get(0);
     AssignmentManager am = master.getAssignmentManager();

@@ -551,7 +551,7 @@ public class TestFromClientSide {
   private List<HRegionLocation> splitTable(final Table t)
   throws IOException, InterruptedException {
     // Split this table in two.
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     admin.split(t.getName());
     admin.close();
     List<HRegionLocation> regions = waitOnSplit(t);
@@ -1661,7 +1661,7 @@ public class TestFromClientSide {

   @Test
   public void testDeleteFamilyVersion() throws Exception {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     TableName TABLE = TableName.valueOf("testDeleteFamilyVersion");

     byte [][] QUALIFIERS = makeNAscii(QUALIFIER, 1);
@@ -1706,7 +1706,7 @@ public class TestFromClientSide {
     byte [][] VALUES = makeN(VALUE, 5);
     long [] ts = {1000, 2000, 3000, 4000, 5000};

-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     Table ht = TEST_UTIL.createTable(TABLE, FAMILY, 5);
     Put put = null;
     Result result = null;
@@ -3546,7 +3546,7 @@ public class TestFromClientSide {

     TableName TABLE = TableName.valueOf("testUpdatesWithMajorCompaction");
     Table hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10);
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();

     // Write a column with values at timestamp 1, 2 and 3
     byte[] row = Bytes.toBytes("row2");
@@ -3605,10 +3605,9 @@ public class TestFromClientSide {
   @Test
   public void testMajorCompactionBetweenTwoUpdates() throws Exception {

-    String tableName = "testMajorCompactionBetweenTwoUpdates";
-    TableName TABLE = TableName.valueOf(tableName);
-    Table hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10);
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    TableName tableName = TableName.valueOf("testMajorCompactionBetweenTwoUpdates");
+    Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10);
+    Admin admin = TEST_UTIL.getHBaseAdmin();

     // Write a column with values at timestamp 1, 2 and 3
     byte[] row = Bytes.toBytes("row3");
@@ -4048,10 +4047,10 @@ public class TestFromClientSide {
     TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
     Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
     Table t = conn.getTable(tableName);
-    HBaseAdmin ha = new HBaseAdmin(conn);
-    assertTrue(ha.tableExists(tableName));
+    Admin admin = conn.getAdmin();
+    assertTrue(admin.tableExists(tableName));
     assertTrue(t.get(new Get(ROW)).isEmpty());
-    ha.close();
+    admin.close();
   }

   /**
@@ -4066,8 +4065,8 @@ public class TestFromClientSide {
     TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
     Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
     Table t = conn.getTable(tableName);
-    try (HBaseAdmin ha = new HBaseAdmin(conn)) {
-      assertTrue(ha.tableExists(tableName));
+    try (Admin admin = conn.getAdmin()) {
+      assertTrue(admin.tableExists(tableName));
       assertTrue(t.get(new Get(ROW)).isEmpty());
     }

@@ -4081,10 +4080,10 @@ public class TestFromClientSide {
     assertTrue(cluster.waitForActiveAndReadyMaster());

     // test that the same unmanaged connection works with a new
-    // HBaseAdmin and can connect to the new master;
-    try (HBaseAdmin newAdmin = new HBaseAdmin(conn)) {
-      assertTrue(newAdmin.tableExists(tableName));
-      assertTrue(newAdmin.getClusterStatus().getServersSize() == SLAVES + 1);
+    // Admin and can connect to the new master;
+    try (Admin admin = conn.getAdmin()) {
+      assertTrue(admin.tableExists(tableName));
+      assertTrue(admin.getClusterStatus().getServersSize() == SLAVES + 1);
     }
   }

@@ -6350,7 +6349,7 @@ public class TestFromClientSide {
     HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
     htd.addFamily(fam);
     byte[][] KEYS = HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE;
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     admin.createTable(htd, KEYS);
     List<HRegionInfo> regions = admin.getTableRegions(htd.getTableName());

@@ -149,17 +149,16 @@ public class TestFromClientSide3 {
      */
     TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3);

-    String tableName = "testAdvancedConfigOverride";
-    TableName TABLE = TableName.valueOf(tableName);
-    Table hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10);
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    TableName tableName = TableName.valueOf("testAdvancedConfigOverride");
+    Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10);
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     ClusterConnection connection = (ClusterConnection) TEST_UTIL.getConnection();

     // Create 3 store files.
     byte[] row = Bytes.toBytes(random.nextInt());
-    performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 100);
+    performMultiplePutAndFlush((HBaseAdmin) admin, hTable, row, FAMILY, 3, 100);

-    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(TABLE)) {
+    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
       // Verify we have multiple store files.
       HRegionLocation loc = locator.getRegionLocation(row, true);
       byte[] regionName = loc.getRegionInfo().getRegionName();
@@ -167,7 +166,7 @@ public class TestFromClientSide3 {
       assertTrue(ProtobufUtil.getStoreFiles(server, regionName, FAMILY).size() > 1);

       // Issue a compaction request
-      admin.compact(TABLE.getName());
+      admin.compact(tableName);

       // poll wait for the compactions to happen
       for (int i = 0; i < 10 * 1000 / 40; ++i) {
@@ -189,19 +188,19 @@ public class TestFromClientSide3 {
       LOG.info("hbase.hstore.compaction.min should now be 5");
       HTableDescriptor htd = new HTableDescriptor(hTable.getTableDescriptor());
       htd.setValue("hbase.hstore.compaction.min", String.valueOf(5));
-      admin.modifyTable(TABLE, htd);
+      admin.modifyTable(tableName, htd);
       Pair<Integer, Integer> st;
-      while (null != (st = admin.getAlterStatus(TABLE)) && st.getFirst() > 0) {
+      while (null != (st = admin.getAlterStatus(tableName)) && st.getFirst() > 0) {
         LOG.debug(st.getFirst() + " regions left to update");
         Thread.sleep(40);
       }
       LOG.info("alter status finished");

       // Create 3 more store files.
-      performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 10);
+      performMultiplePutAndFlush((HBaseAdmin) admin, hTable, row, FAMILY, 3, 10);

       // Issue a compaction request
-      admin.compact(TABLE.getName());
+      admin.compact(tableName);

       // This time, the compaction request should not happen
       Thread.sleep(10 * 1000);
@@ -216,15 +215,15 @@ public class TestFromClientSide3 {
       HColumnDescriptor hcd = new HColumnDescriptor(htd.getFamily(FAMILY));
       hcd.setValue("hbase.hstore.compaction.min", String.valueOf(2));
       htd.modifyFamily(hcd);
-      admin.modifyTable(TABLE, htd);
-      while (null != (st = admin.getAlterStatus(TABLE)) && st.getFirst() > 0) {
+      admin.modifyTable(tableName, htd);
+      while (null != (st = admin.getAlterStatus(tableName)) && st.getFirst() > 0) {
        LOG.debug(st.getFirst() + " regions left to update");
         Thread.sleep(40);
       }
       LOG.info("alter status finished");

       // Issue a compaction request
-      admin.compact(TABLE.getName());
+      admin.compact(tableName);

       // poll wait for the compactions to happen
       for (int i = 0; i < 10 * 1000 / 40; ++i) {
@@ -251,8 +250,8 @@ public class TestFromClientSide3 {
       hcd = new HColumnDescriptor(htd.getFamily(FAMILY));
       hcd.setValue("hbase.hstore.compaction.min", null);
       htd.modifyFamily(hcd);
-      admin.modifyTable(TABLE, htd);
-      while (null != (st = admin.getAlterStatus(TABLE)) && st.getFirst() > 0) {
+      admin.modifyTable(tableName, htd);
+      while (null != (st = admin.getAlterStatus(tableName)) && st.getFirst() > 0) {
         LOG.debug(st.getFirst() + " regions left to update");
         Thread.sleep(40);
       }

@@ -36,9 +36,9 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -65,7 +65,7 @@ public class TestFilterWrapper {

   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static Configuration conf = null;
-  private static HBaseAdmin admin = null;
+  private static Admin admin = null;
   private static TableName name = TableName.valueOf("test");
   private static Connection connection;

@@ -220,7 +220,7 @@ public class TestChangingEncoding {
   private void compactAndWait() throws IOException, InterruptedException {
     LOG.debug("Compacting table " + tableName);
     HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     admin.majorCompact(tableName);

     // Waiting for the compaction to start, at least .5s.

@@ -23,11 +23,11 @@ import java.util.List;

 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -67,7 +67,7 @@ public class TestLoadAndSwitchEncodeOnDisk extends

   @Test(timeout=TIMEOUT_MS)
   public void loadTest() throws Exception {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();

     compression = Compression.Algorithm.GZ; // used for table setup
     super.loadTest();

@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
@@ -86,7 +85,7 @@ public class TestAssignmentManagerOnCluster {
   private final static byte[] FAMILY = Bytes.toBytes("FAMILY");
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   final static Configuration conf = TEST_UTIL.getConfiguration();
-  private static HBaseAdmin admin;
+  private static Admin admin;

   @BeforeClass
   public static void setUpBeforeClass() throws Exception {

@@ -30,8 +30,12 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -57,7 +61,7 @@ public class TestMasterStatusServlet {

   private HMaster master;
   private Configuration conf;
-  private HBaseAdmin admin;
+  private Admin admin;

   static final ServerName FAKE_HOST =
       ServerName.valueOf("fakehost", 12345, 1234567890);
@@ -110,7 +114,7 @@ public class TestMasterStatusServlet {
     Mockito.doReturn(rms).when(master).getRegionServerMetrics();

     // Mock admin
-    admin = Mockito.mock(HBaseAdmin.class);
+    admin = Mockito.mock(Admin.class);
   }

   private void setupMockTables() throws IOException {

@@ -36,8 +36,8 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Table;
@@ -82,7 +82,7 @@ public class TestEnableTableHandler {
     final TableName tableName = TableName.valueOf("testEnableTableWithNoRegionServers");
     final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     final HMaster m = cluster.getMaster();
-    final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    final Admin admin = TEST_UTIL.getHBaseAdmin();
     final HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addFamily(new HColumnDescriptor(FAMILYNAME));
     admin.createTable(desc);
@@ -143,7 +143,7 @@ public class TestEnableTableHandler {
   public void testDeleteForSureClearsAllTableRowsFromMeta()
       throws IOException, InterruptedException {
     final TableName tableName = TableName.valueOf("testDeleteForSureClearsAllTableRowsFromMeta");
-    final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    final Admin admin = TEST_UTIL.getHBaseAdmin();
     final HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addFamily(new HColumnDescriptor(FAMILYNAME));
     try {
@@ -211,13 +211,7 @@ public class TestEnableTableHandler {
     }
   }

-  public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd,
-      byte [][] splitKeys)
-      throws Exception {
-    createTable(testUtil, testUtil.getHBaseAdmin(), htd, splitKeys);
-  }
-
-  public static void createTable(HBaseTestingUtility testUtil, HBaseAdmin admin,
+  public static void createTable(HBaseTestingUtility testUtil,
       HTableDescriptor htd, byte [][] splitKeys)
       throws Exception {
     // NOTE: We need a latch because admin is not sync,
@@ -225,6 +219,7 @@ public class TestEnableTableHandler {
     MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster()
       .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName());
     observer.tableCreationLatch = new CountDownLatch(1);
+    Admin admin = testUtil.getHBaseAdmin();
     if (splitKeys != null) {
       admin.createTable(htd, splitKeys);
     } else {
@@ -236,18 +231,13 @@ public class TestEnableTableHandler {
   }

   public static void deleteTable(HBaseTestingUtility testUtil, TableName tableName)
       throws Exception {
-    deleteTable(testUtil, testUtil.getHBaseAdmin(), tableName);
-  }
-
-  public static void deleteTable(HBaseTestingUtility testUtil, HBaseAdmin admin,
-      TableName tableName)
-      throws Exception {
     // NOTE: We need a latch because admin is not sync,
     // so the postOp coprocessor method may be called after the admin operation returned.
     MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster()
       .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName());
     observer.tableDeletionLatch = new CountDownLatch(1);
+    Admin admin = testUtil.getHBaseAdmin();
     try {
       admin.disableTable(tableName);
     } catch (Exception e) {

@@ -24,7 +24,11 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -43,7 +47,7 @@ public class TestMobDataBlockEncoding {
   private final static byte [] qf2 = Bytes.toBytes("qualifier2");
   protected final byte[] qf3 = Bytes.toBytes("qualifier3");
   private static Table table;
-  private static HBaseAdmin admin;
+  private static Admin admin;
   private static HColumnDescriptor hcd;
   private static HTableDescriptor desc;
   private static Random random = new Random();

@@ -50,9 +50,9 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
@@ -100,7 +100,7 @@ public class TestNamespaceAuditor {
       withTimeout(this.getClass()).withLookingForStuckThread(true).build();
   private static final Log LOG = LogFactory.getLog(TestNamespaceAuditor.class);
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static HBaseAdmin ADMIN;
+  private static Admin ADMIN;
   private String prefix = "TestNamespaceAuditor";

   @BeforeClass

@@ -30,7 +30,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
@@ -89,7 +89,7 @@ public class TestCompactionState {
     Table ht = null;
     try {
       ht = TEST_UTIL.createTable(table, family);
-      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+      Admin admin = TEST_UTIL.getHBaseAdmin();
       try {
         admin.compact(table, fakecf);
       } catch (IOException ioe) {
@@ -137,18 +137,18 @@ public class TestCompactionState {
       int countBefore = countStoreFilesInFamilies(regions, families);
       int countBeforeSingleFamily = countStoreFilesInFamily(regions, family);
       assertTrue(countBefore > 0); // there should be some data files
-      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+      Admin admin = TEST_UTIL.getHBaseAdmin();
       if (expectedState == CompactionState.MINOR) {
         if (singleFamily) {
-          admin.compact(table.getName(), family);
+          admin.compact(table, family);
         } else {
-          admin.compact(table.getName());
+          admin.compact(table);
         }
       } else {
         if (singleFamily) {
-          admin.majorCompact(table.getName(), family);
+          admin.majorCompact(table, family);
         } else {
-          admin.majorCompact(table.getName());
+          admin.majorCompact(table);
         }
       }
       long curt = System.currentTimeMillis();

@@ -33,9 +33,9 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -67,7 +67,7 @@ public class TestMobStoreScanner {
   private final static byte [] qf2 = Bytes.toBytes("qualifier2");
   protected final byte[] qf3 = Bytes.toBytes("qualifier3");
   private static Table table;
-  private static HBaseAdmin admin;
+  private static Admin admin;
   private static HColumnDescriptor hcd;
   private static HTableDescriptor desc;
   private static Random random = new Random();

@@ -436,7 +436,8 @@ public class TestRegionServerMetrics {
     hcd.setMobEnabled(true);
     hcd.setMobThreshold(0);
     htd.addFamily(hcd);
-    HBaseAdmin admin = new HBaseAdmin(conf);
+    Connection connection = ConnectionFactory.createConnection(conf);
+    Admin admin = connection.getAdmin();
     HTable t = TEST_UTIL.createTable(htd, new byte[0][0], conf);
     Region region = rs.getOnlineRegions(tableName).get(0);
     t.setAutoFlush(true, true);
@@ -485,5 +486,6 @@ public class TestRegionServerMetrics {
         serverSource);
     t.close();
     admin.close();
+    connection.close();
   }
 }

@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
@@ -76,7 +76,7 @@ public class TestRemoveRegionMetrics {
       TableName tableName = TableName.valueOf(tableNameString);
       Table t = TEST_UTIL.createTable(tableName, Bytes.toBytes("D"));
       TEST_UTIL.waitUntilAllRegionsAssigned(t.getName());
-      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+      Admin admin = TEST_UTIL.getHBaseAdmin();
       HRegionInfo regionInfo;
       byte[] row = Bytes.toBytes("r1");

@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
@@ -73,7 +72,7 @@ public class TestScannerWithBulkload {
   public void testBulkLoad() throws Exception {
     TableName tableName = TableName.valueOf("testBulkLoad");
     long l = System.currentTimeMillis();
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     createTable(admin, tableName);
     Scan scan = createScan();
     final Table table = init(admin, l, scan, tableName);
@@ -177,7 +176,7 @@ public class TestScannerWithBulkload {
     return hfilePath;
   }

-  private Table init(HBaseAdmin admin, long l, Scan scan, TableName tableName) throws Exception {
+  private Table init(Admin admin, long l, Scan scan, TableName tableName) throws Exception {
     Table table = TEST_UTIL.getConnection().getTable(tableName);
     Put put0 = new Put(Bytes.toBytes("row1"));
     put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
@@ -211,7 +210,7 @@ public class TestScannerWithBulkload {
   public void testBulkLoadWithParallelScan() throws Exception {
     final TableName tableName = TableName.valueOf("testBulkLoadWithParallelScan");
     final long l = System.currentTimeMillis();
-    final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    final Admin admin = TEST_UTIL.getHBaseAdmin();
     createTable(admin, tableName);
     Scan scan = createScan();
     final Table table = init(admin, l, scan, tableName);
@@ -253,7 +252,7 @@ public class TestScannerWithBulkload {
   public void testBulkLoadNativeHFile() throws Exception {
     TableName tableName = TableName.valueOf("testBulkLoadNativeHFile");
     long l = System.currentTimeMillis();
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     createTable(admin, tableName);
     Scan scan = createScan();
     final Table table = init(admin, l, scan, tableName);

@@ -63,7 +63,6 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -120,7 +119,7 @@ import com.google.protobuf.ServiceException;
 public class TestSplitTransactionOnCluster {
   private static final Log LOG =
       LogFactory.getLog(TestSplitTransactionOnCluster.class);
-  private HBaseAdmin admin = null;
+  private Admin admin = null;
   private MiniHBaseCluster cluster = null;
   private static final int NB_SERVERS = 3;
   private static CountDownLatch latch = new CountDownLatch(1);
@@ -201,7 +200,7 @@ public class TestSplitTransactionOnCluster {
         Coprocessor.PRIORITY_USER, region.getBaseConf());

     // split async
-    this.admin.split(region.getRegionInfo().getRegionName(), new byte[] {42});
+    this.admin.splitRegion(region.getRegionInfo().getRegionName(), new byte[] {42});

     // we have to wait until the SPLITTING state is seen by the master
     FailingSplitRegionObserver observer = (FailingSplitRegionObserver) region
@@ -365,9 +364,9 @@ public class TestSplitTransactionOnCluster {

     // Now try splitting.... should fail. And each should successfully
     // rollback.
-    this.admin.split(hri.getRegionNameAsString());
-    this.admin.split(hri.getRegionNameAsString());
-    this.admin.split(hri.getRegionNameAsString());
+    this.admin.splitRegion(hri.getRegionName());
+    this.admin.splitRegion(hri.getRegionName());
+    this.admin.splitRegion(hri.getRegionName());
     // Wait around a while and assert count of regions remains constant.
     for (int i = 0; i < 10; i++) {
       Thread.sleep(100);
@@ -427,7 +426,7 @@ public class TestSplitTransactionOnCluster {
       LOG.info("Daughter we are going to split: " + daughter);
       // Compact first to ensure we have cleaned up references -- else the split
       // will fail.
-      this.admin.compact(daughter.getRegionName());
+      this.admin.compactRegion(daughter.getRegionName());
       daughters = cluster.getRegions(tableName);
       HRegion daughterRegion = null;
       for (HRegion r: daughters) {
@@ -488,13 +487,13 @@ public class TestSplitTransactionOnCluster {
         String val = "Val" + i;
         p.addColumn("col".getBytes(), "ql".getBytes(), val.getBytes());
         table.put(p);
-        admin.flush(userTableName.getName());
+        admin.flush(userTableName);
         Delete d = new Delete(row.getBytes());
         // Do a normal delete
         table.delete(d);
-        admin.flush(userTableName.getName());
+        admin.flush(userTableName);
       }
-      admin.majorCompact(userTableName.getName());
+      admin.majorCompact(userTableName);
       List<HRegionInfo> regionsOfTable = TESTING_UTIL.getMiniHBaseCluster()
           .getMaster().getAssignmentManager().getRegionStates()
           .getRegionsOfTable(userTableName);
@@ -508,8 +507,8 @@ public class TestSplitTransactionOnCluster {
       p = new Put("row8".getBytes());
       p.addColumn("col".getBytes(), "ql".getBytes(), "val".getBytes());
       table.put(p);
-      admin.flush(userTableName.getName());
-      admin.split(hRegionInfo.getRegionName(), "row7".getBytes());
+      admin.flush(userTableName);
+      admin.splitRegion(hRegionInfo.getRegionName(), "row7".getBytes());
      regionsOfTable = TESTING_UTIL.getMiniHBaseCluster().getMaster()
          .getAssignmentManager().getRegionStates()
          .getRegionsOfTable(userTableName);
@@ -585,7 +584,7 @@ public class TestSplitTransactionOnCluster {
     HRegionServer server = cluster.getRegionServer(tableRegionIndex);
     printOutRegions(server, "Initial regions: ");

-    this.admin.split(hri.getRegionNameAsString());
+    this.admin.splitRegion(hri.getRegionName());
     checkAndGetDaughters(tableName);

     HMaster master = abortAndWaitForMaster();
@@ -750,7 +749,7 @@ public class TestSplitTransactionOnCluster {
     }
   }

-  private void insertData(final TableName tableName, HBaseAdmin admin, Table t) throws IOException,
+  private void insertData(final TableName tableName, Admin admin, Table t) throws IOException,
       InterruptedException {
     Put p = new Put(Bytes.toBytes("row1"));
     p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("1"));
@@ -1142,7 +1141,7 @@ public class TestSplitTransactionOnCluster {

   private void split(final HRegionInfo hri, final HRegionServer server, final int regionCount)
       throws IOException, InterruptedException {
-    this.admin.split(hri.getRegionNameAsString());
+    this.admin.splitRegion(hri.getRegionName());
     for (int i = 0; ProtobufUtil.getOnlineRegions(
       server.getRSRpcServices()).size() <= regionCount && i < 300; i++) {
       LOG.debug("Waiting on region to split");

@@ -33,9 +33,9 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
@@ -77,7 +77,7 @@ public class TestSplitWalDataLoss {
     testUtil.getConfiguration().setInt("hbase.regionserver.msginterval", 30000);
     testUtil.getConfiguration().setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false);
     testUtil.startMiniCluster(2);
-    HBaseAdmin admin = testUtil.getHBaseAdmin();
+    Admin admin = testUtil.getHBaseAdmin();
     admin.createNamespace(namespace);
     admin.createTable(new HTableDescriptor(tableName).addFamily(new HColumnDescriptor(family)));
     testUtil.waitTableAvailable(tableName);

@@ -32,9 +32,9 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
@@ -80,7 +80,7 @@ public class TestCompactionWithThroughputController {
   }

   private Store prepareData() throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(tableName)) {
       admin.disableTable(tableName);
       admin.deleteTable(tableName);

@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -521,7 +520,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
     final String colFam = "cf1";
     final int numOfTables = 3;

-    HBaseAdmin hadmin = utility1.getHBaseAdmin();
+    Admin hadmin = utility1.getHBaseAdmin();

     // Create Tables
     for (int i = 0; i < numOfTables; i++) {
@@ -552,9 +551,9 @@ public class TestReplicationSmallTests extends TestReplicationBase {

     // drop tables
     for (int i = 0; i < numOfTables; i++) {
-      String ht = tName + i;
-      hadmin.disableTable(ht);
-      hadmin.deleteTable(ht);
+      TableName tableName = TableName.valueOf(tName + i);
+      hadmin.disableTable(tableName);
+      hadmin.deleteTable(tableName);
     }

     hadmin.close();

@@ -37,10 +37,10 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
@@ -141,10 +141,10 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
     HColumnDescriptor desc = new HColumnDescriptor(fam);
     desc.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
     table.addFamily(desc);
-    try (HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin()) {
+    try (Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin()) {
       hBaseAdmin.createTable(table);
     }
-    try (HBaseAdmin hBaseAdmin1 = TEST_UTIL1.getHBaseAdmin()){
+    try (Admin hBaseAdmin1 = TEST_UTIL1.getHBaseAdmin()){
       hBaseAdmin1.createTable(table);
     }
     addLabels();

@@ -45,11 +45,11 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagRewriteCell;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -186,7 +186,7 @@ public class TestVisibilityLabelsReplication {
     // Wait for the labels table to become available
     TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
     TEST_UTIL1.startMiniCluster(1);
-    HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
+    Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
     HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
     HColumnDescriptor desc = new HColumnDescriptor(fam);
     desc.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
@@ -198,7 +198,7 @@ public class TestVisibilityLabelsReplication {
         hBaseAdmin.close();
       }
     }
-    HBaseAdmin hBaseAdmin1 = TEST_UTIL1.getHBaseAdmin();
+    Admin hBaseAdmin1 = TEST_UTIL1.getHBaseAdmin();
     try {
       hBaseAdmin1.createTable(table);
     } finally {
@@ -272,8 +272,6 @@ public class BaseTestHBaseFsck {
    *
    * It will set tbl which needs to be closed after test
    *
-   * @param tableName
-   * @param replicaCount
    * @throws Exception
    */
   void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception {
@@ -296,7 +294,7 @@ public class BaseTestHBaseFsck {
   /**
    * Setup a clean table with a mob-enabled column.
    *
-   * @param tableName The name of a table to be created.
+   * @param tablename The name of a table to be created.
    * @throws Exception
    */
   void setupMobTable(TableName tablename) throws Exception {
@@ -349,7 +347,7 @@ public class BaseTestHBaseFsck {
   /**
    * Get region info from local cluster.
    */
-  Map<ServerName, List<String>> getDeployedHRIs(final HBaseAdmin admin) throws IOException {
+  Map<ServerName, List<String>> getDeployedHRIs(final Admin admin) throws IOException {
     ClusterStatus status = admin.getClusterStatus();
     Collection<ServerName> regionServers = status.getServers();
     Map<ServerName, List<String>> mm =
@@ -190,7 +190,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
     // different regions with the same start/endkeys since it doesn't
     // differentiate on ts/regionId! We actually need to recheck
     // deployments!
-    while (findDeployedHSI(getDeployedHRIs((HBaseAdmin) admin), hriDupe) == null) {
+    while (findDeployedHSI(getDeployedHRIs(admin), hriDupe) == null) {
       Thread.sleep(250);
     }

@@ -158,22 +158,6 @@ public class OfflineMetaRebuildTestCore {
     tbl.put(puts);
   }

-  /**
-   * delete table in preparation for next test
-   *
-   * @param tablename
-   * @throws IOException
-   */
-  void deleteTable(HBaseAdmin admin, String tablename) throws IOException {
-    try {
-      byte[] tbytes = Bytes.toBytes(tablename);
-      admin.disableTable(tbytes);
-      admin.deleteTable(tbytes);
-    } catch (Exception e) {
-      // Do nothing.
-    }
-  }
-
   protected void deleteRegion(Configuration conf, final Table tbl,
     byte[] startKey, byte[] endKey) throws IOException {

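The helper removed above resolved a String name through byte[]-flavored Admin calls; surviving callers use TableName directly. A hedged sketch of the TableName-based equivalent (the class and method names here are illustrative, not from the commit):

[source,java]
----
import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

final class TableCleanup {
  // Illustrative stand-in for the deleted byte[]-based deleteTable helper.
  static void deleteTable(Admin admin, String tablename) throws IOException {
    TableName name = TableName.valueOf(tablename);
    if (admin.tableExists(name)) { // check up front rather than swallowing exceptions
      admin.disableTable(name);
      admin.deleteTable(name);
    }
  }
}
----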
@@ -51,30 +51,34 @@ module Hbase
     #----------------------------------------------------------------------------------------------
     # Requests a table or region flush
     def flush(table_or_region_name)
-      @admin.flush(table_or_region_name)
+      begin
+        @admin.flushRegion(table_or_region_name.to_java_bytes);
+      rescue java.lang.IllegalArgumentException => e
+        # Unknown region. Try table.
+        @admin.flush(TableName.valueOf(table_or_region_name));
+      end
     end

     #----------------------------------------------------------------------------------------------
     # Requests a table or region or column family compaction
     def compact(table_or_region_name, family = nil, type = "NORMAL")
+      family_bytes = nil
+      unless family.nil?
+        family_bytes = family.to_java_bytes
+      end
+      compact_type = nil
       if type == "NORMAL"
-        if family == nil
-          @admin.compact(table_or_region_name)
-        else
-          # We are compacting a column family within a region.
-          @admin.compact(table_or_region_name, family)
-        end
+        compact_type = org.apache.hadoop.hbase.client.Admin::CompactType::NORMAL
       elsif type == "MOB"
-        if family == nil
-          @admin.compact(org.apache.hadoop.hbase.TableName.valueOf(table_or_region_name),
-            org.apache.hadoop.hbase.client.Admin::CompactType::MOB)
-        else
-          # We are compacting a mob column family within a table.
-          @admin.compact(org.apache.hadoop.hbase.TableName.valueOf(table_or_region_name), family.to_java_bytes,
-            org.apache.hadoop.hbase.client.Admin::CompactType::MOB)
-        end
+        compact_type = org.apache.hadoop.hbase.client.Admin::CompactType::MOB
       else
-        raise ArgumentError, "only NORMAL or MOB accepted for type!"
+        raise ArgumentError, "only NORMAL or MOB accepted for type!"
       end
+
+      begin
+        @admin.compactRegion(table_or_region_name.to_java_bytes, family_bytes, false)
+      rescue java.lang.IllegalArgumentException => e
+        @admin.compact(TableName.valueOf(table_or_region_name), family_bytes, false, compact_type)
+      end
     end

@@ -86,25 +90,24 @@ module Hbase
     #----------------------------------------------------------------------------------------------
     # Requests a table or region or column family major compaction
     def major_compact(table_or_region_name, family = nil, type = "NORMAL")
+      family_bytes = nil
+      unless family.nil?
+        family_bytes = family.to_java_bytes
+      end
+      compact_type = nil
       if type == "NORMAL"
-        if family == nil
-          @admin.majorCompact(table_or_region_name)
-        else
-          # We are major compacting a column family within a region or table.
-          @admin.majorCompact(table_or_region_name, family)
-        end
+        compact_type = org.apache.hadoop.hbase.client.Admin::CompactType::NORMAL
       elsif type == "MOB"
-        if family == nil
-          @admin.majorCompact(org.apache.hadoop.hbase.TableName.valueOf(table_or_region_name),
-            org.apache.hadoop.hbase.client.Admin::CompactType::MOB)
-        else
-          # We are major compacting a mob column family within a table.
-          @admin.majorCompact(org.apache.hadoop.hbase.TableName.valueOf(table_or_region_name),
-            family.to_java_bytes, org.apache.hadoop.hbase.client.Admin::CompactType::MOB)
-        end
+        compact_type = org.apache.hadoop.hbase.client.Admin::CompactType::MOB
       else
         raise ArgumentError, "only NORMAL or MOB accepted for type!"
       end
+
+      begin
+        @admin.majorCompactRegion(table_or_region_name.to_java_bytes, family_bytes)
+      rescue java.lang.IllegalArgumentException => e
+        @admin.majorCompact(TableName.valueOf(table_or_region_name), family_bytes, compact_type)
+      end
     end

     #----------------------------------------------------------------------------------------------
@@ -117,11 +120,15 @@ module Hbase

     #----------------------------------------------------------------------------------------------
     # Requests a table or region split
-    def split(table_or_region_name, split_point)
-      if split_point == nil
-        @admin.split(table_or_region_name)
-      else
-        @admin.split(table_or_region_name, split_point)
+    def split(table_or_region_name, split_point = nil)
+      split_point_bytes = nil
+      unless split_point.nil?
+        split_point_bytes = split_point.to_java_bytes
+      end
+      begin
+        @admin.splitRegion(table_or_region_name.to_java_bytes, split_point_bytes)
+      rescue java.lang.IllegalArgumentException => e
+        @admin.split(TableName.valueOf(table_or_region_name), split_point_bytes)
       end
     end

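The three shell methods above share one dispatch idiom: treat the argument as a region name first, and fall back to a table-level call when the region lookup raises IllegalArgumentException. The same idiom in Java against the Admin API (a sketch; FlushHelper and nameOrRegion are placeholders):

[source,java]
----
import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

final class FlushHelper {
  // Region-first dispatch, mirroring the shell's flush implementation above.
  static void flushTableOrRegion(Admin admin, String nameOrRegion) throws IOException {
    try {
      admin.flushRegion(Bytes.toBytes(nameOrRegion));
    } catch (IllegalArgumentException e) {
      // Not a known region name; treat it as a table name instead.
      admin.flush(TableName.valueOf(nameOrRegion));
    }
  }
}
----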
@@ -68,7 +68,6 @@ import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.OperationWithAttributes;
 import org.apache.hadoop.hbase.client.Put;
@@ -772,26 +771,34 @@ public class ThriftServerRunner implements Runnable {
      }
    }

+    // ThriftServerRunner.compact should be deprecated and replaced with methods specific to
+    // table and region.
    @Override
    public void compact(ByteBuffer tableNameOrRegionName) throws IOError {
      try {
-        // TODO: HBaseAdmin.compact(byte[]) deprecated and not trivial to replace here.
-        // ThriftServerRunner.compact should be deprecated and replaced with methods specific to
-        // table and region.
-        ((HBaseAdmin) getAdmin()).compact(getBytes(tableNameOrRegionName));
+        try {
+          getAdmin().compactRegion(getBytes(tableNameOrRegionName));
+        } catch (IllegalArgumentException e) {
+          // Invalid region, try table
+          getAdmin().compact(TableName.valueOf(getBytes(tableNameOrRegionName)));
+        }
      } catch (IOException e) {
        LOG.warn(e.getMessage(), e);
        throw new IOError(Throwables.getStackTraceAsString(e));
      }
    }

+    // ThriftServerRunner.majorCompact should be deprecated and replaced with methods specific
+    // to table and region.
    @Override
    public void majorCompact(ByteBuffer tableNameOrRegionName) throws IOError {
      try {
-        // TODO: HBaseAdmin.majorCompact(byte[]) deprecated and not trivial to replace here.
-        // ThriftServerRunner.majorCompact should be deprecated and replaced with methods specific
-        // to table and region.
-        ((HBaseAdmin) getAdmin()).majorCompact(getBytes(tableNameOrRegionName));
+        try {
+          getAdmin().compactRegion(getBytes(tableNameOrRegionName));
+        } catch (IllegalArgumentException e) {
+          // Invalid region, try table
+          getAdmin().compact(TableName.valueOf(getBytes(tableNameOrRegionName)));
+        }
      } catch (IOException e) {
        LOG.warn(e.getMessage(), e);
        throw new IOError(Throwables.getStackTraceAsString(e));
@@ -396,10 +396,11 @@ coprocessor.jar| org.myname.hbase.Coprocessor.RegionObserverExample|1073741823|'
 +
 [source,java]
 ----
-String tableName = "users";
+TableName tableName = TableName.valueOf("users");
 String path = "hdfs://<namenode>:<port>/user/<hadoop-user>/coprocessor.jar";
 Configuration conf = HBaseConfiguration.create();
-HBaseAdmin admin = new HBaseAdmin(conf);
+Connection connection = ConnectionFactory.createConnection(conf);
+Admin admin = connection.getAdmin();
 admin.disableTable(tableName);
 HTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);
 HColumnDescriptor columnFamily1 = new HColumnDescriptor("personalDet");
@@ -460,10 +461,11 @@ attached to this table, if any. For example:
 +
 [source,java]
 ----
-String tableName = "users";
+TableName tableName = TableName.valueOf("users");
 String path = "hdfs://<namenode>:<port>/user/<hadoop-user>/coprocessor.jar";
 Configuration conf = HBaseConfiguration.create();
-HBaseAdmin admin = new HBaseAdmin(conf);
+Connection connection = ConnectionFactory.createConnection(conf);
+Admin admin = connection.getAdmin();
 admin.disableTable(tableName);
 HTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);
 HColumnDescriptor columnFamily1 = new HColumnDescriptor("personalDet");
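The updated documentation snippets leave the Connection open for brevity; real client code should release both handles. A minimal sketch with cleanup, assuming the 'users' table from the examples above:

[source,java]
----
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CoprocessorTableUpdate {
  public static void main(String[] args) throws IOException {
    TableName tableName = TableName.valueOf("users");
    Configuration conf = HBaseConfiguration.create();
    // try-with-resources closes the Admin first, then the Connection.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.disableTable(tableName);
      // ... modify the table descriptor to attach the coprocessor here ...
      admin.enableTable(tableName);
    }
  }
}
----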
@@ -675,12 +675,13 @@ This example lists HBase tables, creates a new table, and adds a row to it.
 [source, scala]
 ----
 import org.apache.hadoop.hbase.HBaseConfiguration
-import org.apache.hadoop.hbase.client.{HBaseAdmin,HTable,Put,Get}
+import org.apache.hadoop.hbase.client.{Connection,ConnectionFactory,HBaseAdmin,HTable,Put,Get}
 import org.apache.hadoop.hbase.util.Bytes


 val conf = new HBaseConfiguration()
-val admin = new HBaseAdmin(conf)
+val connection = ConnectionFactory.createConnection(conf);
+val admin = connection.getAdmin();

 // list the tables
 val listtables=admin.listTables()