HBASE-11068 Update code to use Admin factory method instead of constructor

This commit is contained in:
stack 2014-08-04 11:42:38 -07:00
parent 757b13dea4
commit e91e2659a7
101 changed files with 522 additions and 432 deletions

View File

@ -126,7 +126,7 @@ public class IntegrationTestIngest extends IntegrationTestBase {
}
private void deleteTableIfNecessary() throws IOException {
if (util.getHBaseAdmin().tableExists(getTablename())) {
if (util.getHBaseAdmin().tableExists(TableName.valueOf(getTablename()))) {
util.deleteTable(Bytes.toBytes(getTablename()));
}
}

View File

@ -21,6 +21,7 @@ import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
import org.apache.hadoop.hbase.io.hfile.HFile;
@ -78,20 +79,20 @@ public class IntegrationTestIngestWithEncryption extends IntegrationTestIngest {
// Update the test table schema so HFiles from this point will be written with
// encryption features enabled.
final HBaseAdmin admin = util.getHBaseAdmin();
final Admin admin = util.getHBaseAdmin();
HTableDescriptor tableDescriptor =
new HTableDescriptor(admin.getTableDescriptor(Bytes.toBytes(getTablename())));
new HTableDescriptor(admin.getTableDescriptor(TableName.valueOf(getTablename())));
for (HColumnDescriptor columnDescriptor: tableDescriptor.getColumnFamilies()) {
columnDescriptor.setEncryptionType("AES");
LOG.info("Updating CF schema for " + getTablename() + "." +
columnDescriptor.getNameAsString());
admin.disableTable(getTablename());
admin.modifyColumn(getTablename(), columnDescriptor);
admin.enableTable(getTablename());
admin.disableTable(TableName.valueOf(getTablename()));
admin.modifyColumn(TableName.valueOf(getTablename()), columnDescriptor);
admin.enableTable(TableName.valueOf(getTablename()));
util.waitFor(30000, 1000, true, new Predicate<IOException>() {
@Override
public boolean evaluate() throws IOException {
return admin.isTableAvailable(getTablename());
return admin.isTableAvailable(TableName.valueOf(getTablename()));
}
});
}

View File

@ -24,6 +24,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
@ -49,7 +50,7 @@ public class IntegrationTestManyRegions {
protected static final Log LOG
= LogFactory.getLog(IntegrationTestManyRegions.class);
protected static final String TABLE_NAME = CLASS_NAME;
protected static final TableName TABLE_NAME = TableName.valueOf(CLASS_NAME);
protected static final String COLUMN_NAME = "f";
protected static final String REGION_COUNT_KEY
= String.format("hbase.%s.regions", CLASS_NAME);
@ -80,7 +81,7 @@ public class IntegrationTestManyRegions {
util.initializeCluster(REGION_SERVER_COUNT);
LOG.info("Cluster initialized");
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
if (admin.tableExists(TABLE_NAME)) {
LOG.info(String.format("Deleting existing table %s.", TABLE_NAME));
if (admin.isTableEnabled(TABLE_NAME)) admin.disableTable(TABLE_NAME);
@ -93,7 +94,7 @@ public class IntegrationTestManyRegions {
@After
public void tearDown() throws IOException {
LOG.info("Cleaning up after test.");
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
if (admin.tableExists(TABLE_NAME)) {
if (admin.isTableEnabled(TABLE_NAME)) admin.disableTable(TABLE_NAME);
admin.deleteTable(TABLE_NAME);
@ -122,10 +123,10 @@ public class IntegrationTestManyRegions {
private static class Worker implements Runnable {
private final CountDownLatch doneSignal;
private final HBaseAdmin admin;
private final Admin admin;
private boolean success = false;
public Worker(final CountDownLatch doneSignal, final HBaseAdmin admin) {
public Worker(final CountDownLatch doneSignal, final Admin admin) {
this.doneSignal = doneSignal;
this.admin = admin;
}
@ -137,7 +138,7 @@ public class IntegrationTestManyRegions {
@Override
public void run() {
long startTime, endTime;
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
SplitAlgorithm algo = new RegionSplitter.HexStringSplit();
byte[][] splits = algo.split(REGION_COUNT);

View File

@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.chaos.actions.RestartRsHoldingTableAction;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.chaos.policies.PeriodicRandomActionPolicy;
import org.apache.hadoop.hbase.chaos.policies.Policy;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
@ -86,9 +87,9 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
*/
static class PerfEvalCallable implements Callable<TimingResult> {
private final Queue<String> argv = new LinkedList<String>();
private final HBaseAdmin admin;
private final Admin admin;
public PerfEvalCallable(HBaseAdmin admin, String argv) {
public PerfEvalCallable(Admin admin, String argv) {
// TODO: this API is awkward, should take HConnection, not HBaseAdmin
this.admin = admin;
this.argv.addAll(Arrays.asList(argv.split(" ")));

View File

@ -49,8 +49,8 @@ import org.junit.Assert;
@InterfaceAudience.Private
public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
private static final Log LOG = LogFactory.getLog(StripeCompactionsPerformanceEvaluation.class);
private static final String TABLE_NAME =
StripeCompactionsPerformanceEvaluation.class.getSimpleName();
private static final TableName TABLE_NAME =
TableName.valueOf(StripeCompactionsPerformanceEvaluation.class.getSimpleName());
private static final byte[] COLUMN_FAMILY = Bytes.toBytes("CF");
private static final int MIN_NUM_SERVERS = 1;
@ -199,9 +199,8 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
status(String.format("%s test starting on %d servers; preloading 0 to %d and writing to %d",
description, numServers, startKey, endKey));
TableName tn = TableName.valueOf(TABLE_NAME);
if (preloadKeys > 0) {
MultiThreadedWriter preloader = new MultiThreadedWriter(dataGen, conf, tn);
MultiThreadedWriter preloader = new MultiThreadedWriter(dataGen, conf, TABLE_NAME);
long time = System.currentTimeMillis();
preloader.start(0, startKey, writeThreads);
preloader.waitForFinish();
@ -214,8 +213,8 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
Thread.sleep(waitTime);
}
MultiThreadedWriter writer = new MultiThreadedWriter(dataGen, conf, tn);
MultiThreadedReader reader = new MultiThreadedReader(dataGen, conf, tn, 100);
MultiThreadedWriter writer = new MultiThreadedWriter(dataGen, conf, TABLE_NAME);
MultiThreadedReader reader = new MultiThreadedReader(dataGen, conf, TABLE_NAME, 100);
// reader.getMetrics().enable();
reader.linkToWriter(writer);

View File

@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
@ -129,7 +130,7 @@ public class Action {
LOG.info("Moving " + victimRegions.size() + " regions from " + fromServers.size()
+ " servers to " + toServers.size() + " different servers");
HBaseAdmin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
Admin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
for (byte[] victimRegion : victimRegions) {
int targetIx = RandomUtils.nextInt(toServers.size());
admin.move(victimRegion, Bytes.toBytes(toServers.get(targetIx).getServerName()));
@ -137,7 +138,7 @@ public class Action {
}
protected void forceBalancer() throws Exception {
HBaseAdmin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
Admin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
boolean result = admin.balancer();
if (!result) {
LOG.error("Balancer didn't succeed");

View File

@ -23,6 +23,8 @@ import java.io.IOException;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
@ -30,13 +32,11 @@ import org.apache.hadoop.hbase.util.Bytes;
* Action that adds a column family to a table.
*/
public class AddColumnAction extends Action {
private final byte[] tableName;
private final String tableNameString;
private HBaseAdmin admin;
private final TableName tableName;
private Admin admin;
public AddColumnAction(String tableName) {
tableNameString = tableName;
this.tableName = Bytes.toBytes(tableName);
this.tableName = TableName.valueOf(tableName);
}
@Override
@ -55,7 +55,7 @@ public class AddColumnAction extends Action {
columnDescriptor = new HColumnDescriptor(RandomStringUtils.randomAlphabetic(5));
}
LOG.debug("Performing action: Adding " + columnDescriptor + " to " + tableNameString);
LOG.debug("Performing action: Adding " + columnDescriptor + " to " + tableName);
tableDescriptor.addFamily(columnDescriptor);
admin.modifyTable(tableName, tableDescriptor);

View File

@ -23,6 +23,8 @@ import java.util.Random;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;
@ -32,30 +34,27 @@ import org.apache.hadoop.hbase.util.Bytes;
* table
*/
public class ChangeBloomFilterAction extends Action {
private final byte[] tableNameBytes;
private final long sleepTime;
private final String tableName;
private final TableName tableName;
public ChangeBloomFilterAction(String tableName) {
this(-1, tableName);
}
public ChangeBloomFilterAction(int sleepTime, String tableName) {
this.tableNameBytes = Bytes.toBytes(tableName);
this.sleepTime = sleepTime;
this.tableName = tableName;
this.tableName = TableName.valueOf(tableName);
}
@Override
public void perform() throws Exception {
Random random = new Random();
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
LOG.info("Performing action: Change bloom filter on all columns of table "
+ tableName);
HTableDescriptor tableDescriptor = admin.getTableDescriptor(Bytes
.toBytes(tableName));
HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();
if (columnDescriptors == null || columnDescriptors.length == 0) {

View File

@ -23,6 +23,8 @@ import java.util.Random;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.util.Bytes;
@ -31,15 +33,15 @@ import org.apache.hadoop.hbase.util.Bytes;
* Action that changes the compression algorithm on a column family from a list of tables.
*/
public class ChangeCompressionAction extends Action {
private final byte[] tableName;
private final TableName tableName;
private final String tableNameString;
private HBaseAdmin admin;
private Admin admin;
private Random random;
public ChangeCompressionAction(String tableName) {
tableNameString = tableName;
this.tableName = Bytes.toBytes(tableName);
this.tableName = TableName.valueOf(tableName);
this.random = new Random();
}

View File

@ -23,6 +23,8 @@ import java.util.Random;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;
@ -31,15 +33,13 @@ import org.apache.hadoop.hbase.util.Bytes;
* Action that changes the encoding on a column family from a list of tables.
*/
public class ChangeEncodingAction extends Action {
private final byte[] tableName;
private final String tableNameString;
private final TableName tableName;
private HBaseAdmin admin;
private Admin admin;
private Random random;
public ChangeEncodingAction(String tableName) {
tableNameString = tableName;
this.tableName = Bytes.toBytes(tableName);
this.tableName = TableName.valueOf(tableName);
this.random = new Random();
}
@ -58,7 +58,7 @@ public class ChangeEncodingAction extends Action {
return;
}
LOG.debug("Performing action: Changing encodings on " + tableNameString);
LOG.debug("Performing action: Changing encodings on " + tableName);
// possible DataBlockEncoding id's
int[] possibleIds = {0, 2, 3, 4/*, 6*/};
for (HColumnDescriptor descriptor : columnDescriptors) {

View File

@ -23,6 +23,8 @@ import java.util.Random;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
@ -32,15 +34,15 @@ import org.apache.hadoop.hbase.util.Bytes;
* Always keeps at least 1 as the number of versions.
*/
public class ChangeVersionsAction extends Action {
private final byte[] tableName;
private final TableName tableName;
private final String tableNameString;
private HBaseAdmin admin;
private Admin admin;
private Random random;
public ChangeVersionsAction(String tableName) {
tableNameString = tableName;
this.tableName = Bytes.toBytes(tableName);
this.tableName = TableName.valueOf(tableName);
this.random = new Random();
}

View File

@ -23,7 +23,9 @@ import java.util.List;
import org.apache.commons.lang.math.RandomUtils;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
@ -31,10 +33,9 @@ import org.apache.hadoop.hbase.util.Bytes;
* Action that queues a compaction of a random region from the table.
*/
public class CompactRandomRegionOfTableAction extends Action {
private final byte[] tableNameBytes;
private final int majorRatio;
private final long sleepTime;
private final String tableName;
private final TableName tableName;
public CompactRandomRegionOfTableAction(
String tableName, float majorRatio) {
@ -43,21 +44,20 @@ public class CompactRandomRegionOfTableAction extends Action {
public CompactRandomRegionOfTableAction(
int sleepTime, String tableName, float majorRatio) {
this.tableNameBytes = Bytes.toBytes(tableName);
this.majorRatio = (int) (100 * majorRatio);
this.sleepTime = sleepTime;
this.tableName = tableName;
this.tableName = TableName.valueOf(tableName);
}
@Override
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
boolean major = RandomUtils.nextInt(100) < majorRatio;
LOG.info("Performing action: Compact random region of table "
+ tableName + ", major=" + major);
List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
List<HRegionInfo> regions = admin.getTableRegions(tableName);
if (regions == null || regions.isEmpty()) {
LOG.info("Table " + tableName + " doesn't have regions to compact");
return;

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.chaos.actions;
import org.apache.commons.lang.math.RandomUtils;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
@ -47,7 +48,7 @@ public class CompactTableAction extends Action {
@Override
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
boolean major = RandomUtils.nextInt(100) < majorRatio;
LOG.info("Performing action: Compact table " + tableName + ", major=" + major);

View File

@ -22,7 +22,9 @@ import java.util.List;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
@ -30,27 +32,25 @@ import org.apache.hadoop.hbase.util.Bytes;
* Action that tries to flush a random region of a table.
*/
public class FlushRandomRegionOfTableAction extends Action {
private final byte[] tableNameBytes;
private final long sleepTime;
private final String tableName;
private final TableName tableName;
public FlushRandomRegionOfTableAction(String tableName) {
this (-1, tableName);
}
public FlushRandomRegionOfTableAction(int sleepTime, String tableName) {
this.tableNameBytes = Bytes.toBytes(tableName);
this.sleepTime = sleepTime;
this.tableName = tableName;
this.tableName = TableName.valueOf(tableName);
}
@Override
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
LOG.info("Performing action: Flush random region of table " + tableName);
List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
List<HRegionInfo> regions = admin.getTableRegions(tableName);
if (regions == null || regions.isEmpty()) {
LOG.info("Table " + tableName + " doesn't have regions to flush");
return;

View File

@ -19,6 +19,8 @@
package org.apache.hadoop.hbase.chaos.actions;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
@ -26,28 +28,26 @@ import org.apache.hadoop.hbase.util.Bytes;
* Action that tries to flush a table.
*/
public class FlushTableAction extends Action {
private final byte[] tableNameBytes;
private final long sleepTime;
private final String tableName;
private final TableName tableName;
public FlushTableAction(String tableName) {
this(-1, tableName);
}
public FlushTableAction(int sleepTime, String tableName) {
this.tableNameBytes = Bytes.toBytes(tableName);
this.sleepTime = sleepTime;
this.tableName = tableName;
this.tableName = TableName.valueOf(tableName);
}
@Override
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
LOG.info("Performing action: Flush table " + tableName);
try {
admin.flush(tableNameBytes);
admin.flush(tableName.toBytes());
} catch (Exception ex) {
LOG.warn("Flush failed, might be caused by other chaos: " + ex.getMessage());
}

View File

@ -23,6 +23,8 @@ import java.util.List;
import org.apache.commons.lang.math.RandomUtils;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
@ -30,8 +32,7 @@ import org.apache.hadoop.hbase.util.Bytes;
* Action to merge regions of a table.
*/
public class MergeRandomAdjacentRegionsOfTableAction extends Action {
private final byte[] tableNameBytes;
private final String tableName;
private final TableName tableName;
private final long sleepTime;
public MergeRandomAdjacentRegionsOfTableAction(String tableName) {
@ -39,18 +40,17 @@ public class MergeRandomAdjacentRegionsOfTableAction extends Action {
}
public MergeRandomAdjacentRegionsOfTableAction(int sleepTime, String tableName) {
this.tableNameBytes = Bytes.toBytes(tableName);
this.tableName = tableName;
this.tableName = TableName.valueOf(tableName);
this.sleepTime = sleepTime;
}
@Override
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
LOG.info("Performing action: Merge random adjacent regions of table " + tableName);
List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
List<HRegionInfo> regions = admin.getTableRegions(tableName);
if (regions == null || regions.size() < 2) {
LOG.info("Table " + tableName + " doesn't have enough regions to merge");
return;

View File

@ -22,7 +22,9 @@ import java.util.List;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
@ -31,8 +33,7 @@ import org.apache.hadoop.hbase.util.Bytes;
*/
public class MoveRandomRegionOfTableAction extends Action {
private final long sleepTime;
private final byte[] tableNameBytes;
private final String tableName;
private final TableName tableName;
public MoveRandomRegionOfTableAction(String tableName) {
this(-1, tableName);
@ -40,8 +41,7 @@ public class MoveRandomRegionOfTableAction extends Action {
public MoveRandomRegionOfTableAction(long sleepTime, String tableName) {
this.sleepTime = sleepTime;
this.tableNameBytes = Bytes.toBytes(tableName);
this.tableName = tableName;
this.tableName = TableName.valueOf(tableName);
}
@Override
@ -51,10 +51,10 @@ public class MoveRandomRegionOfTableAction extends Action {
}
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
LOG.info("Performing action: Move random region of table " + tableName);
List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
List<HRegionInfo> regions = admin.getTableRegions(tableName);
if (regions == null || regions.isEmpty()) {
LOG.info("Table " + tableName + " doesn't have regions to move");
return;

View File

@ -25,7 +25,9 @@ import java.util.List;
import org.apache.commons.lang.math.RandomUtils;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
@ -34,8 +36,7 @@ import org.apache.hadoop.hbase.util.Bytes;
*/
public class MoveRegionsOfTableAction extends Action {
private final long sleepTime;
private final byte[] tableNameBytes;
private final String tableName;
private final TableName tableName;
private final long maxTime;
public MoveRegionsOfTableAction(String tableName) {
@ -44,8 +45,7 @@ public class MoveRegionsOfTableAction extends Action {
public MoveRegionsOfTableAction(long sleepTime, long maxSleepTime, String tableName) {
this.sleepTime = sleepTime;
this.tableNameBytes = Bytes.toBytes(tableName);
this.tableName = tableName;
this.tableName = TableName.valueOf(tableName);
this.maxTime = maxSleepTime;
}
@ -55,12 +55,12 @@ public class MoveRegionsOfTableAction extends Action {
Thread.sleep(sleepTime);
}
HBaseAdmin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
Admin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
Collection<ServerName> serversList = admin.getClusterStatus().getServers();
ServerName[] servers = serversList.toArray(new ServerName[serversList.size()]);
LOG.info("Performing action: Move regions of table " + tableName);
List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
List<HRegionInfo> regions = admin.getTableRegions(tableName);
if (regions == null || regions.isEmpty()) {
LOG.info("Table " + tableName + " doesn't have regions to move");
return;

View File

@ -24,6 +24,8 @@ import java.util.Set;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
@ -31,15 +33,15 @@ import org.apache.hadoop.hbase.util.Bytes;
* Action that removes a column family.
*/
public class RemoveColumnAction extends Action {
private final byte[] tableName;
private final TableName tableName;
private final Set<String> protectedColumns;
private final String tableNameString;
private HBaseAdmin admin;
private Admin admin;
private Random random;
public RemoveColumnAction(String tableName, Set<String> protectedColumns) {
tableNameString = tableName;
this.tableName = Bytes.toBytes(tableName);
this.tableName = TableName.valueOf(tableName);
this.protectedColumns = protectedColumns;
random = new Random();
}

View File

@ -19,13 +19,15 @@
package org.apache.hadoop.hbase.chaos.actions;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
/**
* Action that tries to take a snapshot of a table.
*/
public class SnapshotTableAction extends Action {
private final String tableName;
private final TableName tableName;
private final long sleepTime;
public SnapshotTableAction(String tableName) {
@ -33,7 +35,7 @@ public class SnapshotTableAction extends Action {
}
public SnapshotTableAction(int sleepTime, String tableName) {
this.tableName = tableName;
this.tableName = TableName.valueOf(tableName);
this.sleepTime = sleepTime;
}
@ -41,7 +43,7 @@ public class SnapshotTableAction extends Action {
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
String snapshotName = tableName + "-it-" + System.currentTimeMillis();
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
LOG.info("Performing action: Snapshot table " + tableName);
admin.snapshot(snapshotName, tableName);

View File

@ -22,7 +22,9 @@ import java.util.List;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
@ -30,27 +32,25 @@ import org.apache.hadoop.hbase.util.Bytes;
* Action that tries to split a random region of a table.
*/
public class SplitRandomRegionOfTableAction extends Action {
private final byte[] tableNameBytes;
private final long sleepTime;
private final String tableName;
private final TableName tableName;
public SplitRandomRegionOfTableAction(String tableName) {
this(-1, tableName);
}
public SplitRandomRegionOfTableAction(int sleepTime, String tableName) {
this.tableNameBytes = Bytes.toBytes(tableName);
this.sleepTime = sleepTime;
this.tableName = tableName;
this.tableName = TableName.valueOf(tableName);
}
@Override
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
LOG.info("Performing action: Split random region of table " + tableName);
List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
List<HRegionInfo> regions = admin.getTableRegions(tableName);
if (regions == null || regions.isEmpty()) {
LOG.info("Table " + tableName + " doesn't have regions to split");
return;

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.chaos.actions;
import java.util.Random;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.TableName;
@ -39,7 +40,7 @@ public class TruncateTableAction extends Action {
@Override
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
boolean preserveSplits = random.nextBoolean();
LOG.info("Performing action: Truncate table " + tableName.getNameAsString() +

View File

@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.IntegrationTests;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -196,7 +197,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
if (replicaCount == NUM_REPLICA_COUNT_DEFAULT) return;
TableName t = TableName.valueOf(getTablename());
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
HTableDescriptor desc = admin.getTableDescriptor(t);
desc.addCoprocessor(SlowMeCoproScanOperations.class.getName());
HBaseTestingUtility.modifyTableSync(admin, desc);
@ -226,7 +227,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
}
private void setupTable() throws IOException, InterruptedException {
if (util.getHBaseAdmin().tableExists(getTablename())) {
if (util.getHBaseAdmin().tableExists(TableName.valueOf(getTablename()))) {
util.deleteTable(getTablename());
}

View File

@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.IntegrationTests;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
@ -274,7 +275,7 @@ public class IntegrationTestImportTsv implements Configurable, Tool {
} catch (Exception e) {
throw new IOException("Underlying MapReduce job failed. Aborting commit.", e);
} finally {
if (util.getHBaseAdmin().tableExists(table)) {
if (util.getHBaseAdmin().tableExists(TableName.valueOf(table))) {
util.deleteTable(table);
}
}

View File

@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.IntegrationTestBase;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.IntegrationTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@ -382,7 +383,7 @@ public void cleanUpCluster() throws Exception {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TEST_NAME));
htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
HBaseAdmin admin = getTestingUtil(getConf()).getHBaseAdmin();
Admin admin = getTestingUtil(getConf()).getHBaseAdmin();
admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), 40);
doLoad(getConf(), htd);

View File

@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.IntegrationTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@ -156,7 +157,7 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
// flush the table
LOG.info("Flushing the table");
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
admin.flush(getTablename());
// re-open the regions to make sure that the replicas are up to date
@ -166,8 +167,8 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
Threads.sleep(refreshTime);
} else {
LOG.info("Reopening the table");
admin.disableTable(getTablename());
admin.enableTable(getTablename());
admin.disableTable(TableName.valueOf(getTablename()));
admin.enableTable(TableName.valueOf(getTablename()));
}
// We should only start the ChaosMonkey after the readers are started and have cached

View File

@ -25,6 +25,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.IntegrationTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -58,11 +60,11 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
public static final String TABLE_NAME_DEFAULT = "SendTracesTable";
public static final String COLUMN_FAMILY_DEFAULT = "D";
private String tableName = TABLE_NAME_DEFAULT;
private String familyName = COLUMN_FAMILY_DEFAULT;
private TableName tableName = TableName.valueOf(TABLE_NAME_DEFAULT);
private byte[] familyName = Bytes.toBytes(COLUMN_FAMILY_DEFAULT);
private IntegrationTestingUtility util;
private Random random = new Random();
private HBaseAdmin admin;
private Admin admin;
private SpanReceiverHost receiverHost;
public static void main(String[] args) throws Exception {
@ -83,8 +85,8 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
String tableNameString = cmd.getOptionValue(TABLE_ARG, TABLE_NAME_DEFAULT);
String familyString = cmd.getOptionValue(CF_ARG, COLUMN_FAMILY_DEFAULT);
this.tableName = tableNameString;
this.familyName = familyString;
this.tableName = TableName.valueOf(tableNameString);
this.familyName = Bytes.toBytes(familyString);
}
@Override
@ -248,20 +250,18 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
Put p = new Put(Bytes.toBytes(rk));
for (int y = 0; y < 10; y++) {
random.nextBytes(value);
p.add(Bytes.toBytes(familyName),
Bytes.toBytes(random.nextLong()),
value);
p.add(familyName, Bytes.toBytes(random.nextLong()), value);
}
ht.put(p);
}
if ((x % 1000) == 0) {
admin.flush(Bytes.toBytes(tableName));
admin.flush(tableName.toBytes());
}
} finally {
traceScope.close();
}
}
admin.flush(Bytes.toBytes(tableName));
admin.flush(tableName.toBytes());
return rowKeys;
}

View File

@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -453,7 +454,7 @@ public final class Canary implements Tool {
* Canary entry point for specified table.
* @throws Exception
*/
public static void sniff(final HBaseAdmin admin, TableName tableName) throws Exception {
public static void sniff(final Admin admin, TableName tableName) throws Exception {
sniff(admin, new StdOutSink(), tableName.getNameAsString());
}
@ -461,10 +462,10 @@ public final class Canary implements Tool {
* Canary entry point for specified table.
* @throws Exception
*/
private static void sniff(final HBaseAdmin admin, final Sink sink, String tableName)
private static void sniff(final Admin admin, final Sink sink, String tableName)
throws Exception {
if (admin.isTableAvailable(tableName)) {
sniff(admin, sink, admin.getTableDescriptor(tableName.getBytes()));
if (admin.isTableAvailable(TableName.valueOf(tableName))) {
sniff(admin, sink, admin.getTableDescriptor(TableName.valueOf(tableName)));
} else {
LOG.warn(String.format("Table %s is not available", tableName));
}
@ -473,7 +474,7 @@ public final class Canary implements Tool {
/*
* Loops over regions that owns this table, and output some information abouts the state.
*/
private static void sniff(final HBaseAdmin admin, final Sink sink, HTableDescriptor tableDesc)
private static void sniff(final Admin admin, final Sink sink, HTableDescriptor tableDesc)
throws Exception {
HTable table = null;
@ -484,7 +485,7 @@ public final class Canary implements Tool {
}
try {
for (HRegionInfo region : admin.getTableRegions(tableDesc.getName())) {
for (HRegionInfo region : admin.getTableRegions(tableDesc.getTableName())) {
try {
sniffRegion(admin, sink, region, table);
} catch (Exception e) {
@ -502,7 +503,7 @@ public final class Canary implements Tool {
* failure.
*/
private static void sniffRegion(
final HBaseAdmin admin,
final Admin admin,
final Sink sink,
HRegionInfo region,
HTable table) throws Exception {

View File

@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
@ -143,7 +144,7 @@ public class HBaseFsckRepair {
* Contacts a region server and waits up to hbase.hbck.close.timeout ms
* (default 120s) to close the region. This bypasses the active hmaster.
*/
public static void closeRegionSilentlyAndWait(HBaseAdmin admin,
public static void closeRegionSilentlyAndWait(Admin admin,
ServerName server, HRegionInfo region) throws IOException, InterruptedException {
HConnection connection = admin.getConnection();
AdminService.BlockingInterface rs = connection.getAdmin(server);

View File

@ -54,11 +54,13 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@ -1468,7 +1470,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
/**
* Modify a table, synchronous. Waiting logic similar to that of {@code admin.rb#alter_status}.
*/
public static void modifyTableSync(HBaseAdmin admin, HTableDescriptor desc)
public static void modifyTableSync(Admin admin, HTableDescriptor desc)
throws IOException, InterruptedException {
admin.modifyTable(desc.getTableName(), desc);
Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
@ -1494,7 +1496,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
/**
* Set the number of Region replicas.
*/
public static void setReplicas(HBaseAdmin admin, TableName table, int replicaCount)
public static void setReplicas(Admin admin, TableName table, int replicaCount)
throws IOException, InterruptedException {
admin.disableTable(table);
HTableDescriptor desc = admin.getTableDescriptor(table);
@ -2041,8 +2043,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
HConnection conn = table.getConnection();
conn.clearRegionCache();
// assign all the new regions IF table is enabled.
HBaseAdmin admin = getHBaseAdmin();
if (admin.isTableEnabled(table.getTableName())) {
Admin admin = getHBaseAdmin();
if (admin.isTableEnabled(table.getName())) {
for(HRegionInfo hri : newRegions) {
admin.assign(hri.getRegionName());
}
@ -2469,15 +2471,15 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
}
/**
* Returns a HBaseAdmin instance.
* Returns a Admin instance.
* This instance is shared between HBaseTestingUtility instance users.
* Closing it has no effect, it will be closed automatically when the
* cluster shutdowns
*
* @return The HBaseAdmin instance.
* @return An Admin instance.
* @throws IOException
*/
public synchronized HBaseAdmin getHBaseAdmin()
public synchronized Admin getHBaseAdmin()
throws IOException {
if (hbaseAdmin == null){
hbaseAdmin = new HBaseAdminForTests(getConfiguration());
@ -2648,7 +2650,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
waitTableAvailable(getHBaseAdmin(), table, 30000);
}
public void waitTableAvailable(HBaseAdmin admin, byte[] table)
public void waitTableAvailable(Admin admin, byte[] table)
throws InterruptedException, IOException {
waitTableAvailable(admin, table, 30000);
}
@ -2665,10 +2667,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
}
public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
public void waitTableAvailable(Admin admin, byte[] table, long timeoutMillis)
throws InterruptedException, IOException {
long startWait = System.currentTimeMillis();
while (!admin.isTableAvailable(table)) {
while (!admin.isTableAvailable(TableName.valueOf(table))) {
assertTrue("Timed out waiting for table to become available " +
Bytes.toStringBinary(table),
System.currentTimeMillis() - startWait < timeoutMillis);
@ -2690,7 +2692,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
waitTableEnabled(getHBaseAdmin(), table, 30000);
}
public void waitTableEnabled(HBaseAdmin admin, byte[] table)
public void waitTableEnabled(Admin admin, byte[] table)
throws InterruptedException, IOException {
waitTableEnabled(admin, table, 30000);
}
@ -2709,12 +2711,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
}
public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
public void waitTableEnabled(Admin admin, byte[] table, long timeoutMillis)
throws InterruptedException, IOException {
TableName tableName = TableName.valueOf(table);
long startWait = System.currentTimeMillis();
waitTableAvailable(admin, table, timeoutMillis);
long remainder = System.currentTimeMillis() - startWait;
while (!admin.isTableEnabled(table)) {
while (!admin.isTableEnabled(tableName)) {
assertTrue("Timed out waiting for table to become available and enabled " +
Bytes.toStringBinary(table),
System.currentTimeMillis() - remainder < timeoutMillis);
@ -2726,7 +2729,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
// Below we do a get. The get will retry if a NotServeringRegionException or a
// RegionOpeningException. It is crass but when done all will be online.
try {
Canary.sniff(admin, TableName.valueOf(table));
Canary.sniff(admin, tableName);
} catch (Exception e) {
throw new IOException(e);
}
@ -3276,7 +3279,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
}
int totalNumberOfRegions = 0;
HBaseAdmin admin = new HBaseAdmin(conf);
HConnection unmanagedConnection = HConnectionManager.createConnection(conf);
Admin admin = unmanagedConnection.getAdmin();
try {
// create a table a pre-splits regions.
// The number of splits is set as:
@ -3303,6 +3308,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
" already exists, continuing");
} finally {
admin.close();
unmanagedConnection.close();
}
return totalNumberOfRegions;
}

View File

@ -47,6 +47,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
@ -272,7 +273,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
* {@code opts.presplitRegions} is specified or when the existing table's
* region replica count doesn't match {@code opts.replicas}.
*/
static boolean checkTable(HBaseAdmin admin, TestOptions opts) throws IOException {
static boolean checkTable(Admin admin, TestOptions opts) throws IOException {
TableName tableName = TableName.valueOf(opts.tableName);
boolean needsDelete = false, exists = admin.tableExists(tableName);
boolean isReadCmd = opts.cmdName.toLowerCase().contains("read")

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.util.Bytes;
@ -76,7 +77,7 @@ public class TestHColumnDescriptorDefaultVersions {
@Test
public void testCreateTableWithDefault() throws IOException {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
// Create a table with one family
HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
@ -97,7 +98,7 @@ public class TestHColumnDescriptorDefaultVersions {
TEST_UTIL.getConfiguration().setInt("hbase.column.max.version", 3);
TEST_UTIL.startMiniCluster(1);
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
// Create a table with one family
HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
@ -119,7 +120,7 @@ public class TestHColumnDescriptorDefaultVersions {
TEST_UTIL.getConfiguration().setInt("hbase.column.max.version", 3);
TEST_UTIL.startMiniCluster(1);
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
// Create a table with one family
HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
HColumnDescriptor hcd =
@ -140,7 +141,7 @@ public class TestHColumnDescriptorDefaultVersions {
private void verifyHColumnDescriptor(int expected, final TableName tableName,
final byte[]... families) throws IOException {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
// Verify descriptor from master
HTableDescriptor htd = admin.getTableDescriptor(tableName);

View File

@ -29,6 +29,7 @@ import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
@ -209,7 +210,7 @@ public class TestMetaTableAccessor {
assertFalse(MetaTableAccessor.tableExists(hConnection, name));
UTIL.createTable(name, HConstants.CATALOG_FAMILY);
assertTrue(MetaTableAccessor.tableExists(hConnection, name));
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
admin.disableTable(name);
admin.deleteTable(name);
assertFalse(MetaTableAccessor.tableExists(hConnection, name));

View File

@ -33,6 +33,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -58,7 +59,7 @@ public class TestNamespace {
private static HMaster master;
protected final static int NUM_SLAVES_BASE = 4;
private static HBaseTestingUtility TEST_UTIL;
protected static HBaseAdmin admin;
protected static Admin admin;
protected static HBaseCluster cluster;
private static ZKNamespaceManager zkNamespaceManager;
private String prefix = "TestNamespace";
@ -198,8 +199,8 @@ public class TestNamespace {
String nsName = prefix+"_"+testName;
LOG.info(testName);
byte[] tableName = Bytes.toBytes("my_table");
byte[] tableNameFoo = Bytes.toBytes(nsName+":my_table");
TableName tableName = TableName.valueOf("my_table");
TableName tableNameFoo = TableName.valueOf(nsName+":my_table");
//create namespace and verify
admin.createNamespace(NamespaceDescriptor.create(nsName).build());
TEST_UTIL.createTable(tableName, Bytes.toBytes(nsName));
@ -276,13 +277,13 @@ public class TestNamespace {
@Test
public void createTableInSystemNamespace() throws Exception {
String tableName = "hbase:createTableInSystemNamespace";
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
TableName tableName = TableName.valueOf("hbase:createTableInSystemNamespace");
HTableDescriptor desc = new HTableDescriptor(tableName);
HColumnDescriptor colDesc = new HColumnDescriptor("cf1");
desc.addFamily(colDesc);
admin.createTable(desc);
assertEquals(0, admin.listTables().length);
assertTrue(admin.tableExists(Bytes.toBytes(tableName)));
assertTrue(admin.tableExists(tableName));
admin.disableTable(desc.getTableName());
admin.deleteTable(desc.getTableName());
}

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
@ -117,7 +118,7 @@ public class TestHFileArchiving {
TableName.valueOf("testRemovesRegionDirOnArchive");
UTIL.createTable(TABLE_NAME, TEST_FAM);
final HBaseAdmin admin = UTIL.getHBaseAdmin();
final Admin admin = UTIL.getHBaseAdmin();
// get the current store files for the region
List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);

View File

@ -88,9 +88,6 @@ public class TestAdmin {
final Log LOG = LogFactory.getLog(getClass());
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private Admin admin;
// We use actual HBaseAdmin instance instead of going via Admin interface in
// here because makes use of an internal HBA method (TODO: Fix.).
private HBaseAdmin rawAdminInstance;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
@ -110,7 +107,7 @@ public class TestAdmin {
@Before
public void setUp() throws Exception {
this.admin = this.rawAdminInstance = TEST_UTIL.getHBaseAdmin();
this.admin = TEST_UTIL.getHBaseAdmin();
}
@After
@ -151,11 +148,12 @@ public class TestAdmin {
@Test (timeout=300000)
public void testDeleteEditUnknownColumnFamilyAndOrTable() throws IOException {
// Test we get exception if we try to
final String nonexistent = "nonexistent";
HColumnDescriptor nonexistentHcd = new HColumnDescriptor(nonexistent);
final TableName nonexistentTable = TableName.valueOf("nonexistent");
final byte[] nonexistentColumn = Bytes.toBytes("nonexistent");
HColumnDescriptor nonexistentHcd = new HColumnDescriptor(nonexistentColumn);
Exception exception = null;
try {
this.admin.addColumn(TableName.valueOf(nonexistent), nonexistentHcd);
this.admin.addColumn(nonexistentTable, nonexistentHcd);
} catch (IOException e) {
exception = e;
}
@ -163,7 +161,7 @@ public class TestAdmin {
exception = null;
try {
this.admin.deleteTable(TableName.valueOf(nonexistent));
this.admin.deleteTable(nonexistentTable);
} catch (IOException e) {
exception = e;
}
@ -171,7 +169,7 @@ public class TestAdmin {
exception = null;
try {
this.admin.deleteColumn(TableName.valueOf(nonexistent), Bytes.toBytes(nonexistent));
this.admin.deleteColumn(nonexistentTable, nonexistentColumn);
} catch (IOException e) {
exception = e;
}
@ -179,7 +177,7 @@ public class TestAdmin {
exception = null;
try {
this.admin.disableTable(TableName.valueOf(nonexistent));
this.admin.disableTable(nonexistentTable);
} catch (IOException e) {
exception = e;
}
@ -187,7 +185,7 @@ public class TestAdmin {
exception = null;
try {
this.admin.enableTable(TableName.valueOf(nonexistent));
this.admin.enableTable(nonexistentTable);
} catch (IOException e) {
exception = e;
}
@ -195,7 +193,7 @@ public class TestAdmin {
exception = null;
try {
this.admin.modifyColumn(TableName.valueOf(nonexistent), nonexistentHcd);
this.admin.modifyColumn(nonexistentTable, nonexistentHcd);
} catch (IOException e) {
exception = e;
}
@ -203,7 +201,7 @@ public class TestAdmin {
exception = null;
try {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(nonexistent));
HTableDescriptor htd = new HTableDescriptor(nonexistentTable);
htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
this.admin.modifyTable(htd.getTableName(), htd);
} catch (IOException e) {
@ -1759,6 +1757,10 @@ public class TestAdmin {
@Test (timeout=300000)
public void testGetRegion() throws Exception {
// We use actual HBaseAdmin instance instead of going via Admin interface in
// here because makes use of an internal HBA method (TODO: Fix.).
HBaseAdmin rawAdmin = new HBaseAdmin(TEST_UTIL.getConfiguration());
final String name = "testGetRegion";
LOG.info("Started " + name);
final byte [] nameBytes = Bytes.toBytes(name);
@ -1768,9 +1770,9 @@ public class TestAdmin {
HRegionLocation regionLocation = t.getRegionLocation("mmm");
HRegionInfo region = regionLocation.getRegionInfo();
byte[] regionName = region.getRegionName();
Pair<HRegionInfo, ServerName> pair = rawAdminInstance.getRegion(regionName);
Pair<HRegionInfo, ServerName> pair = rawAdmin.getRegion(regionName);
assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
pair = rawAdminInstance.getRegion(region.getEncodedNameAsBytes());
pair = rawAdmin.getRegion(region.getEncodedNameAsBytes());
assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
}
}

View File

@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@ -50,7 +51,7 @@ public class TestClientOperationInterrupt {
private static final Log LOG = LogFactory.getLog(TestClientOperationInterrupt.class);
private static HBaseTestingUtility util;
private static final byte[] tableName = Bytes.toBytes("test");
private static final TableName tableName = TableName.valueOf("test");
private static final byte[] dummy = Bytes.toBytes("dummy");
private static final byte[] row1 = Bytes.toBytes("r1");
private static final byte[] test = Bytes.toBytes("test");
@ -73,7 +74,7 @@ public class TestClientOperationInterrupt {
util = new HBaseTestingUtility(conf);
util.startMiniCluster();
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
if (admin.tableExists(tableName)) {
if (admin.isTableEnabled(tableName)) {
admin.disableTable(tableName);

View File

@ -62,7 +62,7 @@ public class TestCloneSnapshotFromClient {
private int snapshot0Rows;
private int snapshot1Rows;
private TableName tableName;
private HBaseAdmin admin;
private Admin admin;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
@ -148,7 +148,7 @@ public class TestCloneSnapshotFromClient {
@Test(expected=SnapshotDoesNotExistException.class)
public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
String snapshotName = "random-snapshot-" + System.currentTimeMillis();
String tableName = "random-table-" + System.currentTimeMillis();
TableName tableName = TableName.valueOf("random-table-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName, tableName);
}

View File

@ -5450,7 +5450,7 @@ public class TestFromClientSide {
}
private void checkTableIsLegal(HTableDescriptor htd) throws IOException {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.createTable(htd);
assertTrue(admin.tableExists(htd.getTableName()));
admin.disableTable(htd.getTableName());
@ -5458,7 +5458,7 @@ public class TestFromClientSide {
}
private void checkTableIsIllegal(HTableDescriptor htd) throws IOException {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
try {
admin.createTable(htd);
fail();

View File

@ -405,7 +405,7 @@ public class TestFromClientSide3 {
@Test
public void testGetEmptyRow() throws Exception {
//Create a table and put in 1 row
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("test")));
desc.addFamily(new HColumnDescriptor(FAMILY));
admin.createTable(desc);

View File

@ -66,7 +66,7 @@ public class TestRestoreSnapshotFromClient {
private int snapshot0Rows;
private int snapshot1Rows;
private TableName tableName;
private HBaseAdmin admin;
private Admin admin;
@BeforeClass
public static void setUpBeforeClass() throws Exception {

View File

@ -189,7 +189,7 @@ public class TestSnapshotCloneIndependence {
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
final long startTime = System.currentTimeMillis();
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
@ -210,7 +210,7 @@ public class TestSnapshotCloneIndependence {
if (!online) {
admin.enableTable(localTableName);
}
byte[] cloneTableName = Bytes.toBytes("test-clone-" + localTableName);
TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
admin.cloneSnapshot(snapshotName, cloneTableName);
HTable clonedTable = new HTable(UTIL.getConfiguration(), cloneTableName);
@ -267,7 +267,7 @@ public class TestSnapshotCloneIndependence {
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
// Create a table
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
final long startTime = System.currentTimeMillis();
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
@ -286,7 +286,7 @@ public class TestSnapshotCloneIndependence {
admin.enableTable(localTableName);
}
byte[] cloneTableName = Bytes.toBytes("test-clone-" + localTableName);
TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
// Clone the snapshot
byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
@ -323,7 +323,7 @@ public class TestSnapshotCloneIndependence {
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
// Create a table
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
final long startTime = System.currentTimeMillis();
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
@ -339,7 +339,7 @@ public class TestSnapshotCloneIndependence {
if (!online) {
admin.enableTable(localTableName);
}
byte[] cloneTableName = Bytes.toBytes("test-clone-" + localTableName);
TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
// Clone the snapshot
byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);

View File

@ -125,7 +125,7 @@ public class TestSnapshotFromClient {
*/
@Test (timeout=300000)
public void testMetaTablesSnapshot() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
byte[] snapshotName = Bytes.toBytes("metaSnapshot");
try {
@ -143,7 +143,7 @@ public class TestSnapshotFromClient {
*/
@Test (timeout=300000)
public void testSnapshotDeletionWithRegex() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
@ -179,7 +179,7 @@ public class TestSnapshotFromClient {
*/
@Test (timeout=300000)
public void testOfflineTableSnapshot() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
@ -232,7 +232,7 @@ public class TestSnapshotFromClient {
@Test (timeout=300000)
public void testSnapshotFailsOnNonExistantTable() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
String tableName = "_not_a_table";
@ -241,7 +241,7 @@ public class TestSnapshotFromClient {
boolean fail = false;
do {
try {
admin.getTableDescriptor(Bytes.toBytes(tableName));
admin.getTableDescriptor(TableName.valueOf(tableName));
fail = true;
LOG.error("Table:" + tableName + " already exists, checking a new name");
tableName = tableName+"!";
@ -252,7 +252,7 @@ public class TestSnapshotFromClient {
// snapshot the non-existant table
try {
admin.snapshot("fail", tableName);
admin.snapshot("fail", TableName.valueOf(tableName));
fail("Snapshot succeeded even though there is not table.");
} catch (SnapshotCreationException e) {
LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage());
@ -263,7 +263,7 @@ public class TestSnapshotFromClient {
public void testOfflineTableSnapshotWithEmptyRegions() throws Exception {
// test with an empty table with one region
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);

View File

@ -83,7 +83,7 @@ public class TestSnapshotMetadata {
private static final int BLOCK_SIZE = 98;
private static final int MAX_VERSIONS = 8;
private HBaseAdmin admin;
private Admin admin;
private String originalTableDescription;
private HTableDescriptor originalTableDescriptor;
TableName originalTableName;
@ -185,7 +185,7 @@ public class TestSnapshotMetadata {
public void testDescribeMatchesAfterClone() throws Exception {
// Clone the original table
final String clonedTableNameAsString = "clone" + originalTableName;
final byte[] clonedTableName = Bytes.toBytes(clonedTableNameAsString);
final TableName clonedTableName = TableName.valueOf(clonedTableNameAsString);
final String snapshotNameAsString = "snapshot" + originalTableName
+ System.currentTimeMillis();
final byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);

View File

@ -88,7 +88,7 @@ public class TestTableSnapshotScanner {
} else {
util.createTable(tableName, FAMILIES);
}
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
// put some stuff in the table
HTable table = new HTable(util.getConfiguration(), tableName);

View File

@ -51,7 +51,7 @@ public class TestConstraint {
.getLog(TestConstraint.class);
private static HBaseTestingUtility util;
private static final byte[] tableName = Bytes.toBytes("test");
private static final TableName tableName = TableName.valueOf("test");
private static final byte[] dummy = Bytes.toBytes("dummy");
private static final byte[] row1 = Bytes.toBytes("r1");
private static final byte[] test = Bytes.toBytes("test");
@ -72,7 +72,7 @@ public class TestConstraint {
public void testConstraintPasses() throws Exception {
// create the table
// it would be nice if this was also a method on the util
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
HTableDescriptor desc = new HTableDescriptor(tableName);
for (byte[] family : new byte[][] { dummy, test }) {
desc.addFamily(new HColumnDescriptor(family));
}
@ -103,7 +103,7 @@ public class TestConstraint {
// create the table
// it would be nice if this was also a method on the util
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
HTableDescriptor desc = new HTableDescriptor(tableName);
for (byte[] family : new byte[][] { dummy, test }) {
desc.addFamily(new HColumnDescriptor(family));
}
@ -140,7 +140,7 @@ public class TestConstraint {
@Test
public void testDisableConstraint() throws Throwable {
// create the table
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
HTableDescriptor desc = new HTableDescriptor(tableName);
// add a family to the table
for (byte[] family : new byte[][] { dummy, test }) {
desc.addFamily(new HColumnDescriptor(family));
@ -175,7 +175,7 @@ public class TestConstraint {
@Test
public void testDisableConstraints() throws Throwable {
// create the table
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
HTableDescriptor desc = new HTableDescriptor(tableName);
// add a family to the table
for (byte[] family : new byte[][] { dummy, test }) {
desc.addFamily(new HColumnDescriptor(family));
@ -207,7 +207,7 @@ public class TestConstraint {
@Test
public void testIsUnloaded() throws Exception {
// create the table
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
HTableDescriptor desc = new HTableDescriptor(tableName);
// add a family to the table
for (byte[] family : new byte[][] { dummy, test }) {
desc.addFamily(new HColumnDescriptor(family));

View File

@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.TestServerCustomProtocol;
@ -55,7 +56,7 @@ public class TestClassLoading {
private static MiniDFSCluster cluster;
static final String tableName = "TestClassLoading";
static final TableName tableName = TableName.valueOf("TestClassLoading");
static final String cpName1 = "TestCP1";
static final String cpName2 = "TestCP2";
static final String cpName3 = "TestCP3";
@ -137,7 +138,7 @@ public class TestClassLoading {
LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2);
// create a table that references the coprocessors
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("test"));
// without configuration values
htd.setValue("COPROCESSOR$1", jarFileOnHDFS1.toString() + "|" + cpName1 +
@ -145,7 +146,7 @@ public class TestClassLoading {
// with configuration values
htd.setValue("COPROCESSOR$2", jarFileOnHDFS2.toString() + "|" + cpName2 +
"|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(tableName)) {
if (admin.isTableEnabled(tableName)) {
admin.disableTable(tableName);
@ -166,7 +167,7 @@ public class TestClassLoading {
MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
for (HRegion region:
hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
if (region.getRegionNameAsString().startsWith(tableName)) {
if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) {
foundTableRegion = true;
CoprocessorEnvironment env;
env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
@ -226,7 +227,7 @@ public class TestClassLoading {
htd.addFamily(new HColumnDescriptor("test"));
htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName3 + "|" +
Coprocessor.PRIORITY_USER);
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.createTable(htd);
waitForTable(htd.getTableName());
@ -252,7 +253,7 @@ public class TestClassLoading {
htd.addFamily(new HColumnDescriptor("test"));
htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName4 + "|" +
Coprocessor.PRIORITY_USER);
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.createTable(htd);
waitForTable(htd.getTableName());
@ -296,7 +297,7 @@ public class TestClassLoading {
" | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v ";
// create a table that references the jar
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("test"));
// add 3 coprocessors by setting htd attributes directly.
@ -314,7 +315,7 @@ public class TestClassLoading {
htd.addCoprocessor(cpName6, new Path(getLocalPath(jarFile6)),
Coprocessor.PRIORITY_USER, kvs);
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(tableName)) {
if (admin.isTableEnabled(tableName)) {
admin.disableTable(tableName);
@ -333,7 +334,7 @@ public class TestClassLoading {
MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
for (HRegion region:
hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
if (region.getRegionNameAsString().startsWith(tableName)) {
if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) {
found_1 = found_1 ||
(region.getCoprocessorHost().findCoprocessor(cpName1) != null);
found_2 = found_2 ||
@ -398,7 +399,7 @@ public class TestClassLoading {
LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);
// create a table that references the coprocessors
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("test"));
// without configuration values
htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 +
@ -406,7 +407,7 @@ public class TestClassLoading {
// with configuration values
htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 +
"|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(tableName)) {
if (admin.isTableEnabled(tableName)) {
admin.disableTable(tableName);
@ -422,7 +423,7 @@ public class TestClassLoading {
MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
for (HRegion region:
hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
if (region.getRegionNameAsString().startsWith(tableName)) {
if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) {
CoprocessorEnvironment env;
env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
if (env != null) {

View File

@ -30,6 +30,7 @@ import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -283,7 +284,7 @@ public class TestCoprocessorEndpoint {
@Test
public void testMasterCoprocessorService() throws Throwable {
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
final TestProtos.EchoRequestProto request =
TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build();
TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service =
@ -314,7 +315,7 @@ public class TestCoprocessorEndpoint {
@Test
public void testMasterCoprocessorError() throws Throwable {
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service =
TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(admin.coprocessorService());
try {

View File

@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@ -85,7 +86,7 @@ public class TestMasterCoprocessorExceptionWithAbort {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TEST_TABLE));
htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
try {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
admin.createTable(htd);
fail("BuggyMasterObserver failed to throw an exception.");
} catch (IOException e) {

View File

@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@ -191,7 +192,7 @@ public class TestMasterCoprocessorExceptionWithRemove {
boolean threwDNRE = false;
try {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
admin.createTable(htd1);
} catch (IOException e) {
if (e.getClass().getName().equals("org.apache.hadoop.hbase.DoNotRetryIOException")) {
@ -218,7 +219,7 @@ public class TestMasterCoprocessorExceptionWithRemove {
// by creating another table: should not have a problem this time.
HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf(TEST_TABLE2));
htd2.addFamily(new HColumnDescriptor(TEST_FAMILY2));
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
try {
admin.createTable(htd2);
} catch (IOException e) {

View File

@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.master.AssignmentManager;
@ -1012,9 +1013,8 @@ public class TestMasterObserver {
private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static byte[] TEST_SNAPSHOT = Bytes.toBytes("observed_snapshot");
private static TableName TEST_TABLE =
TableName.valueOf("observed_table");
private static byte[] TEST_CLONE = Bytes.toBytes("observed_clone");
private static TableName TEST_TABLE = TableName.valueOf("observed_table");
private static TableName TEST_CLONE = TableName.valueOf("observed_clone");
private static byte[] TEST_FAMILY = Bytes.toBytes("fam1");
private static byte[] TEST_FAMILY2 = Bytes.toBytes("fam2");
private static byte[] TEST_FAMILY3 = Bytes.toBytes("fam3");
@ -1073,7 +1073,7 @@ public class TestMasterObserver {
// create a table
HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
tableCreationLatch = new CountDownLatch(1);
admin.createTable(htd);
@ -1236,7 +1236,7 @@ public class TestMasterObserver {
// create a table
HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
tableCreationLatch = new CountDownLatch(1);
admin.createTable(htd);
@ -1294,7 +1294,7 @@ public class TestMasterObserver {
// create a table
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
admin.createNamespace(NamespaceDescriptor.create(testNamespace).build());
assertTrue("Test namespace should be created", cp.wasCreateNamespaceCalled());
@ -1332,7 +1332,7 @@ public class TestMasterObserver {
assertTrue("Test namespace should not be created", cp.preCreateNamespaceCalledOnly());
}
private void modifyTableSync(HBaseAdmin admin, TableName tableName, HTableDescriptor htd)
private void modifyTableSync(Admin admin, TableName tableName, HTableDescriptor htd)
throws IOException {
admin.modifyTable(tableName, htd);
//wait until modify table finishes

View File

@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -157,7 +158,7 @@ public class TestOpenTableInCoprocessor {
other.addFamily(new HColumnDescriptor(family));
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
admin.createTable(primary);
admin.createTable(other);

View File

@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@ -52,7 +54,7 @@ import org.junit.experimental.categories.Category;
@Category(MediumTests.class)
public class TestRegionObserverBypass {
private static HBaseTestingUtility util;
private static final byte[] tableName = Bytes.toBytes("test");
private static final TableName tableName = TableName.valueOf("test");
private static final byte[] dummy = Bytes.toBytes("dummy");
private static final byte[] row1 = Bytes.toBytes("r1");
private static final byte[] row2 = Bytes.toBytes("r2");
@ -75,7 +77,7 @@ public class TestRegionObserverBypass {
@Before
public void setUp() throws Exception {
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
if (admin.tableExists(tableName)) {
if (admin.isTableEnabled(tableName)) {
admin.disableTable(tableName);

View File

@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
@ -486,14 +487,14 @@ public class TestRegionObserverInterface {
*/
@Test
public void testCompactionOverride() throws Exception {
byte[] compactTable = Bytes.toBytes("TestCompactionOverride");
HBaseAdmin admin = util.getHBaseAdmin();
TableName compactTable = TableName.valueOf("TestCompactionOverride");
Admin admin = util.getHBaseAdmin();
if (admin.tableExists(compactTable)) {
admin.disableTable(compactTable);
admin.deleteTable(compactTable);
}
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(compactTable));
HTableDescriptor htd = new HTableDescriptor(compactTable);
htd.addFamily(new HColumnDescriptor(A));
htd.addCoprocessor(EvenOnlyCompactor.class.getName());
admin.createTable(htd);
@ -515,7 +516,7 @@ public class TestRegionObserverInterface {
// force a compaction
long ts = System.currentTimeMillis();
admin.flush(compactTable);
admin.flush(compactTable.toBytes());
// wait for flush
for (int i=0; i<10; i++) {
if (compactor.lastFlush >= ts) {
@ -527,7 +528,7 @@ public class TestRegionObserverInterface {
LOG.debug("Flush complete");
ts = compactor.lastFlush;
admin.majorCompact(compactTable);
admin.majorCompact(compactTable.toBytes());
// wait for compaction
for (int i=0; i<30; i++) {
if (compactor.lastCompaction >= ts) {

View File

@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -254,7 +255,7 @@ public class TestRegionObserverScannerOpenHook {
desc.addCoprocessor(NoDataFromCompaction.class.getName(), null, Coprocessor.PRIORITY_HIGHEST,
null);
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
admin.createTable(desc);
HTable table = new HTable(conf, desc.getTableName());

View File

@ -33,6 +33,7 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
@ -85,7 +86,7 @@ public class TestRowProcessorEndpoint {
static final Log LOG = LogFactory.getLog(TestRowProcessorEndpoint.class);
private static final byte[] TABLE = Bytes.toBytes("testtable");
private static final TableName TABLE = TableName.valueOf("testtable");
private final static byte[] ROW = Bytes.toBytes("testrow");
private final static byte[] ROW2 = Bytes.toBytes("testrow2");
private final static byte[] FAM = Bytes.toBytes("friendlist");

View File

@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
@ -149,7 +150,7 @@ public abstract class TableSnapshotInputFormatTestBase {
} else {
util.createTable(tableName, FAMILIES);
}
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
// put some stuff in the table
HTable table = new HTable(util.getConfiguration(), tableName);

View File

@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@ -183,7 +184,7 @@ public class TestAssignmentListener {
@Test(timeout=60000)
public void testAssignmentListener() throws IOException, InterruptedException {
AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
DummyAssignmentListener listener = new DummyAssignmentListener();
am.registerListener(listener);

View File

@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
@ -82,7 +83,7 @@ public class TestAssignmentManagerOnCluster {
private final static byte[] FAMILY = Bytes.toBytes("FAMILY");
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
final static Configuration conf = TEST_UTIL.getConfiguration();
private static HBaseAdmin admin;
private static Admin admin;
static void setupOnce() throws Exception {
// Using the our load balancer to control region plans
@ -302,7 +303,7 @@ public class TestAssignmentManagerOnCluster {
public void testMoveRegionOfDeletedTable() throws Exception {
TableName table =
TableName.valueOf("testMoveRegionOfDeletedTable");
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
try {
HRegionInfo hri = createTableAndGetOneRegion(table);
@ -807,11 +808,11 @@ public class TestAssignmentManagerOnCluster {
*/
@Test (timeout=60000)
public void testAssignDisabledRegion() throws Exception {
String table = "testAssignDisabledRegion";
TableName table = TableName.valueOf("testAssignDisabledRegion");
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
MyMaster master = null;
try {
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
HTableDescriptor desc = new HTableDescriptor(table);
desc.addFamily(new HColumnDescriptor(FAMILY));
admin.createTable(desc);
@ -839,7 +840,7 @@ public class TestAssignmentManagerOnCluster {
am.unassign(hri, true);
assertTrue(regionStates.isRegionOffline(hri));
} finally {
TEST_UTIL.deleteTable(Bytes.toBytes(table));
TEST_UTIL.deleteTable(table);
}
}

View File

@ -781,7 +781,7 @@ public class TestDistributedLogSplitting {
makeHLog(hrs.getWAL(), regions, "table", "family", NUM_LOG_LINES, 100);
LOG.info("Disabling table\n");
TEST_UTIL.getHBaseAdmin().disableTable(Bytes.toBytes("disableTable"));
TEST_UTIL.getHBaseAdmin().disableTable(TableName.valueOf("disableTable"));
// abort RS
LOG.info("Aborting region server: " + hrs.getServerName());
@ -1361,7 +1361,7 @@ public class TestDistributedLogSplitting {
HTable installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs,
int existingRegions) throws Exception {
// Create a table with regions
byte [] table = Bytes.toBytes(tname);
TableName table = TableName.valueOf(tname);
byte [] family = Bytes.toBytes(fname);
LOG.info("Creating table with " + nrs + " regions");
HTable ht = TEST_UTIL.createTable(table, family);
@ -1617,7 +1617,7 @@ public class TestDistributedLogSplitting {
final HRegionServer destRS = hrs;
// the RS doesn't have regions of the specified table so we need move one to this RS
List<HRegionInfo> tableRegions =
TEST_UTIL.getHBaseAdmin().getTableRegions(Bytes.toBytes(tableName));
TEST_UTIL.getHBaseAdmin().getTableRegions(TableName.valueOf(tableName));
final HRegionInfo hri = tableRegions.get(0);
TEST_UTIL.getHBaseAdmin().move(hri.getEncodedNameAsBytes(),
Bytes.toBytes(destRS.getServerName().getServerName()));

View File

@ -22,6 +22,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
@ -47,7 +48,7 @@ public class TestMaster {
private static final TableName TABLENAME =
TableName.valueOf("TestMaster");
private static final byte[] FAMILYNAME = Bytes.toBytes("fam");
private static HBaseAdmin admin;
private static Admin admin;
@BeforeClass
public static void beforeAllTests() throws Exception {
@ -147,9 +148,9 @@ public class TestMaster {
@Test
public void testMoveThrowsPleaseHoldException() throws IOException {
byte[] tableName = Bytes.toBytes("testMoveThrowsPleaseHoldException");
TableName tableName = TableName.valueOf("testMoveThrowsPleaseHoldException");
HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
HTableDescriptor htd = new HTableDescriptor(tableName);
HColumnDescriptor hcd = new HColumnDescriptor("value");
htd.addFamily(hcd);

View File

@ -67,7 +67,7 @@ public class TestMasterRestartAfterDisablingTable {
HMaster master = cluster.getMaster();
// Create a table with regions
byte[] table = Bytes.toBytes("tableRestart");
TableName table = TableName.valueOf("tableRestart");
byte[] family = Bytes.toBytes("family");
log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
HTable ht = TEST_UTIL.createTable(table, family);

View File

@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
@ -76,7 +77,7 @@ public class TestRollingRestart {
HMaster master = cluster.getMaster();
// Create a table with regions
byte [] table = Bytes.toBytes("tableRestart");
TableName table = TableName.valueOf("tableRestart");
byte [] family = Bytes.toBytes("family");
log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
HTable ht = TEST_UTIL.createTable(table, family);

View File

@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
@ -114,7 +115,7 @@ public class TestTableLockManager {
Future<Object> shouldFinish = executor.submit(new Callable<Object>() {
@Override
public Object call() throws Exception {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.deleteColumn(TABLE_NAME, FAMILY);
return null;
}
@ -123,7 +124,7 @@ public class TestTableLockManager {
deleteColumn.await();
try {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.addColumn(TABLE_NAME, new HColumnDescriptor(NEW_FAMILY));
fail("Was expecting TableLockTimeoutException");
} catch (LockTimeoutException ex) {
@ -166,7 +167,7 @@ public class TestTableLockManager {
Future<Object> alterTableFuture = executor.submit(new Callable<Object>() {
@Override
public Object call() throws Exception {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.addColumn(TABLE_NAME, new HColumnDescriptor(NEW_FAMILY));
LOG.info("Added new column family");
HTableDescriptor tableDesc = admin.getTableDescriptor(TABLE_NAME);
@ -177,7 +178,7 @@ public class TestTableLockManager {
Future<Object> disableTableFuture = executor.submit(new Callable<Object>() {
@Override
public Object call() throws Exception {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.disableTable(TABLE_NAME);
assertTrue(admin.isTableDisabled(TABLE_NAME));
admin.deleteTable(TABLE_NAME);
@ -243,7 +244,7 @@ public class TestTableLockManager {
public void testDelete() throws Exception {
prepareMiniCluster();
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.disableTable(TABLE_NAME);
admin.deleteTable(TABLE_NAME);
@ -327,7 +328,7 @@ public class TestTableLockManager {
loadTool.setConf(TEST_UTIL.getConfiguration());
int numKeys = 10000;
final TableName tableName = TableName.valueOf("testTableReadLock");
final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
final Admin admin = TEST_UTIL.getHBaseAdmin();
final HTableDescriptor desc = new HTableDescriptor(tableName);
final byte[] family = Bytes.toBytes("test_cf");
desc.addFamily(new HColumnDescriptor(family));

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.master.HMaster;
@ -278,7 +279,7 @@ public class TestSnapshotFromMaster {
*/
@Test(timeout = 300000)
public void testSnapshotHFileArchiving() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);

View File

@ -69,10 +69,10 @@ public class TestCreateTableHandler {
@Test (timeout=300000)
public void testCreateTableCalledTwiceAndFirstOneInProgress() throws Exception {
final byte[] tableName = Bytes.toBytes("testCreateTableCalledTwiceAndFirstOneInProgress");
final TableName tableName = TableName.valueOf("testCreateTableCalledTwiceAndFirstOneInProgress");
final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
final HMaster m = cluster.getMaster();
final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
final HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(FAMILYNAME));
final HRegionInfo[] hRegionInfos = new HRegionInfo[] { new HRegionInfo(desc.getTableName(), null,
null) };
@ -96,10 +96,10 @@ public class TestCreateTableHandler {
@Test (timeout=300000)
public void testCreateTableWithSplitRegion() throws Exception {
final byte[] tableName = Bytes.toBytes("testCreateTableWithSplitRegion");
final TableName tableName = TableName.valueOf("testCreateTableWithSplitRegion");
final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
final HMaster m = cluster.getMaster();
final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
final HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(FAMILYNAME));
byte[] splitPoint = Bytes.toBytes("split-point");
long ts = System.currentTimeMillis();

View File

@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;
@ -97,7 +98,7 @@ public class TestTableDeleteFamilyHandler {
@Test
public void deleteColumnFamilyWithMultipleRegions() throws Exception {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME);
FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
@ -135,7 +136,7 @@ public class TestTableDeleteFamilyHandler {
// TEST - Disable and delete the column family
admin.disableTable(TABLENAME);
admin.deleteColumn(TABLENAME.getName(), "cf2");
admin.deleteColumn(TABLENAME, Bytes.toBytes("cf2"));
// 5 - Check if only 2 column families exist in the descriptor
HTableDescriptor afterhtd = admin.getTableDescriptor(TABLENAME);

View File

@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.util.Bytes;
@ -80,7 +81,7 @@ public class TestTableDescriptorModification {
@Test
public void testModifyTable() throws IOException {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
// Create a table with one family
HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
@ -103,7 +104,7 @@ public class TestTableDescriptorModification {
@Test
public void testAddColumn() throws IOException {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
// Create a table with two families
HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
@ -123,7 +124,7 @@ public class TestTableDescriptorModification {
@Test
public void testDeleteColumn() throws IOException {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
// Create a table with two families
HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
@ -144,7 +145,7 @@ public class TestTableDescriptorModification {
private void verifyTableDescriptor(final TableName tableName,
final byte[]... families) throws IOException {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
// Verify descriptor from master
HTableDescriptor htd = admin.getTableDescriptor(tableName);

View File

@ -93,7 +93,8 @@ public class TestNamespaceUpgrade {
{"1","2","3","4","5","6","7","8","9"};
private final static String currentKeys[] =
{"1","2","3","4","5","6","7","8","9","A"};
private final static String tables[] = {"foo", "ns1.foo","ns.two.foo"};
private final static TableName tables[] =
{TableName.valueOf("foo"), TableName.valueOf("ns1.foo"), TableName.valueOf("ns.two.foo")};
@BeforeClass
public static void setUpBeforeClass() throws Exception {
@ -134,7 +135,7 @@ public class TestNamespaceUpgrade {
doFsCommand(shell, new String [] {"-lsr", "/"});
TEST_UTIL.startMiniHBaseCluster(1, 1);
for(String table: tables) {
for(TableName table: tables) {
int count = 0;
for(Result res: new HTable(TEST_UTIL.getConfiguration(), table).getScanner(new Scan())) {
assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
@ -151,7 +152,7 @@ public class TestNamespaceUpgrade {
count++;
}
assertEquals(3, count);
assertFalse(TEST_UTIL.getHBaseAdmin().tableExists("_acl_"));
assertFalse(TEST_UTIL.getHBaseAdmin().tableExists(TableName.valueOf("_acl_")));
//verify ACL table was compacted
List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(secureTable.getName());
@ -198,8 +199,8 @@ public class TestNamespaceUpgrade {
public void testSnapshots() throws IOException, InterruptedException {
String snapshots[][] = {snapshot1Keys, snapshot2Keys};
for(int i = 1; i <= snapshots.length; i++) {
for(String table: tables) {
TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot"+i, table+"_clone"+i);
for(TableName table: tables) {
TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot"+i, TableName.valueOf(table+"_clone"+i));
FSUtils.logFileSystemState(FileSystem.get(TEST_UTIL.getConfiguration()),
FSUtils.getRootDir(TEST_UTIL.getConfiguration()),
LOG);
@ -217,14 +218,15 @@ public class TestNamespaceUpgrade {
public void testRenameUsingSnapshots() throws Exception {
String newNS = "newNS";
TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(newNS).build());
for(String table: tables) {
for(TableName table: tables) {
int count = 0;
for(Result res: new HTable(TEST_UTIL.getConfiguration(), table).getScanner(new
Scan())) {
assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
}
TEST_UTIL.getHBaseAdmin().snapshot(table + "_snapshot3", table);
final String newTableName = newNS + TableName.NAMESPACE_DELIM + table + "_clone3";
final TableName newTableName =
TableName.valueOf(newNS + TableName.NAMESPACE_DELIM + table + "_clone3");
TEST_UTIL.getHBaseAdmin().cloneSnapshot(table + "_snapshot3", newTableName);
Thread.sleep(1000);
count = 0;
@ -234,14 +236,14 @@ public class TestNamespaceUpgrade {
}
FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath()
, LOG);
Assert.assertEquals(newTableName, currentKeys.length, count);
TEST_UTIL.getHBaseAdmin().flush(newTableName);
TEST_UTIL.getHBaseAdmin().majorCompact(newTableName);
Assert.assertEquals(newTableName + "", currentKeys.length, count);
TEST_UTIL.getHBaseAdmin().flush(newTableName.toBytes());
TEST_UTIL.getHBaseAdmin().majorCompact(newTableName.toBytes());
TEST_UTIL.waitFor(30000, new Waiter.Predicate<IOException>() {
@Override
public boolean evaluate() throws IOException {
try {
return TEST_UTIL.getHBaseAdmin().getCompactionState(newTableName) ==
return TEST_UTIL.getHBaseAdmin().getCompactionState(newTableName.toBytes()) ==
AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
} catch (InterruptedException e) {
throw new IOException(e);
@ -252,10 +254,11 @@ public class TestNamespaceUpgrade {
String nextNS = "nextNS";
TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(nextNS).build());
for(String table: tables) {
String srcTable = newNS + TableName.NAMESPACE_DELIM + table + "_clone3";
for(TableName table: tables) {
TableName srcTable = TableName.valueOf(newNS + TableName.NAMESPACE_DELIM + table + "_clone3");
TEST_UTIL.getHBaseAdmin().snapshot(table + "_snapshot4", srcTable);
String newTableName = nextNS + TableName.NAMESPACE_DELIM + table + "_clone4";
TableName newTableName =
TableName.valueOf(nextNS + TableName.NAMESPACE_DELIM + table + "_clone4");
TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot4", newTableName);
FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(),
LOG);
@ -264,7 +267,7 @@ public class TestNamespaceUpgrade {
Scan())) {
assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
}
Assert.assertEquals(newTableName, currentKeys.length, count);
Assert.assertEquals(newTableName + "", currentKeys.length, count);
}
}

View File

@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.junit.AfterClass;
import static org.junit.Assert.assertArrayEquals;
@ -59,7 +60,7 @@ public class TestProcedureManager {
@Test
public void testSimpleProcedureManager() throws IOException {
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
byte[] result = admin.execProcedureWithRet(SimpleMasterProcedureManager.SIMPLE_SIGNATURE,
"mytest", new HashMap<String, String>());

View File

@ -113,7 +113,7 @@ public class TestEncryptionKeyRotation {
hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf,
conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()),
secondCFKey));
TEST_UTIL.getHBaseAdmin().modifyColumn(htd.getName(), hcd);
TEST_UTIL.getHBaseAdmin().modifyColumn(htd.getTableName(), hcd);
Thread.sleep(5000); // Need a predicate for online schema change
// And major compact

View File

@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
@ -223,7 +224,7 @@ public class TestEndToEndSplitTransaction {
HTable table;
TableName tableName;
byte[] family;
HBaseAdmin admin;
Admin admin;
HRegionServer rs;
RegionSplitter(HTable table) throws IOException {
@ -398,7 +399,7 @@ public class TestEndToEndSplitTransaction {
/* some utility methods for split tests */
public static void flushAndBlockUntilDone(HBaseAdmin admin, HRegionServer rs, byte[] regionName)
public static void flushAndBlockUntilDone(Admin admin, HRegionServer rs, byte[] regionName)
throws IOException, InterruptedException {
log("flushing region: " + Bytes.toStringBinary(regionName));
admin.flush(regionName);
@ -409,7 +410,7 @@ public class TestEndToEndSplitTransaction {
}
}
public static void compactAndBlockUntilDone(HBaseAdmin admin, HRegionServer rs, byte[] regionName)
public static void compactAndBlockUntilDone(Admin admin, HRegionServer rs, byte[] regionName)
throws IOException, InterruptedException {
log("Compacting region: " + Bytes.toStringBinary(regionName));
admin.majorCompact(regionName);

View File

@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@ -58,22 +59,21 @@ public class TestHRegionOnCluster {
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
try {
final byte[] TABLENAME = Bytes
.toBytes("testDataCorrectnessReplayingRecoveredEdits");
final TableName TABLENAME = TableName.valueOf("testDataCorrectnessReplayingRecoveredEdits");
final byte[] FAMILY = Bytes.toBytes("family");
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
HMaster master = cluster.getMaster();
// Create table
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLENAME));
HTableDescriptor desc = new HTableDescriptor(TABLENAME);
desc.addFamily(new HColumnDescriptor(FAMILY));
HBaseAdmin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
Admin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
hbaseAdmin.createTable(desc);
assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));
// Put data: r1->v1
Log.info("Loading r1 to v1 into " + Bytes.toString(TABLENAME));
Log.info("Loading r1 to v1 into " + TABLENAME);
HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
putDataAndVerify(table, "r1", FAMILY, "v1", 1);
@ -95,7 +95,7 @@ public class TestHRegionOnCluster {
} while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);
// Put data: r2->v2
Log.info("Loading r2 to v2 into " + Bytes.toString(TABLENAME));
Log.info("Loading r2 to v2 into " + TABLENAME);
putDataAndVerify(table, "r2", FAMILY, "v2", 2);
TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
@ -108,7 +108,7 @@ public class TestHRegionOnCluster {
} while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);
// Put data: r3->v3
Log.info("Loading r3 to v3 into " + Bytes.toString(TABLENAME));
Log.info("Loading r3 to v3 into " + TABLENAME);
putDataAndVerify(table, "r3", FAMILY, "v3", 3);
// Kill target server
@ -125,7 +125,7 @@ public class TestHRegionOnCluster {
cluster.getRegionServerThreads().get(originServerNum).join();
// Put data: r4->v4
Log.info("Loading r4 to v4 into " + Bytes.toString(TABLENAME));
Log.info("Loading r4 to v4 into " + TABLENAME);
putDataAndVerify(table, "r4", FAMILY, "v4", 4);
} finally {

View File

@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@ -91,7 +92,7 @@ public class TestRegionMergeTransactionOnCluster {
static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static HMaster master;
private static HBaseAdmin admin;
private static Admin admin;
static void setupOnce() throws Exception {
// Start a cluster

View File

@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -57,7 +58,7 @@ import org.junit.experimental.categories.Category;
* w.r.t. essential column family optimization
*/
public class TestSCVFWithMiniCluster {
private static final String HBASE_TABLE_NAME = "TestSCVFWithMiniCluster";
private static final TableName HBASE_TABLE_NAME = TableName.valueOf("TestSCVFWithMiniCluster");
private static final byte[] FAMILY_A = Bytes.toBytes("a");
private static final byte[] FAMILY_B = Bytes.toBytes("b");
@ -77,7 +78,7 @@ public class TestSCVFWithMiniCluster {
util.startMiniCluster(1);
HBaseAdmin admin = util.getHBaseAdmin();
Admin admin = util.getHBaseAdmin();
destroy(admin, HBASE_TABLE_NAME);
create(admin, HBASE_TABLE_NAME, FAMILY_A, FAMILY_B);
admin.close();
@ -215,9 +216,9 @@ public class TestSCVFWithMiniCluster {
verify(scan);
}
private static void create(HBaseAdmin admin, String tableName, byte[]... families)
private static void create(Admin admin, TableName tableName, byte[]... families)
throws IOException {
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
HTableDescriptor desc = new HTableDescriptor(tableName);
for (byte[] family : families) {
HColumnDescriptor colDesc = new HColumnDescriptor(family);
colDesc.setMaxVersions(1);
@ -231,7 +232,7 @@ public class TestSCVFWithMiniCluster {
}
}
private static void destroy(HBaseAdmin admin, String tableName) throws IOException {
private static void destroy(Admin admin, TableName tableName) throws IOException {
try {
admin.disableTable(tableName);
admin.deleteTable(tableName);

View File

@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@ -114,7 +115,7 @@ public class TestTags {
// colDesc.setDataBlockEncoding(DataBlockEncoding.NONE);
colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);
desc.addFamily(colDesc);
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.createTable(desc);
byte[] value = Bytes.toBytes("value");
table = new HTable(TEST_UTIL.getConfiguration(), tableName);
@ -192,7 +193,7 @@ public class TestTags {
// colDesc.setDataBlockEncoding(DataBlockEncoding.NONE);
colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);
desc.addFamily(colDesc);
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.createTable(desc);
table = new HTable(TEST_UTIL.getConfiguration(), tableName);
@ -295,7 +296,7 @@ public class TestTags {
colDesc.setBlockCacheEnabled(true);
colDesc.setDataBlockEncoding(encoding);
desc.addFamily(colDesc);
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.createTable(desc);
try {
table = new HTable(TEST_UTIL.getConfiguration(), tableName);

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@ -59,7 +60,7 @@ import org.junit.experimental.categories.Category;
public class TestLogRollAbort {
private static final Log LOG = LogFactory.getLog(TestLogRolling.class);
private static MiniDFSCluster dfsCluster;
private static HBaseAdmin admin;
private static Admin admin;
private static MiniHBaseCluster cluster;
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

View File

@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -84,7 +85,7 @@ public class TestLogRolling {
private byte[] value;
private FileSystem fs;
private MiniDFSCluster dfsCluster;
private HBaseAdmin admin;
private Admin admin;
private MiniHBaseCluster cluster;
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

View File

@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
@ -101,8 +102,8 @@ public class RowResourceBase {
@Before
public void beforeMethod() throws Exception {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(TABLE)) {
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(TableName.valueOf(TABLE))) {
TEST_UTIL.deleteTable(Bytes.toBytes(TABLE));
}
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
@ -113,8 +114,8 @@ public class RowResourceBase {
@After
public void afterMethod() throws Exception {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(TABLE)) {
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(TableName.valueOf(TABLE))) {
TEST_UTIL.deleteTable(Bytes.toBytes(TABLE));
}
}

View File

@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -50,7 +51,7 @@ import org.junit.experimental.categories.Category;
@Category(MediumTests.class)
public class TestGzipFilter {
private static final String TABLE = "TestGzipFilter";
private static final TableName TABLE = TableName.valueOf("TestGzipFilter");
private static final String CFA = "a";
private static final String COLUMN_1 = CFA + ":1";
private static final String COLUMN_2 = CFA + ":2";
@ -68,11 +69,11 @@ public class TestGzipFilter {
REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
client = new Client(new Cluster().add("localhost",
REST_TEST_UTIL.getServletPort()));
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(TABLE)) {
return;
}
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
HTableDescriptor htd = new HTableDescriptor(TABLE);
htd.addFamily(new HColumnDescriptor(CFA));
admin.createTable(htd);
}

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.rest;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
@ -49,7 +50,7 @@ import static org.junit.Assert.assertEquals;
@Category(MediumTests.class)
public class TestMultiRowResource {
private static final String TABLE = "TestRowResource";
private static final TableName TABLE = TableName.valueOf("TestRowResource");
private static final String CFA = "a";
private static final String CFB = "b";
private static final String COLUMN_1 = CFA + ":1";
@ -82,11 +83,11 @@ public class TestMultiRowResource {
marshaller = context.createMarshaller();
unmarshaller = context.createUnmarshaller();
client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(TABLE)) {
return;
}
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
HTableDescriptor htd = new HTableDescriptor(TABLE);
htd.addFamily(new HColumnDescriptor(CFA));
htd.addFamily(new HColumnDescriptor(CFB));
admin.createTable(htd);

View File

@ -33,6 +33,7 @@ import javax.xml.bind.Unmarshaller;
import org.apache.commons.httpclient.Header;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@ -55,7 +56,7 @@ import org.junit.experimental.categories.Category;
@Category(MediumTests.class)
public class TestScannerResource {
private static final String TABLE = "TestScannerResource";
private static final TableName TABLE = TableName.valueOf("TestScannerResource");
private static final String NONEXISTENT_TABLE = "ThisTableDoesNotExist";
private static final String CFA = "a";
private static final String CFB = "b";
@ -73,7 +74,7 @@ public class TestScannerResource {
private static int expectedRows2;
private static Configuration conf;
static int insertData(Configuration conf, String tableName, String column, double prob)
static int insertData(Configuration conf, TableName tableName, String column, double prob)
throws IOException {
Random rng = new Random();
int count = 0;
@ -163,11 +164,11 @@ public class TestScannerResource {
ScannerModel.class);
marshaller = context.createMarshaller();
unmarshaller = context.createUnmarshaller();
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(TABLE)) {
return;
}
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
HTableDescriptor htd = new HTableDescriptor(TABLE);
htd.addFamily(new HColumnDescriptor(CFA));
htd.addFamily(new HColumnDescriptor(CFB));
admin.createTable(htd);

View File

@ -33,6 +33,7 @@ import javax.xml.bind.Unmarshaller;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -75,7 +76,7 @@ public class TestScannersWithFilters {
private static final Log LOG = LogFactory.getLog(TestScannersWithFilters.class);
private static final String TABLE = "TestScannersWithFilters";
private static final TableName TABLE = TableName.valueOf("TestScannersWithFilters");
private static final byte [][] ROWS_ONE = {
Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"),
@ -128,9 +129,9 @@ public class TestScannersWithFilters {
unmarshaller = context.createUnmarshaller();
client = new Client(new Cluster().add("localhost",
REST_TEST_UTIL.getServletPort()));
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (!admin.tableExists(TABLE)) {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
HTableDescriptor htd = new HTableDescriptor(TABLE);
htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
admin.createTable(htd);

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -66,7 +67,7 @@ import org.junit.experimental.categories.Category;
@Category(MediumTests.class)
public class TestScannersWithLabels {
private static final String TABLE = "TestScannersWithLabels";
private static final TableName TABLE = TableName.valueOf("TestScannersWithLabels");
private static final String CFA = "a";
private static final String CFB = "b";
private static final String COLUMN_1 = CFA + ":1";
@ -86,7 +87,7 @@ public class TestScannersWithLabels {
private static Unmarshaller unmarshaller;
private static Configuration conf;
private static int insertData(String tableName, String column, double prob) throws IOException {
private static int insertData(TableName tableName, String column, double prob) throws IOException {
Random rng = new Random();
int count = 0;
HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
@ -142,11 +143,11 @@ public class TestScannersWithLabels {
ScannerModel.class);
marshaller = context.createMarshaller();
unmarshaller = context.createUnmarshaller();
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(TABLE)) {
return;
}
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
HTableDescriptor htd = new HTableDescriptor(TABLE);
htd.addFamily(new HColumnDescriptor(CFA));
htd.addFamily(new HColumnDescriptor(CFB));
admin.createTable(htd);

View File

@ -29,6 +29,8 @@ import javax.xml.bind.JAXBException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
@ -95,8 +97,8 @@ public class TestSchemaResource {
TableSchemaModel model;
Response response;
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
assertFalse(admin.tableExists(TABLE1));
Admin admin = TEST_UTIL.getHBaseAdmin();
assertFalse(admin.tableExists(TableName.valueOf(TABLE1)));
// create the table
model = testTableSchemaModel.buildTestModel(TABLE1);
@ -133,7 +135,7 @@ public class TestSchemaResource {
// delete the table and make sure HBase concurs
response = client.delete(schemaPath);
assertEquals(response.getCode(), 200);
assertFalse(admin.tableExists(TABLE1));
assertFalse(admin.tableExists(TableName.valueOf(TABLE1)));
}
@Test
@ -142,8 +144,8 @@ public class TestSchemaResource {
TableSchemaModel model;
Response response;
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
assertFalse(admin.tableExists(TABLE2));
Admin admin = TEST_UTIL.getHBaseAdmin();
assertFalse(admin.tableExists(TableName.valueOf(TABLE2)));
// create the table
model = testTableSchemaModel.buildTestModel(TABLE2);
@ -184,7 +186,7 @@ public class TestSchemaResource {
// delete the table and make sure HBase concurs
response = client.delete(schemaPath);
assertEquals(response.getCode(), 200);
assertFalse(admin.tableExists(TABLE2));
assertFalse(admin.tableExists(TableName.valueOf(TABLE2)));
}
}

View File

@ -31,6 +31,7 @@ import javax.xml.bind.JAXBException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@ -56,7 +57,7 @@ import org.junit.experimental.categories.Category;
public class TestTableResource {
private static final Log LOG = LogFactory.getLog(TestTableResource.class);
private static String TABLE = "TestTableResource";
private static TableName TABLE = TableName.valueOf("TestTableResource");
private static String COLUMN_FAMILY = "test";
private static String COLUMN = COLUMN_FAMILY + ":qualifier";
private static Map<HRegionInfo, ServerName> regionMap;
@ -78,11 +79,11 @@ public class TestTableResource {
TableInfoModel.class,
TableListModel.class,
TableRegionModel.class);
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(TABLE)) {
return;
}
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
HTableDescriptor htd = new HTableDescriptor(TABLE);
htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
admin.createTable(htd);
HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
@ -106,7 +107,7 @@ public class TestTableResource {
Map<HRegionInfo, ServerName> m = table.getRegionLocations();
assertEquals(m.size(), 1);
// tell the master to split the table
admin.split(TABLE);
admin.split(TABLE.toBytes());
// give some time for the split to happen
long timeout = System.currentTimeMillis() + (15 * 1000);
@ -139,7 +140,7 @@ public class TestTableResource {
assertTrue(tables.hasNext());
while (tables.hasNext()) {
TableModel table = tables.next();
if (table.getName().equals(TABLE)) {
if (table.getName().equals(TABLE.getNameAsString())) {
found = true;
break;
}
@ -148,7 +149,7 @@ public class TestTableResource {
}
void checkTableInfo(TableInfoModel model) {
assertEquals(model.getName(), TABLE);
assertEquals(model.getName(), TABLE.getNameAsString());
Iterator<TableRegionModel> regions = model.getRegions().iterator();
assertTrue(regions.hasNext());
while (regions.hasNext()) {

View File

@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.ParseFilter;
@ -74,7 +75,7 @@ import org.xml.sax.XMLReader;
@Category(MediumTests.class)
public class TestTableScan {
private static final String TABLE = "TestScanResource";
private static final TableName TABLE = TableName.valueOf("TestScanResource");
private static final String CFA = "a";
private static final String CFB = "b";
private static final String COLUMN_1 = CFA + ":1";
@ -96,9 +97,9 @@ public class TestTableScan {
REST_TEST_UTIL.startServletContainer(conf);
client = new Client(new Cluster().add("localhost",
REST_TEST_UTIL.getServletPort()));
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (!admin.tableExists(TABLE)) {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
HTableDescriptor htd = new HTableDescriptor(TABLE);
htd.addFamily(new HColumnDescriptor(CFA));
htd.addFamily(new HColumnDescriptor(CFB));
admin.createTable(htd);

View File

@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@ -58,7 +59,7 @@ import org.junit.experimental.categories.Category;
@Category(MediumTests.class)
public class TestRemoteTable {
private static final String TABLE = "TestRemoteTable";
private static final TableName TABLE = TableName.valueOf("TestRemoteTable");
private static final byte[] ROW_1 = Bytes.toBytes("testrow1");
private static final byte[] ROW_2 = Bytes.toBytes("testrow2");
private static final byte[] ROW_3 = Bytes.toBytes("testrow3");
@ -88,12 +89,12 @@ public class TestRemoteTable {
@Before
public void before() throws Exception {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(TABLE)) {
if (admin.isTableEnabled(TABLE)) admin.disableTable(TABLE);
admin.deleteTable(TABLE);
}
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
HTableDescriptor htd = new HTableDescriptor(TABLE);
htd.addFamily(new HColumnDescriptor(COLUMN_1).setMaxVersions(3));
htd.addFamily(new HColumnDescriptor(COLUMN_2).setMaxVersions(3));
htd.addFamily(new HColumnDescriptor(COLUMN_3).setMaxVersions(3));
@ -116,7 +117,7 @@ public class TestRemoteTable {
remoteTable = new RemoteHTable(
new Client(new Cluster().add("localhost",
REST_TEST_UTIL.getServletPort())),
TEST_UTIL.getConfiguration(), TABLE);
TEST_UTIL.getConfiguration(), TABLE.toBytes());
}
@After

View File

@ -49,10 +49,13 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
@ -205,7 +208,7 @@ public class TestAccessController extends SecureTestUtil {
@Before
public void setUp() throws Exception {
// Create the test table (owner added to the _acl_ table)
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName());
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
hcd.setMaxVersions(100);
@ -911,7 +914,7 @@ public class TestAccessController extends SecureTestUtil {
HTable table = new HTable(conf, tableName);
try {
HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
Admin admin = TEST_UTIL.getHBaseAdmin();
TEST_UTIL.waitTableEnabled(admin, tableName.getName());
LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
loader.doBulkLoad(loadPath, table);
@ -1031,7 +1034,7 @@ public class TestAccessController extends SecureTestUtil {
final byte[] qualifier = Bytes.toBytes("q");
// create table
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(tableName)) {
admin.disableTable(tableName);
admin.deleteTable(tableName);
@ -1305,7 +1308,7 @@ public class TestAccessController extends SecureTestUtil {
final byte[] qualifier = Bytes.toBytes("q");
// create table
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(tableName)) {
admin.disableTable(tableName);
admin.deleteTable(tableName);
@ -1419,7 +1422,7 @@ public class TestAccessController extends SecureTestUtil {
final byte[] qualifier = Bytes.toBytes("q");
// create table
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(tableName)) {
admin.disableTable(tableName);
admin.deleteTable(tableName);
@ -1884,7 +1887,7 @@ public class TestAccessController extends SecureTestUtil {
Permission.Action.ADMIN, Permission.Action.CREATE, Permission.Action.READ,
Permission.Action.WRITE);
final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
final Admin admin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor htd = new HTableDescriptor(TEST_TABLE2);
htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
admin.createTable(htd);
@ -1955,7 +1958,7 @@ public class TestAccessController extends SecureTestUtil {
AccessTestAction listTablesAction = new AccessTestAction() {
@Override
public Object run() throws Exception {
HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
Admin admin = TEST_UTIL.getHBaseAdmin();
try {
admin.listTables();
} finally {
@ -1968,7 +1971,7 @@ public class TestAccessController extends SecureTestUtil {
AccessTestAction getTableDescAction = new AccessTestAction() {
@Override
public Object run() throws Exception {
HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
Admin admin = TEST_UTIL.getHBaseAdmin();
try {
admin.getTableDescriptor(TEST_TABLE.getTableName());
} finally {
@ -1997,12 +2000,14 @@ public class TestAccessController extends SecureTestUtil {
AccessTestAction deleteTableAction = new AccessTestAction() {
@Override
public Object run() throws Exception {
HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
HConnection unmanagedConnection = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
Admin admin = unmanagedConnection.getAdmin();
try {
admin.disableTable(TEST_TABLE.getTableName());
admin.deleteTable(TEST_TABLE.getTableName());
} finally {
admin.close();
unmanagedConnection.close();
}
return null;
}

View File

@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@ -123,7 +124,7 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil {
@Before
public void setUp() throws Exception {
// Create the test table (owner added to the _acl_ table)
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName());
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY1);
hcd.setMaxVersions(4);

View File

@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@ -124,7 +125,7 @@ public class TestCellACLs extends SecureTestUtil {
@Before
public void setUp() throws Exception {
// Create the test table (owner added to the _acl_ table)
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName());
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
hcd.setMaxVersions(4);

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@ -111,7 +112,7 @@ public class TestScanEarlyTermination extends SecureTestUtil {
@Before
public void setUp() throws Exception {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName());
htd.setOwner(USER_OWNER);
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY1);

View File

@ -39,6 +39,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.LargeTests;
@ -292,7 +293,7 @@ public class TestTablePermissions {
.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v1")));
table.put(new Put(Bytes.toBytes("row2"))
.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v2")));
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
admin.split(TEST_TABLE.getName());
// wait for split

View File

@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@ -725,7 +726,7 @@ public class TestVisibilityLabels {
@Test
public void testUserShouldNotDoDDLOpOnLabelsTable() throws Exception {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
try {
admin.disableTable(LABELS_TABLE_NAME);
fail("Lables table should not get disabled by user.");

View File

@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -518,7 +519,7 @@ public class TestVisibilityLabelsWithDeletes {
TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
HTable table = null;
try {
HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
HColumnDescriptor colDesc = new HColumnDescriptor(fam);
colDesc.setMaxVersions(5);
HTableDescriptor desc = new HTableDescriptor(tableName);
@ -600,7 +601,7 @@ public class TestVisibilityLabelsWithDeletes {
TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
HTable table = null;
try {
HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
HColumnDescriptor colDesc = new HColumnDescriptor(fam);
colDesc.setMaxVersions(5);
HTableDescriptor desc = new HTableDescriptor(tableName);
@ -682,7 +683,7 @@ public class TestVisibilityLabelsWithDeletes {
TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
HTable table = null;
try {
HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
HColumnDescriptor colDesc = new HColumnDescriptor(fam);
colDesc.setMaxVersions(5);
HTableDescriptor desc = new HTableDescriptor(tableName);
@ -738,7 +739,7 @@ public class TestVisibilityLabelsWithDeletes {
TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
HTable table = null;
try {
HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
HColumnDescriptor colDesc = new HColumnDescriptor(fam);
colDesc.setMaxVersions(5);
HTableDescriptor desc = new HTableDescriptor(tableName);
@ -927,7 +928,7 @@ public class TestVisibilityLabelsWithDeletes {
private HTable doPuts(TableName tableName) throws IOException, InterruptedIOException,
RetriesExhaustedWithDetailsException, InterruptedException {
HTable table;
HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
HColumnDescriptor colDesc = new HColumnDescriptor(fam);
colDesc.setMaxVersions(5);
HTableDescriptor desc = new HTableDescriptor(tableName);
@ -969,7 +970,7 @@ public class TestVisibilityLabelsWithDeletes {
private HTable doPutsWithDiffCols(TableName tableName) throws IOException,
InterruptedIOException, RetriesExhaustedWithDetailsException, InterruptedException {
HTable table;
HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
HColumnDescriptor colDesc = new HColumnDescriptor(fam);
colDesc.setMaxVersions(5);
HTableDescriptor desc = new HTableDescriptor(tableName);
@ -1004,7 +1005,7 @@ public class TestVisibilityLabelsWithDeletes {
private HTable doPutsWithoutVisibility(TableName tableName) throws IOException,
InterruptedIOException, RetriesExhaustedWithDetailsException, InterruptedException {
HTable table;
HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
HColumnDescriptor colDesc = new HColumnDescriptor(fam);
colDesc.setMaxVersions(5);
HTableDescriptor desc = new HTableDescriptor(tableName);
@ -1454,7 +1455,7 @@ public class TestVisibilityLabelsWithDeletes {
TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
HTable table = null;
try {
HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
HColumnDescriptor colDesc = new HColumnDescriptor(fam);
colDesc.setMaxVersions(5);
HTableDescriptor desc = new HTableDescriptor(tableName);
@ -1507,7 +1508,7 @@ public class TestVisibilityLabelsWithDeletes {
TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
HTable table = null;
try {
HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
HColumnDescriptor colDesc = new HColumnDescriptor(fam);
colDesc.setMaxVersions(5);
HTableDescriptor desc = new HTableDescriptor(tableName);
@ -2916,7 +2917,7 @@ public class TestVisibilityLabelsWithDeletes {
TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
HTable table = null;
try {
HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
HColumnDescriptor colDesc = new HColumnDescriptor(fam);
colDesc.setMaxVersions(5);
HTableDescriptor desc = new HTableDescriptor(tableName);

View File

@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@ -84,7 +85,7 @@ public class SnapshotTestingUtils {
* @throws IOException
* if the admin operation fails
*/
public static void assertNoSnapshots(HBaseAdmin admin) throws IOException {
public static void assertNoSnapshots(Admin admin) throws IOException {
assertEquals("Have some previous snapshots", 0, admin.listSnapshots()
.size());
}
@ -94,7 +95,7 @@ public class SnapshotTestingUtils {
* name and table match the passed in parameters.
*/
public static List<SnapshotDescription> assertExistsMatchingSnapshot(
HBaseAdmin admin, String snapshotName, TableName tableName)
Admin admin, String snapshotName, TableName tableName)
throws IOException {
// list the snapshot
List<SnapshotDescription> snapshots = admin.listSnapshots();
@ -114,7 +115,7 @@ public class SnapshotTestingUtils {
/**
* Make sure that there is only one snapshot returned from the master
*/
public static void assertOneSnapshotThatMatches(HBaseAdmin admin,
public static void assertOneSnapshotThatMatches(Admin admin,
SnapshotDescription snapshot) throws IOException {
assertOneSnapshotThatMatches(admin, snapshot.getName(),
TableName.valueOf(snapshot.getTable()));
@ -125,7 +126,7 @@ public class SnapshotTestingUtils {
* name and table match the passed in parameters.
*/
public static List<SnapshotDescription> assertOneSnapshotThatMatches(
HBaseAdmin admin, String snapshotName, TableName tableName)
Admin admin, String snapshotName, TableName tableName)
throws IOException {
// list the snapshot
List<SnapshotDescription> snapshots = admin.listSnapshots();
@ -142,7 +143,7 @@ public class SnapshotTestingUtils {
* name and table match the passed in parameters.
*/
public static List<SnapshotDescription> assertOneSnapshotThatMatches(
HBaseAdmin admin, byte[] snapshot, TableName tableName) throws IOException {
Admin admin, byte[] snapshot, TableName tableName) throws IOException {
return assertOneSnapshotThatMatches(admin, Bytes.toString(snapshot),
tableName);
}
@ -153,7 +154,7 @@ public class SnapshotTestingUtils {
*/
public static void confirmSnapshotValid(
SnapshotDescription snapshotDescriptor, TableName tableName,
byte[] testFamily, Path rootDir, HBaseAdmin admin, FileSystem fs)
byte[] testFamily, Path rootDir, Admin admin, FileSystem fs)
throws IOException {
ArrayList nonEmptyTestFamilies = new ArrayList(1);
nonEmptyTestFamilies.add(testFamily);
@ -166,7 +167,7 @@ public class SnapshotTestingUtils {
*/
public static void confirmEmptySnapshotValid(
SnapshotDescription snapshotDescriptor, TableName tableName,
byte[] testFamily, Path rootDir, HBaseAdmin admin, FileSystem fs)
byte[] testFamily, Path rootDir, Admin admin, FileSystem fs)
throws IOException {
ArrayList emptyTestFamilies = new ArrayList(1);
emptyTestFamilies.add(testFamily);
@ -183,7 +184,7 @@ public class SnapshotTestingUtils {
public static void confirmSnapshotValid(
SnapshotDescription snapshotDescriptor, TableName tableName,
List<byte[]> nonEmptyTestFamilies, List<byte[]> emptyTestFamilies,
Path rootDir, HBaseAdmin admin, FileSystem fs) throws IOException {
Path rootDir, Admin admin, FileSystem fs) throws IOException {
final Configuration conf = admin.getConfiguration();
// check snapshot dir
@ -265,14 +266,14 @@ public class SnapshotTestingUtils {
* Take snapshot with maximum of numTries attempts, ignoring CorruptedSnapshotException
* except for the last CorruptedSnapshotException
*/
public static void snapshot(HBaseAdmin admin,
public static void snapshot(Admin admin,
final String snapshotName, final String tableName,
SnapshotDescription.Type type, int numTries) throws IOException {
int tries = 0;
CorruptedSnapshotException lastEx = null;
while (tries++ < numTries) {
try {
admin.snapshot(snapshotName, tableName, type);
admin.snapshot(snapshotName, TableName.valueOf(tableName), type);
return;
} catch (CorruptedSnapshotException cse) {
LOG.warn("Got CorruptedSnapshotException", cse);
@ -282,12 +283,12 @@ public class SnapshotTestingUtils {
throw lastEx;
}
public static void cleanupSnapshot(HBaseAdmin admin, byte[] tableName)
public static void cleanupSnapshot(Admin admin, byte[] tableName)
throws IOException {
SnapshotTestingUtils.cleanupSnapshot(admin, Bytes.toString(tableName));
}
public static void cleanupSnapshot(HBaseAdmin admin, String snapshotName)
public static void cleanupSnapshot(Admin admin, String snapshotName)
throws IOException {
// delete the taken snapshot
admin.deleteSnapshot(snapshotName);
@ -356,7 +357,7 @@ public class SnapshotTestingUtils {
* not empty. Note that this will leave the table disabled
* in the case of an offline snapshot.
*/
public static void createSnapshotAndValidate(HBaseAdmin admin,
public static void createSnapshotAndValidate(Admin admin,
TableName tableName, String familyName, String snapshotNameString,
Path rootDir, FileSystem fs, boolean onlineSnapshot)
throws Exception {
@ -370,7 +371,7 @@ public class SnapshotTestingUtils {
* Take a snapshot of the specified table and verify the given families.
* Note that this will leave the table disabled in the case of an offline snapshot.
*/
public static void createSnapshotAndValidate(HBaseAdmin admin,
public static void createSnapshotAndValidate(Admin admin,
TableName tableName, List<byte[]> nonEmptyFamilyNames, List<byte[]> emptyFamilyNames,
String snapshotNameString, Path rootDir, FileSystem fs, boolean onlineSnapshot)
throws Exception {
@ -702,7 +703,7 @@ public class SnapshotTestingUtils {
table.put(put);
}
public static void deleteAllSnapshots(final HBaseAdmin admin)
public static void deleteAllSnapshots(final Admin admin)
throws IOException {
// Delete all the snapshots
for (SnapshotDescription snapshot: admin.listSnapshots()) {
@ -729,7 +730,7 @@ public class SnapshotTestingUtils {
}
}
public static void verifyReplicasCameOnline(TableName tableName, HBaseAdmin admin,
public static void verifyReplicasCameOnline(TableName tableName, Admin admin,
int regionReplication) throws IOException {
List<HRegionInfo> regions = admin.getTableRegions(tableName);
HashSet<HRegionInfo> set = new HashSet<HRegionInfo>();

View File

@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
@ -77,7 +78,7 @@ public class TestExportSnapshot {
private byte[] snapshotName;
private int tableNumFiles;
private TableName tableName;
private HBaseAdmin admin;
private Admin admin;
public static void setUpBaseConf(Configuration conf) {
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);

View File

@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.ScannerCallable;
@ -77,11 +78,9 @@ public class TestFlushSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestFlushSnapshotFromClient.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static final int NUM_RS = 2;
private static final String STRING_TABLE_NAME = "test";
private static final byte[] TEST_FAM = Bytes.toBytes("fam");
private static final byte[] TEST_QUAL = Bytes.toBytes("q");
private static final TableName TABLE_NAME =
TableName.valueOf(STRING_TABLE_NAME);
private static final TableName TABLE_NAME = TableName.valueOf("test");
private final int DEFAULT_NUM_ROWS = 100;
/**
@ -142,7 +141,7 @@ public class TestFlushSnapshotFromClient {
*/
@Test (timeout=300000)
public void testFlushTableSnapshot() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
@ -157,7 +156,7 @@ public class TestFlushSnapshotFromClient {
// take a snapshot of the enabled table
String snapshotString = "offlineTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString);
admin.snapshot(snapshotString, STRING_TABLE_NAME, SnapshotDescription.Type.FLUSH);
admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.FLUSH);
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
@ -181,7 +180,7 @@ public class TestFlushSnapshotFromClient {
*/
@Test(timeout=30000)
public void testSkipFlushTableSnapshot() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
@ -196,7 +195,7 @@ public class TestFlushSnapshotFromClient {
// take a snapshot of the enabled table
String snapshotString = "skipFlushTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString);
admin.snapshot(snapshotString, STRING_TABLE_NAME, SnapshotDescription.Type.SKIPFLUSH);
admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.SKIPFLUSH);
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
@ -225,7 +224,7 @@ public class TestFlushSnapshotFromClient {
*/
@Test (timeout=300000)
public void testFlushTableSnapshotWithProcedure() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
@ -241,7 +240,7 @@ public class TestFlushSnapshotFromClient {
String snapshotString = "offlineTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString);
Map<String, String> props = new HashMap<String, String>();
props.put("table", STRING_TABLE_NAME);
props.put("table", TABLE_NAME.getNameAsString());
admin.execProcedure(SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION,
snapshotString, props);
@ -265,19 +264,19 @@ public class TestFlushSnapshotFromClient {
@Test (timeout=300000)
public void testSnapshotFailsOnNonExistantTable() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
String tableName = "_not_a_table";
TableName tableName = TableName.valueOf("_not_a_table");
// make sure the table doesn't exist
boolean fail = false;
do {
try {
admin.getTableDescriptor(Bytes.toBytes(tableName));
admin.getTableDescriptor(tableName);
fail = true;
LOG.error("Table:" + tableName + " already exists, checking a new name");
tableName = tableName+"!";
tableName = TableName.valueOf(tableName+"!");
} catch (TableNotFoundException e) {
fail = false;
}
@ -294,7 +293,7 @@ public class TestFlushSnapshotFromClient {
@Test(timeout = 300000)
public void testAsyncFlushSnapshot() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("asyncSnapshot")
.setTable(TABLE_NAME.getNameAsString())
.setType(SnapshotDescription.Type.FLUSH)
@ -316,7 +315,7 @@ public class TestFlushSnapshotFromClient {
@Test (timeout=300000)
public void testSnapshotStateAfterMerge() throws Exception {
int numRows = DEFAULT_NUM_ROWS;
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// load the table so we have some data
@ -324,12 +323,12 @@ public class TestFlushSnapshotFromClient {
// Take a snapshot
String snapshotBeforeMergeName = "snapshotBeforeMerge";
admin.snapshot(snapshotBeforeMergeName, STRING_TABLE_NAME, SnapshotDescription.Type.FLUSH);
admin.snapshot(snapshotBeforeMergeName, TABLE_NAME, SnapshotDescription.Type.FLUSH);
// Clone the table
String cloneBeforeMergeName = "cloneBeforeMerge";
TableName cloneBeforeMergeName = TableName.valueOf("cloneBeforeMerge");
admin.cloneSnapshot(snapshotBeforeMergeName, cloneBeforeMergeName);
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TableName.valueOf(cloneBeforeMergeName));
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneBeforeMergeName);
// Merge two regions
List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
@ -351,13 +350,13 @@ public class TestFlushSnapshotFromClient {
assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());
// Clone the table
String cloneAfterMergeName = "cloneAfterMerge";
TableName cloneAfterMergeName = TableName.valueOf("cloneAfterMerge");
admin.cloneSnapshot(snapshotBeforeMergeName, cloneAfterMergeName);
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TableName.valueOf(cloneAfterMergeName));
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneAfterMergeName);
SnapshotTestingUtils.verifyRowCount(UTIL, TABLE_NAME, numRows);
SnapshotTestingUtils.verifyRowCount(UTIL, TableName.valueOf(cloneBeforeMergeName), numRows);
SnapshotTestingUtils.verifyRowCount(UTIL, TableName.valueOf(cloneAfterMergeName), numRows);
SnapshotTestingUtils.verifyRowCount(UTIL, cloneBeforeMergeName, numRows);
SnapshotTestingUtils.verifyRowCount(UTIL, cloneAfterMergeName, numRows);
// test that we can delete the snapshot
UTIL.deleteTable(cloneAfterMergeName);
@ -367,7 +366,7 @@ public class TestFlushSnapshotFromClient {
@Test (timeout=300000)
public void testTakeSnapshotAfterMerge() throws Exception {
int numRows = DEFAULT_NUM_ROWS;
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// load the table so we have some data
@ -393,16 +392,16 @@ public class TestFlushSnapshotFromClient {
// Take a snapshot
String snapshotName = "snapshotAfterMerge";
SnapshotTestingUtils.snapshot(admin, snapshotName, STRING_TABLE_NAME,
SnapshotTestingUtils.snapshot(admin, snapshotName, TABLE_NAME.getNameAsString(),
SnapshotDescription.Type.FLUSH, 3);
// Clone the table
String cloneName = "cloneMerge";
TableName cloneName = TableName.valueOf("cloneMerge");
admin.cloneSnapshot(snapshotName, cloneName);
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TableName.valueOf(cloneName));
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneName);
SnapshotTestingUtils.verifyRowCount(UTIL, TABLE_NAME, numRows);
SnapshotTestingUtils.verifyRowCount(UTIL, TableName.valueOf(cloneName), numRows);
SnapshotTestingUtils.verifyRowCount(UTIL, cloneName, numRows);
// test that we can delete the snapshot
UTIL.deleteTable(cloneName);
@ -414,7 +413,7 @@ public class TestFlushSnapshotFromClient {
@Test (timeout=300000)
public void testFlushCreateListDestroy() throws Exception {
LOG.debug("------- Starting Snapshot test -------------");
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// load the table so we have some data
@ -423,8 +422,7 @@ public class TestFlushSnapshotFromClient {
String snapshotName = "flushSnapshotCreateListDestroy";
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
SnapshotTestingUtils.createSnapshotAndValidate(admin,
TableName.valueOf(STRING_TABLE_NAME), Bytes.toString(TEST_FAM),
SnapshotTestingUtils.createSnapshotAndValidate(admin, TABLE_NAME, Bytes.toString(TEST_FAM),
snapshotName, rootDir, fs, true);
}
@ -435,12 +433,10 @@ public class TestFlushSnapshotFromClient {
*/
@Test(timeout=300000)
public void testConcurrentSnapshottingAttempts() throws IOException, InterruptedException {
final String STRING_TABLE2_NAME = STRING_TABLE_NAME + "2";
final TableName TABLE2_NAME =
TableName.valueOf(STRING_TABLE2_NAME);
final TableName TABLE2_NAME = TableName.valueOf(TABLE_NAME + "2");
int ssNum = 20;
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// create second testing table
@ -460,7 +456,7 @@ public class TestFlushSnapshotFromClient {
@Override
public void run() {
try {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
LOG.info("Submitting snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss));
admin.takeSnapshotAsync(ss);
} catch (Exception e) {
@ -541,7 +537,7 @@ public class TestFlushSnapshotFromClient {
private void waitRegionsAfterMerge(final long numRegionsAfterMerge)
throws IOException, InterruptedException {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Admin admin = UTIL.getHBaseAdmin();
// Verify that there's one region less
long startTime = System.currentTimeMillis();
while (admin.getTableRegions(TABLE_NAME).size() != numRegionsAfterMerge) {

View File

@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@ -60,7 +61,7 @@ public class TestRestoreFlushSnapshotFromClient {
private int snapshot0Rows;
private int snapshot1Rows;
private TableName tableName;
private HBaseAdmin admin;
private Admin admin;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
@ -162,7 +163,7 @@ public class TestRestoreFlushSnapshotFromClient {
@Test(expected=SnapshotDoesNotExistException.class)
public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
String snapshotName = "random-snapshot-" + System.currentTimeMillis();
String tableName = "random-table-" + System.currentTimeMillis();
TableName tableName = TableName.valueOf("random-table-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName, tableName);
}

View File

@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
@ -261,7 +262,7 @@ public class TestHBaseFsck {
* This method is used to undeploy a region -- close it and attempt to
* remove its state from the Master.
*/
private void undeployRegion(HBaseAdmin admin, ServerName sn,
private void undeployRegion(Admin admin, ServerName sn,
HRegionInfo hri) throws IOException, InterruptedException {
try {
HBaseFsckRepair.closeRegionSilentlyAndWait(admin, sn, hri);
@ -482,7 +483,7 @@ public class TestHBaseFsck {
Path tableinfo = null;
try {
setupTable(table);
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
Path hbaseTableDir = FSUtils.getTableDir(
FSUtils.getRootDir(conf), table);
@@ -585,8 +586,7 @@ public class TestHBaseFsck {
/**
* Get region info from local cluster.
*/
Map<ServerName, List<String>> getDeployedHRIs(
final HBaseAdmin admin) throws IOException {
Map<ServerName, List<String>> getDeployedHRIs(final Admin admin) throws IOException {
ClusterStatus status = admin.getClusterStatus();
Collection<ServerName> regionServers = status.getServers();
Map<ServerName, List<String>> mm =
@@ -645,7 +645,7 @@ public class TestHBaseFsck {
// different regions with the same start/endkeys since it doesn't
// differentiate on ts/regionId! We actually need to recheck
// deployments!
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
while (findDeployedHSI(getDeployedHRIs(admin), hriDupe) == null) {
Thread.sleep(250);
}
@@ -803,7 +803,7 @@ public class TestHBaseFsck {
}
}
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
HBaseFsckRepair.closeRegionSilentlyAndWait(admin,
cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI());
admin.offline(regionName);
@@ -1388,7 +1388,7 @@ public class TestHBaseFsck {
HRegionInfo hri = location.getRegionInfo();
// do a regular split
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
byte[] regionName = location.getRegionInfo().getRegionName();
admin.split(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
TestEndToEndSplitTransaction.blockUntilRegionSplit(
@@ -1438,7 +1438,7 @@ public class TestHBaseFsck {
HRegionInfo hri = location.getRegionInfo();
// do a regular split
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
byte[] regionName = location.getRegionInfo().getRegionName();
admin.split(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
TestEndToEndSplitTransaction.blockUntilRegionSplit(
@@ -1831,7 +1831,7 @@ public class TestHBaseFsck {
assertEquals(hfcc.getMissing().size(), missing);
// its been fixed, verify that we can enable
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.enableTableAsync(table);
while (!admin.isTableEnabled(table)) {
try {
@@ -2211,7 +2211,14 @@ public class TestHBaseFsck {
HRegionInfo hri = metaLocation.getRegionInfo();
if (unassign) {
LOG.info("Undeploying meta region " + hri + " from server " + hsa);
undeployRegion(new HBaseAdmin(conf), hsa, hri);
HConnection unmanagedConnection = HConnectionManager.createConnection(conf);
Admin admin = unmanagedConnection.getAdmin();
try {
undeployRegion(admin, hsa, hri);
} finally {
admin.close();
unmanagedConnection.close();
}
}
if (regionInfoOnly) {
@@ -2291,7 +2298,7 @@ public class TestHBaseFsck {
assertNotEquals(region1, region2);
// do a region merge
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
Admin admin = TEST_UTIL.getHBaseAdmin();
admin.mergeRegions(region1.getEncodedNameAsBytes(),
region2.getEncodedNameAsBytes(), false);

Some files were not shown because too many files have changed in this diff Show More