HBASE-11068 Update code to use Admin factory method instead of constructor

stack 2014-08-04 11:42:38 -07:00
parent 757b13dea4
commit e91e2659a7
101 changed files with 522 additions and 432 deletions
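
The change is mechanical but wide: every call site that constructed an HBaseAdmin directly, or that passed String/byte[] table names, moves to the Admin interface and TableName. A minimal sketch of the before/after pattern (the HConnectionManager/HConnection#getAdmin() factory route and the table name "myTable" are illustrative assumptions, not part of this diff):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;

public class AdminFactorySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Before this commit, callers constructed the concrete class directly
    // and addressed tables by String or byte[]:
    //   HBaseAdmin admin = new HBaseAdmin(conf);
    //   admin.tableExists("myTable");

    // After: obtain the Admin interface from a connection factory and
    // address tables through TableName.
    HConnection connection = HConnectionManager.createConnection(conf);
    Admin admin = connection.getAdmin();
    try {
      System.out.println(admin.tableExists(TableName.valueOf("myTable")));
    } finally {
      admin.close();
      connection.close();
    }
  }
}

The per-file hunks below apply this same substitution across the integration tests, chaos actions, Canary, and HBaseFsckRepair.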

View File

@@ -126,7 +126,7 @@ public class IntegrationTestIngest extends IntegrationTestBase {
   }

   private void deleteTableIfNecessary() throws IOException {
-    if (util.getHBaseAdmin().tableExists(getTablename())) {
+    if (util.getHBaseAdmin().tableExists(TableName.valueOf(getTablename()))) {
       util.deleteTable(Bytes.toBytes(getTablename()));
     }
   }

View File

@@ -21,6 +21,7 @@ import java.io.IOException;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Waiter.Predicate;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
 import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -78,20 +79,20 @@ public class IntegrationTestIngestWithEncryption extends IntegrationTestIngest {
     // Update the test table schema so HFiles from this point will be written with
     // encryption features enabled.
-    final HBaseAdmin admin = util.getHBaseAdmin();
+    final Admin admin = util.getHBaseAdmin();
     HTableDescriptor tableDescriptor =
-        new HTableDescriptor(admin.getTableDescriptor(Bytes.toBytes(getTablename())));
+        new HTableDescriptor(admin.getTableDescriptor(TableName.valueOf(getTablename())));
     for (HColumnDescriptor columnDescriptor: tableDescriptor.getColumnFamilies()) {
       columnDescriptor.setEncryptionType("AES");
       LOG.info("Updating CF schema for " + getTablename() + "." +
         columnDescriptor.getNameAsString());
-      admin.disableTable(getTablename());
-      admin.modifyColumn(getTablename(), columnDescriptor);
-      admin.enableTable(getTablename());
+      admin.disableTable(TableName.valueOf(getTablename()));
+      admin.modifyColumn(TableName.valueOf(getTablename()), columnDescriptor);
+      admin.enableTable(TableName.valueOf(getTablename()));
       util.waitFor(30000, 1000, true, new Predicate<IOException>() {
         @Override
         public boolean evaluate() throws IOException {
-          return admin.isTableAvailable(getTablename());
+          return admin.isTableAvailable(TableName.valueOf(getTablename()));
         }
       });
     }

View File

@@ -24,6 +24,7 @@ import java.util.concurrent.TimeUnit;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
@@ -49,7 +50,7 @@ public class IntegrationTestManyRegions {
   protected static final Log LOG
       = LogFactory.getLog(IntegrationTestManyRegions.class);

-  protected static final String TABLE_NAME = CLASS_NAME;
+  protected static final TableName TABLE_NAME = TableName.valueOf(CLASS_NAME);
   protected static final String COLUMN_NAME = "f";
   protected static final String REGION_COUNT_KEY
       = String.format("hbase.%s.regions", CLASS_NAME);
@@ -80,7 +81,7 @@ public class IntegrationTestManyRegions {
     util.initializeCluster(REGION_SERVER_COUNT);
     LOG.info("Cluster initialized");

-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     if (admin.tableExists(TABLE_NAME)) {
       LOG.info(String.format("Deleting existing table %s.", TABLE_NAME));
       if (admin.isTableEnabled(TABLE_NAME)) admin.disableTable(TABLE_NAME);
@@ -93,7 +94,7 @@ public class IntegrationTestManyRegions {
   @After
   public void tearDown() throws IOException {
     LOG.info("Cleaning up after test.");
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     if (admin.tableExists(TABLE_NAME)) {
       if (admin.isTableEnabled(TABLE_NAME)) admin.disableTable(TABLE_NAME);
       admin.deleteTable(TABLE_NAME);
@@ -122,10 +123,10 @@ public class IntegrationTestManyRegions {
   private static class Worker implements Runnable {
     private final CountDownLatch doneSignal;
-    private final HBaseAdmin admin;
+    private final Admin admin;
     private boolean success = false;

-    public Worker(final CountDownLatch doneSignal, final HBaseAdmin admin) {
+    public Worker(final CountDownLatch doneSignal, final Admin admin) {
       this.doneSignal = doneSignal;
       this.admin = admin;
     }
@@ -137,7 +138,7 @@ public class IntegrationTestManyRegions {
     @Override
     public void run() {
       long startTime, endTime;
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
+      HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
       desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
       SplitAlgorithm algo = new RegionSplitter.HexStringSplit();
       byte[][] splits = algo.split(REGION_COUNT);

View File

@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.chaos.actions.RestartRsHoldingTableAction;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
 import org.apache.hadoop.hbase.chaos.policies.PeriodicRandomActionPolicy;
 import org.apache.hadoop.hbase.chaos.policies.Policy;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
@@ -86,9 +87,9 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
   */
  static class PerfEvalCallable implements Callable<TimingResult> {
    private final Queue<String> argv = new LinkedList<String>();
-    private final HBaseAdmin admin;
+    private final Admin admin;

-    public PerfEvalCallable(HBaseAdmin admin, String argv) {
+    public PerfEvalCallable(Admin admin, String argv) {
      // TODO: this API is awkward, should take HConnection, not HBaseAdmin
      this.admin = admin;
      this.argv.addAll(Arrays.asList(argv.split(" ")));

View File

@@ -49,8 +49,8 @@ import org.junit.Assert;
 @InterfaceAudience.Private
 public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
   private static final Log LOG = LogFactory.getLog(StripeCompactionsPerformanceEvaluation.class);
-  private static final String TABLE_NAME =
-      StripeCompactionsPerformanceEvaluation.class.getSimpleName();
+  private static final TableName TABLE_NAME =
+      TableName.valueOf(StripeCompactionsPerformanceEvaluation.class.getSimpleName());
   private static final byte[] COLUMN_FAMILY = Bytes.toBytes("CF");
   private static final int MIN_NUM_SERVERS = 1;
@@ -199,9 +199,8 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
     status(String.format("%s test starting on %d servers; preloading 0 to %d and writing to %d",
         description, numServers, startKey, endKey));

-    TableName tn = TableName.valueOf(TABLE_NAME);
     if (preloadKeys > 0) {
-      MultiThreadedWriter preloader = new MultiThreadedWriter(dataGen, conf, tn);
+      MultiThreadedWriter preloader = new MultiThreadedWriter(dataGen, conf, TABLE_NAME);
       long time = System.currentTimeMillis();
       preloader.start(0, startKey, writeThreads);
       preloader.waitForFinish();
@@ -214,8 +213,8 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
       Thread.sleep(waitTime);
     }

-    MultiThreadedWriter writer = new MultiThreadedWriter(dataGen, conf, tn);
-    MultiThreadedReader reader = new MultiThreadedReader(dataGen, conf, tn, 100);
+    MultiThreadedWriter writer = new MultiThreadedWriter(dataGen, conf, TABLE_NAME);
+    MultiThreadedReader reader = new MultiThreadedReader(dataGen, conf, TABLE_NAME, 100);
     // reader.getMetrics().enable();
     reader.linkToWriter(writer);

View File

@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -129,7 +130,7 @@ public class Action {
     LOG.info("Moving " + victimRegions.size() + " regions from " + fromServers.size()
         + " servers to " + toServers.size() + " different servers");

-    HBaseAdmin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
+    Admin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
     for (byte[] victimRegion : victimRegions) {
       int targetIx = RandomUtils.nextInt(toServers.size());
       admin.move(victimRegion, Bytes.toBytes(toServers.get(targetIx).getServerName()));
@@ -137,7 +138,7 @@ public class Action {
   }

   protected void forceBalancer() throws Exception {
-    HBaseAdmin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
+    Admin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
     boolean result = admin.balancer();
     if (!result) {
       LOG.error("Balancer didn't succeed");

View File

@@ -23,6 +23,8 @@ import java.io.IOException;
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -30,13 +32,11 @@ import org.apache.hadoop.hbase.util.Bytes;
 * Action the adds a column family to a table.
 */
 public class AddColumnAction extends Action {
-  private final byte[] tableName;
-  private final String tableNameString;
-  private HBaseAdmin admin;
+  private final TableName tableName;
+  private Admin admin;

   public AddColumnAction(String tableName) {
-    tableNameString = tableName;
-    this.tableName = Bytes.toBytes(tableName);
+    this.tableName = TableName.valueOf(tableName);
   }
@@ -55,7 +55,7 @@ public class AddColumnAction extends Action {
       columnDescriptor = new HColumnDescriptor(RandomStringUtils.randomAlphabetic(5));
     }

-    LOG.debug("Performing action: Adding " + columnDescriptor + " to " + tableNameString);
+    LOG.debug("Performing action: Adding " + columnDescriptor + " to " + tableName);
     tableDescriptor.addFamily(columnDescriptor);
     admin.modifyTable(tableName, tableDescriptor);

View File

@@ -23,6 +23,8 @@ import java.util.Random;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -32,30 +34,27 @@ import org.apache.hadoop.hbase.util.Bytes;
 * table
 */
 public class ChangeBloomFilterAction extends Action {
-  private final byte[] tableNameBytes;
   private final long sleepTime;
-  private final String tableName;
+  private final TableName tableName;

   public ChangeBloomFilterAction(String tableName) {
     this(-1, tableName);
   }

   public ChangeBloomFilterAction(int sleepTime, String tableName) {
-    this.tableNameBytes = Bytes.toBytes(tableName);
     this.sleepTime = sleepTime;
-    this.tableName = tableName;
+    this.tableName = TableName.valueOf(tableName);
   }

   @Override
   public void perform() throws Exception {
     Random random = new Random();
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();

     LOG.info("Performing action: Change bloom filter on all columns of table "
         + tableName);
-    HTableDescriptor tableDescriptor = admin.getTableDescriptor(Bytes
-        .toBytes(tableName));
+    HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
     HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();

     if (columnDescriptors == null || columnDescriptors.length == 0) {

View File

@@ -23,6 +23,8 @@ import java.util.Random;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -31,15 +33,15 @@ import org.apache.hadoop.hbase.util.Bytes;
 * Action that changes the compression algorithm on a column family from a list of tables.
 */
 public class ChangeCompressionAction extends Action {
-  private final byte[] tableName;
+  private final TableName tableName;
   private final String tableNameString;
-  private HBaseAdmin admin;
+  private Admin admin;
   private Random random;

   public ChangeCompressionAction(String tableName) {
     tableNameString = tableName;
-    this.tableName = Bytes.toBytes(tableName);
+    this.tableName = TableName.valueOf(tableName);
     this.random = new Random();
   }

View File

@@ -23,6 +23,8 @@ import java.util.Random;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -31,15 +33,13 @@ import org.apache.hadoop.hbase.util.Bytes;
 * Action that changes the encoding on a column family from a list of tables.
 */
 public class ChangeEncodingAction extends Action {
-  private final byte[] tableName;
-  private final String tableNameString;
+  private final TableName tableName;

-  private HBaseAdmin admin;
+  private Admin admin;
   private Random random;

   public ChangeEncodingAction(String tableName) {
-    tableNameString = tableName;
-    this.tableName = Bytes.toBytes(tableName);
+    this.tableName = TableName.valueOf(tableName);
     this.random = new Random();
   }
@@ -58,7 +58,7 @@ public class ChangeEncodingAction extends Action {
       return;
     }

-    LOG.debug("Performing action: Changing encodings on " + tableNameString);
+    LOG.debug("Performing action: Changing encodings on " + tableName);
     // possible DataBlockEncoding id's
     int[] possibleIds = {0, 2, 3, 4/*, 6*/};
     for (HColumnDescriptor descriptor : columnDescriptors) {

View File

@@ -23,6 +23,8 @@ import java.util.Random;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -32,15 +34,15 @@ import org.apache.hadoop.hbase.util.Bytes;
 * Always keeps at least 1 as the number of versions.
 */
 public class ChangeVersionsAction extends Action {
-  private final byte[] tableName;
+  private final TableName tableName;
   private final String tableNameString;

-  private HBaseAdmin admin;
+  private Admin admin;
   private Random random;

   public ChangeVersionsAction(String tableName) {
     tableNameString = tableName;
-    this.tableName = Bytes.toBytes(tableName);
+    this.tableName = TableName.valueOf(tableName);
     this.random = new Random();
   }

View File

@@ -23,7 +23,9 @@ import java.util.List;
 import org.apache.commons.lang.math.RandomUtils;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -31,10 +33,9 @@ import org.apache.hadoop.hbase.util.Bytes;
 * Region that queues a compaction of a random region from the table.
 */
 public class CompactRandomRegionOfTableAction extends Action {
-  private final byte[] tableNameBytes;
   private final int majorRatio;
   private final long sleepTime;
-  private final String tableName;
+  private final TableName tableName;

   public CompactRandomRegionOfTableAction(
       String tableName, float majorRatio) {
@@ -43,21 +44,20 @@ public class CompactRandomRegionOfTableAction extends Action {
   public CompactRandomRegionOfTableAction(
       int sleepTime, String tableName, float majorRatio) {
-    this.tableNameBytes = Bytes.toBytes(tableName);
     this.majorRatio = (int) (100 * majorRatio);
     this.sleepTime = sleepTime;
-    this.tableName = tableName;
+    this.tableName = TableName.valueOf(tableName);
   }

   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     boolean major = RandomUtils.nextInt(100) < majorRatio;

     LOG.info("Performing action: Compact random region of table "
         + tableName + ", major=" + major);
-    List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
+    List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
       LOG.info("Table " + tableName + " doesn't have regions to compact");
       return;

View File

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.chaos.actions;

 import org.apache.commons.lang.math.RandomUtils;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -47,7 +48,7 @@ public class CompactTableAction extends Action {
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     boolean major = RandomUtils.nextInt(100) < majorRatio;

     LOG.info("Performing action: Compact table " + tableName + ", major=" + major);

View File

@@ -22,7 +22,9 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -30,27 +32,25 @@ import org.apache.hadoop.hbase.util.Bytes;
 * Action that tries to flush a random region of a table.
 */
 public class FlushRandomRegionOfTableAction extends Action {
-  private final byte[] tableNameBytes;
   private final long sleepTime;
-  private final String tableName;
+  private final TableName tableName;

   public FlushRandomRegionOfTableAction(String tableName) {
    this (-1, tableName);
   }

   public FlushRandomRegionOfTableAction(int sleepTime, String tableName) {
-    this.tableNameBytes = Bytes.toBytes(tableName);
     this.sleepTime = sleepTime;
-    this.tableName = tableName;
+    this.tableName = TableName.valueOf(tableName);
   }

   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();

     LOG.info("Performing action: Flush random region of table " + tableName);
-    List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
+    List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
       LOG.info("Table " + tableName + " doesn't have regions to flush");
       return;

View File

@@ -19,6 +19,8 @@
 package org.apache.hadoop.hbase.chaos.actions;

 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -26,28 +28,26 @@ import org.apache.hadoop.hbase.util.Bytes;
 * Action that tries to flush a table.
 */
 public class FlushTableAction extends Action {
-  private final byte[] tableNameBytes;
   private final long sleepTime;
-  private final String tableName;
+  private final TableName tableName;

   public FlushTableAction(String tableName) {
     this(-1, tableName);
   }

   public FlushTableAction(int sleepTime, String tableName) {
-    this.tableNameBytes = Bytes.toBytes(tableName);
     this.sleepTime = sleepTime;
-    this.tableName = tableName;
+    this.tableName = TableName.valueOf(tableName);
   }

   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();

     LOG.info("Performing action: Flush table " + tableName);
     try {
-      admin.flush(tableNameBytes);
+      admin.flush(tableName.toBytes());
     } catch (Exception ex) {
       LOG.warn("Flush failed, might be caused by other chaos: " + ex.getMessage());
     }

View File

@@ -23,6 +23,8 @@ import java.util.List;
 import org.apache.commons.lang.math.RandomUtils;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -30,8 +32,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 * Action to merge regions of a table.
 */
 public class MergeRandomAdjacentRegionsOfTableAction extends Action {
-  private final byte[] tableNameBytes;
-  private final String tableName;
+  private final TableName tableName;
   private final long sleepTime;

   public MergeRandomAdjacentRegionsOfTableAction(String tableName) {
@@ -39,18 +40,17 @@ public class MergeRandomAdjacentRegionsOfTableAction extends Action {
   }

   public MergeRandomAdjacentRegionsOfTableAction(int sleepTime, String tableName) {
-    this.tableNameBytes = Bytes.toBytes(tableName);
-    this.tableName = tableName;
+    this.tableName = TableName.valueOf(tableName);
     this.sleepTime = sleepTime;
   }

   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();

     LOG.info("Performing action: Merge random adjacent regions of table " + tableName);
-    List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
+    List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.size() < 2) {
       LOG.info("Table " + tableName + " doesn't have enough regions to merge");
       return;

View File

@@ -22,7 +22,9 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -31,8 +33,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 */
 public class MoveRandomRegionOfTableAction extends Action {
   private final long sleepTime;
-  private final byte[] tableNameBytes;
-  private final String tableName;
+  private final TableName tableName;

   public MoveRandomRegionOfTableAction(String tableName) {
     this(-1, tableName);
@@ -40,8 +41,7 @@ public class MoveRandomRegionOfTableAction extends Action {

   public MoveRandomRegionOfTableAction(long sleepTime, String tableName) {
     this.sleepTime = sleepTime;
-    this.tableNameBytes = Bytes.toBytes(tableName);
-    this.tableName = tableName;
+    this.tableName = TableName.valueOf(tableName);
   }

   @Override
@@ -51,10 +51,10 @@ public class MoveRandomRegionOfTableAction extends Action {
     }

     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();

     LOG.info("Performing action: Move random region of table " + tableName);
-    List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
+    List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
       LOG.info("Table " + tableName + " doesn't have regions to move");
       return;

View File

@@ -25,7 +25,9 @@ import java.util.List;
 import org.apache.commons.lang.math.RandomUtils;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -34,8 +36,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 */
 public class MoveRegionsOfTableAction extends Action {
   private final long sleepTime;
-  private final byte[] tableNameBytes;
-  private final String tableName;
+  private final TableName tableName;
   private final long maxTime;

   public MoveRegionsOfTableAction(String tableName) {
@@ -44,8 +45,7 @@ public class MoveRegionsOfTableAction extends Action {

   public MoveRegionsOfTableAction(long sleepTime, long maxSleepTime, String tableName) {
     this.sleepTime = sleepTime;
-    this.tableNameBytes = Bytes.toBytes(tableName);
-    this.tableName = tableName;
+    this.tableName = TableName.valueOf(tableName);
     this.maxTime = maxSleepTime;
   }
@@ -55,12 +55,12 @@ public class MoveRegionsOfTableAction extends Action {
       Thread.sleep(sleepTime);
     }

-    HBaseAdmin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
+    Admin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
     Collection<ServerName> serversList = admin.getClusterStatus().getServers();
     ServerName[] servers = serversList.toArray(new ServerName[serversList.size()]);

     LOG.info("Performing action: Move regions of table " + tableName);
-    List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
+    List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
       LOG.info("Table " + tableName + " doesn't have regions to move");
       return;

View File

@@ -24,6 +24,8 @@ import java.util.Set;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -31,15 +33,15 @@ import org.apache.hadoop.hbase.util.Bytes;
 * Action that removes a column family.
 */
 public class RemoveColumnAction extends Action {
-  private final byte[] tableName;
+  private final TableName tableName;
   private final Set<String> protectedColumns;
   private final String tableNameString;
-  private HBaseAdmin admin;
+  private Admin admin;
   private Random random;

   public RemoveColumnAction(String tableName, Set<String> protectedColumns) {
     tableNameString = tableName;
-    this.tableName = Bytes.toBytes(tableName);
+    this.tableName = TableName.valueOf(tableName);
     this.protectedColumns = protectedColumns;
     random = new Random();
   }

View File

@@ -19,13 +19,15 @@
 package org.apache.hadoop.hbase.chaos.actions;

 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;

 /**
 * Action that tries to take a snapshot of a table.
 */
 public class SnapshotTableAction extends Action {
-  private final String tableName;
+  private final TableName tableName;
   private final long sleepTime;

   public SnapshotTableAction(String tableName) {
@@ -33,7 +35,7 @@ public class SnapshotTableAction extends Action {
   }

   public SnapshotTableAction(int sleepTime, String tableName) {
-    this.tableName = tableName;
+    this.tableName = TableName.valueOf(tableName);
     this.sleepTime = sleepTime;
   }
@@ -41,7 +43,7 @@ public class SnapshotTableAction extends Action {
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
     String snapshotName = tableName + "-it-" + System.currentTimeMillis();
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();

     LOG.info("Performing action: Snapshot table " + tableName);
     admin.snapshot(snapshotName, tableName);

View File

@@ -22,7 +22,9 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -30,27 +32,25 @@ import org.apache.hadoop.hbase.util.Bytes;
 * Action that tries to split a random region of a table.
 */
 public class SplitRandomRegionOfTableAction extends Action {
-  private final byte[] tableNameBytes;
   private final long sleepTime;
-  private final String tableName;
+  private final TableName tableName;

   public SplitRandomRegionOfTableAction(String tableName) {
     this(-1, tableName);
   }

   public SplitRandomRegionOfTableAction(int sleepTime, String tableName) {
-    this.tableNameBytes = Bytes.toBytes(tableName);
     this.sleepTime = sleepTime;
-    this.tableName = tableName;
+    this.tableName = TableName.valueOf(tableName);
   }

   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();

     LOG.info("Performing action: Split random region of table " + tableName);
-    List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
+    List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
       LOG.info("Table " + tableName + " doesn't have regions to split");
       return;

View File

@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.chaos.actions;

 import java.util.Random;

 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.TableName;
@@ -39,7 +40,7 @@ public class TruncateTableAction extends Action {
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     boolean preserveSplits = random.nextBoolean();

     LOG.info("Performing action: Truncate table " + tableName.getNameAsString() +

View File

@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.IntegrationTests;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -196,7 +197,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
     if (replicaCount == NUM_REPLICA_COUNT_DEFAULT) return;

     TableName t = TableName.valueOf(getTablename());
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     HTableDescriptor desc = admin.getTableDescriptor(t);
     desc.addCoprocessor(SlowMeCoproScanOperations.class.getName());
     HBaseTestingUtility.modifyTableSync(admin, desc);
@@ -226,7 +227,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
   }

   private void setupTable() throws IOException, InterruptedException {
-    if (util.getHBaseAdmin().tableExists(getTablename())) {
+    if (util.getHBaseAdmin().tableExists(TableName.valueOf(getTablename()))) {
       util.deleteTable(getTablename());
     }

View File

@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.IntegrationTests;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
@@ -274,7 +275,7 @@ public class IntegrationTestImportTsv implements Configurable, Tool {
     } catch (Exception e) {
       throw new IOException("Underlying MapReduce job failed. Aborting commit.", e);
     } finally {
-      if (util.getHBaseAdmin().tableExists(table)) {
+      if (util.getHBaseAdmin().tableExists(TableName.valueOf(table))) {
         util.deleteTable(table);
       }
     }

View File

@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.IntegrationTestBase;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.IntegrationTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
@@ -382,7 +383,7 @@ public void cleanUpCluster() throws Exception {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TEST_NAME));
     htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
-    HBaseAdmin admin = getTestingUtil(getConf()).getHBaseAdmin();
+    Admin admin = getTestingUtil(getConf()).getHBaseAdmin();
     admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), 40);

     doLoad(getConf(), htd);

View File

@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.IntegrationTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -156,7 +157,7 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
     // flush the table
     LOG.info("Flushing the table");
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     admin.flush(getTablename());

     // re-open the regions to make sure that the replicas are up to date
@@ -166,8 +167,8 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
       Threads.sleep(refreshTime);
     } else {
       LOG.info("Reopening the table");
-      admin.disableTable(getTablename());
-      admin.enableTable(getTablename());
+      admin.disableTable(TableName.valueOf(getTablename()));
+      admin.enableTable(TableName.valueOf(getTablename()));
     }

     // We should only start the ChaosMonkey after the readers are started and have cached

View File

@@ -25,6 +25,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.IntegrationTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -58,11 +60,11 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
   public static final String TABLE_NAME_DEFAULT = "SendTracesTable";
   public static final String COLUMN_FAMILY_DEFAULT = "D";
-  private String tableName = TABLE_NAME_DEFAULT;
-  private String familyName = COLUMN_FAMILY_DEFAULT;
+  private TableName tableName = TableName.valueOf(TABLE_NAME_DEFAULT);
+  private byte[] familyName = Bytes.toBytes(COLUMN_FAMILY_DEFAULT);
   private IntegrationTestingUtility util;
   private Random random = new Random();
-  private HBaseAdmin admin;
+  private Admin admin;
   private SpanReceiverHost receiverHost;

   public static void main(String[] args) throws Exception {
@@ -83,8 +85,8 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
     String tableNameString = cmd.getOptionValue(TABLE_ARG, TABLE_NAME_DEFAULT);
     String familyString = cmd.getOptionValue(CF_ARG, COLUMN_FAMILY_DEFAULT);

-    this.tableName = tableNameString;
-    this.familyName = familyString;
+    this.tableName = TableName.valueOf(tableNameString);
+    this.familyName = Bytes.toBytes(familyString);
   }

   @Override
@@ -248,20 +250,18 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
           Put p = new Put(Bytes.toBytes(rk));
           for (int y = 0; y < 10; y++) {
             random.nextBytes(value);
-            p.add(Bytes.toBytes(familyName),
-              Bytes.toBytes(random.nextLong()),
-              value);
+            p.add(familyName, Bytes.toBytes(random.nextLong()), value);
           }
           ht.put(p);
         }
         if ((x % 1000) == 0) {
-          admin.flush(Bytes.toBytes(tableName));
+          admin.flush(tableName.toBytes());
         }
       } finally {
         traceScope.close();
       }
     }
-    admin.flush(Bytes.toBytes(tableName));
+    admin.flush(tableName.toBytes());
     return rowKeys;
   }

View File

@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -453,7 +454,7 @@ public final class Canary implements Tool {
    * Canary entry point for specified table.
    * @throws Exception
    */
-  public static void sniff(final HBaseAdmin admin, TableName tableName) throws Exception {
+  public static void sniff(final Admin admin, TableName tableName) throws Exception {
     sniff(admin, new StdOutSink(), tableName.getNameAsString());
   }
@@ -461,10 +462,10 @@ public final class Canary implements Tool {
    * Canary entry point for specified table.
    * @throws Exception
    */
-  private static void sniff(final HBaseAdmin admin, final Sink sink, String tableName)
+  private static void sniff(final Admin admin, final Sink sink, String tableName)
       throws Exception {
-    if (admin.isTableAvailable(tableName)) {
-      sniff(admin, sink, admin.getTableDescriptor(tableName.getBytes()));
+    if (admin.isTableAvailable(TableName.valueOf(tableName))) {
+      sniff(admin, sink, admin.getTableDescriptor(TableName.valueOf(tableName)));
     } else {
       LOG.warn(String.format("Table %s is not available", tableName));
     }
@@ -473,7 +474,7 @@ public final class Canary implements Tool {
   /*
    * Loops over regions that owns this table, and output some information abouts the state.
    */
-  private static void sniff(final HBaseAdmin admin, final Sink sink, HTableDescriptor tableDesc)
+  private static void sniff(final Admin admin, final Sink sink, HTableDescriptor tableDesc)
       throws Exception {
     HTable table = null;
@@ -484,7 +485,7 @@ public final class Canary implements Tool {
     }
     try {
-      for (HRegionInfo region : admin.getTableRegions(tableDesc.getName())) {
+      for (HRegionInfo region : admin.getTableRegions(tableDesc.getTableName())) {
         try {
           sniffRegion(admin, sink, region, table);
         } catch (Exception e) {
@@ -502,7 +503,7 @@ public final class Canary implements Tool {
    * failure.
    */
   private static void sniffRegion(
-      final HBaseAdmin admin,
+      final Admin admin,
       final Sink sink,
       HRegionInfo region,
       HTable table) throws Exception {
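Because the public sniff entry point now takes the Admin interface, any Admin implementation obtained from a connection can drive the Canary against one table. A small sketch, assuming the Canary class at org.apache.hadoop.hbase.tool.Canary and a hypothetical table "my_table":

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HConnectionManager;
    import org.apache.hadoop.hbase.tool.Canary;

    public class CanarySniffExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HConnection connection = HConnectionManager.createConnection(conf);
        Admin admin = connection.getAdmin();
        try {
          // The entry point takes the interface, not the concrete HBaseAdmin.
          Canary.sniff(admin, TableName.valueOf("my_table"));  // hypothetical table
        } finally {
          admin.close();
          connection.close();
        }
      }
    }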
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
@@ -143,7 +144,7 @@ public class HBaseFsckRepair {
    * Contacts a region server and waits up to hbase.hbck.close.timeout ms
    * (default 120s) to close the region. This bypasses the active hmaster.
    */
-  public static void closeRegionSilentlyAndWait(HBaseAdmin admin,
+  public static void closeRegionSilentlyAndWait(Admin admin,
       ServerName server, HRegionInfo region) throws IOException, InterruptedException {
     HConnection connection = admin.getConnection();
     AdminService.BlockingInterface rs = connection.getAdmin(server);
@@ -54,11 +54,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Waiter.Predicate;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -1468,7 +1470,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   /**
    * Modify a table, synchronous. Waiting logic similar to that of {@code admin.rb#alter_status}.
    */
-  public static void modifyTableSync(HBaseAdmin admin, HTableDescriptor desc)
+  public static void modifyTableSync(Admin admin, HTableDescriptor desc)
       throws IOException, InterruptedException {
     admin.modifyTable(desc.getTableName(), desc);
     Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
@@ -1494,7 +1496,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   /**
    * Set the number of Region replicas.
    */
-  public static void setReplicas(HBaseAdmin admin, TableName table, int replicaCount)
+  public static void setReplicas(Admin admin, TableName table, int replicaCount)
       throws IOException, InterruptedException {
     admin.disableTable(table);
     HTableDescriptor desc = admin.getTableDescriptor(table);
@@ -2041,8 +2043,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     HConnection conn = table.getConnection();
     conn.clearRegionCache();
     // assign all the new regions IF table is enabled.
-    HBaseAdmin admin = getHBaseAdmin();
-    if (admin.isTableEnabled(table.getTableName())) {
+    Admin admin = getHBaseAdmin();
+    if (admin.isTableEnabled(table.getName())) {
       for(HRegionInfo hri : newRegions) {
         admin.assign(hri.getRegionName());
       }
@@ -2469,15 +2471,15 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   }
   /**
-   * Returns a HBaseAdmin instance.
+   * Returns a Admin instance.
    * This instance is shared between HBaseTestingUtility instance users.
    * Closing it has no effect, it will be closed automatically when the
    * cluster shutdowns
    *
-   * @return The HBaseAdmin instance.
+   * @return An Admin instance.
    * @throws IOException
    */
-  public synchronized HBaseAdmin getHBaseAdmin()
+  public synchronized Admin getHBaseAdmin()
       throws IOException {
     if (hbaseAdmin == null){
       hbaseAdmin = new HBaseAdminForTests(getConfiguration());
@@ -2648,7 +2650,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     waitTableAvailable(getHBaseAdmin(), table, 30000);
   }
-  public void waitTableAvailable(HBaseAdmin admin, byte[] table)
+  public void waitTableAvailable(Admin admin, byte[] table)
       throws InterruptedException, IOException {
     waitTableAvailable(admin, table, 30000);
   }
@@ -2665,10 +2667,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
   }
-  public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
+  public void waitTableAvailable(Admin admin, byte[] table, long timeoutMillis)
       throws InterruptedException, IOException {
     long startWait = System.currentTimeMillis();
-    while (!admin.isTableAvailable(table)) {
+    while (!admin.isTableAvailable(TableName.valueOf(table))) {
       assertTrue("Timed out waiting for table to become available " +
         Bytes.toStringBinary(table),
         System.currentTimeMillis() - startWait < timeoutMillis);
@@ -2690,7 +2692,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     waitTableEnabled(getHBaseAdmin(), table, 30000);
   }
-  public void waitTableEnabled(HBaseAdmin admin, byte[] table)
+  public void waitTableEnabled(Admin admin, byte[] table)
       throws InterruptedException, IOException {
     waitTableEnabled(admin, table, 30000);
   }
@@ -2709,12 +2711,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
   }
-  public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
+  public void waitTableEnabled(Admin admin, byte[] table, long timeoutMillis)
       throws InterruptedException, IOException {
+    TableName tableName = TableName.valueOf(table);
     long startWait = System.currentTimeMillis();
     waitTableAvailable(admin, table, timeoutMillis);
     long remainder = System.currentTimeMillis() - startWait;
-    while (!admin.isTableEnabled(table)) {
+    while (!admin.isTableEnabled(tableName)) {
       assertTrue("Timed out waiting for table to become available and enabled " +
         Bytes.toStringBinary(table),
         System.currentTimeMillis() - remainder < timeoutMillis);
@@ -2726,7 +2729,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     // Below we do a get. The get will retry if a NotServeringRegionException or a
     // RegionOpeningException. It is crass but when done all will be online.
     try {
-      Canary.sniff(admin, TableName.valueOf(table));
+      Canary.sniff(admin, tableName);
     } catch (Exception e) {
       throw new IOException(e);
     }
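The waitTableEnabled hunk above resolves the byte[] name to a TableName once and then polls isTableEnabled on it. A simplified, self-contained sketch of that polling shape (throwing on timeout is this sketch's assumption; the real utility asserts instead):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class WaitForTableEnabled {
      private WaitForTableEnabled() {}

      // Resolve the byte[] name once, then poll through the Admin interface
      // until the table reports enabled or the timeout elapses.
      public static void waitTableEnabled(Admin admin, byte[] table, long timeoutMillis)
          throws IOException, InterruptedException {
        TableName tableName = TableName.valueOf(table);
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!admin.isTableEnabled(tableName)) {
          if (System.currentTimeMillis() > deadline) {
            throw new IOException("Timed out waiting for " + tableName + " to be enabled");
          }
          Thread.sleep(200);
        }
      }
    }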
@@ -3276,7 +3279,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     }
     int totalNumberOfRegions = 0;
-    HBaseAdmin admin = new HBaseAdmin(conf);
+    HConnection unmanagedConnection = HConnectionManager.createConnection(conf);
+    Admin admin = unmanagedConnection.getAdmin();
     try {
       // create a table a pre-splits regions.
       // The number of splits is set as:
@@ -3303,6 +3308,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
         " already exists, continuing");
     } finally {
       admin.close();
+      unmanagedConnection.close();
     }
     return totalNumberOfRegions;
   }
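This last hunk carries the core idiom of HBASE-11068: instead of constructing HBaseAdmin directly, create an unmanaged HConnection, ask it for an Admin, and close both when finished. A standalone sketch of that lifecycle, assuming only a reachable cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HConnectionManager;

    public class AdminFromConnection {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Before: HBaseAdmin admin = new HBaseAdmin(conf);
        // After: the caller owns an unmanaged connection and the Admin it hands out.
        HConnection unmanagedConnection = HConnectionManager.createConnection(conf);
        Admin admin = unmanagedConnection.getAdmin();
        try {
          System.out.println("hbase:meta available: "
              + admin.isTableAvailable(TableName.valueOf("hbase:meta")));
        } finally {
          admin.close();
          unmanagedConnection.close();  // close the connection after its Admin
        }
      }
    }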
@@ -47,6 +47,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
@@ -272,7 +273,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
    * {@code opts.presplitRegions} is specified or when the existing table's
    * region replica count doesn't match {@code opts.replicas}.
    */
-  static boolean checkTable(HBaseAdmin admin, TestOptions opts) throws IOException {
+  static boolean checkTable(Admin admin, TestOptions opts) throws IOException {
     TableName tableName = TableName.valueOf(opts.tableName);
     boolean needsDelete = false, exists = admin.tableExists(tableName);
     boolean isReadCmd = opts.cmdName.toLowerCase().contains("read")
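checkTable now takes the Admin interface and converts opts.tableName to a TableName before any call. A tiny helper illustrating that resolve-once style (TableCheck is a hypothetical class name, not part of the commit):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class TableCheck {
      private TableCheck() {}

      /** Resolves the option string once and asks the Admin interface. */
      public static boolean exists(Admin admin, String name) throws IOException {
        return admin.tableExists(TableName.valueOf(name));
      }
    }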
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -76,7 +77,7 @@ public class TestHColumnDescriptorDefaultVersions {
   @Test
   public void testCreateTableWithDefault() throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     // Create a table with one family
     HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
     HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
@@ -97,7 +98,7 @@ public class TestHColumnDescriptorDefaultVersions {
     TEST_UTIL.getConfiguration().setInt("hbase.column.max.version", 3);
     TEST_UTIL.startMiniCluster(1);
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     // Create a table with one family
     HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
     HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
@@ -119,7 +120,7 @@ public class TestHColumnDescriptorDefaultVersions {
     TEST_UTIL.getConfiguration().setInt("hbase.column.max.version", 3);
     TEST_UTIL.startMiniCluster(1);
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     // Create a table with one family
     HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
     HColumnDescriptor hcd =
@@ -140,7 +141,7 @@ public class TestHColumnDescriptorDefaultVersions {
   private void verifyHColumnDescriptor(int expected, final TableName tableName,
       final byte[]... families) throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     // Verify descriptor from master
     HTableDescriptor htd = admin.getTableDescriptor(tableName);
@@ -29,6 +29,7 @@ import java.util.Random;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
@@ -209,7 +210,7 @@ public class TestMetaTableAccessor {
     assertFalse(MetaTableAccessor.tableExists(hConnection, name));
     UTIL.createTable(name, HConstants.CATALOG_FAMILY);
     assertTrue(MetaTableAccessor.tableExists(hConnection, name));
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     admin.disableTable(name);
     admin.deleteTable(name);
     assertFalse(MetaTableAccessor.tableExists(hConnection, name));
@@ -33,6 +33,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -58,7 +59,7 @@ public class TestNamespace {
   private static HMaster master;
   protected final static int NUM_SLAVES_BASE = 4;
   private static HBaseTestingUtility TEST_UTIL;
-  protected static HBaseAdmin admin;
+  protected static Admin admin;
   protected static HBaseCluster cluster;
   private static ZKNamespaceManager zkNamespaceManager;
   private String prefix = "TestNamespace";
@@ -198,8 +199,8 @@ public class TestNamespace {
     String nsName = prefix+"_"+testName;
     LOG.info(testName);
-    byte[] tableName = Bytes.toBytes("my_table");
-    byte[] tableNameFoo = Bytes.toBytes(nsName+":my_table");
+    TableName tableName = TableName.valueOf("my_table");
+    TableName tableNameFoo = TableName.valueOf(nsName+":my_table");
     //create namespace and verify
     admin.createNamespace(NamespaceDescriptor.create(nsName).build());
     TEST_UTIL.createTable(tableName, Bytes.toBytes(nsName));
@@ -276,13 +277,13 @@ public class TestNamespace {
   @Test
   public void createTableInSystemNamespace() throws Exception {
-    String tableName = "hbase:createTableInSystemNamespace";
-    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
+    TableName tableName = TableName.valueOf("hbase:createTableInSystemNamespace");
+    HTableDescriptor desc = new HTableDescriptor(tableName);
     HColumnDescriptor colDesc = new HColumnDescriptor("cf1");
     desc.addFamily(colDesc);
     admin.createTable(desc);
     assertEquals(0, admin.listTables().length);
-    assertTrue(admin.tableExists(Bytes.toBytes(tableName)));
+    assertTrue(admin.tableExists(tableName));
     admin.disableTable(desc.getTableName());
     admin.deleteTable(desc.getTableName());
   }
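The TestNamespace changes lean on TableName.valueOf understanding namespace-qualified names such as "ns:my_table" and the system "hbase:" prefix. A small sketch of how such names split (the printed values in comments are what I'd expect, not output quoted from the source):

    import org.apache.hadoop.hbase.TableName;

    public class NamespaceNameExample {
      public static void main(String[] args) {
        // A qualified name splits into namespace and qualifier.
        TableName t = TableName.valueOf("ns1:my_table");  // "ns1" is a hypothetical namespace
        System.out.println(t.getNamespaceAsString());     // expect: ns1
        System.out.println(t.getQualifierAsString());     // expect: my_table
        // An unqualified name falls into the default namespace.
        TableName d = TableName.valueOf("my_table");
        System.out.println(d.getNamespaceAsString());     // expect: default
      }
    }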
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
@@ -117,7 +118,7 @@ public class TestHFileArchiving {
       TableName.valueOf("testRemovesRegionDirOnArchive");
     UTIL.createTable(TABLE_NAME, TEST_FAM);
-    final HBaseAdmin admin = UTIL.getHBaseAdmin();
+    final Admin admin = UTIL.getHBaseAdmin();
     // get the current store files for the region
     List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
@@ -88,9 +88,6 @@ public class TestAdmin {
   final Log LOG = LogFactory.getLog(getClass());
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private Admin admin;
-  // We use actual HBaseAdmin instance instead of going via Admin interface in
-  // here because makes use of an internal HBA method (TODO: Fix.).
-  private HBaseAdmin rawAdminInstance;
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -110,7 +107,7 @@ public class TestAdmin {
   @Before
   public void setUp() throws Exception {
-    this.admin = this.rawAdminInstance = TEST_UTIL.getHBaseAdmin();
+    this.admin = TEST_UTIL.getHBaseAdmin();
   }
   @After
@@ -151,11 +148,12 @@ public class TestAdmin {
   @Test (timeout=300000)
   public void testDeleteEditUnknownColumnFamilyAndOrTable() throws IOException {
     // Test we get exception if we try to
-    final String nonexistent = "nonexistent";
-    HColumnDescriptor nonexistentHcd = new HColumnDescriptor(nonexistent);
+    final TableName nonexistentTable = TableName.valueOf("nonexistent");
+    final byte[] nonexistentColumn = Bytes.toBytes("nonexistent");
+    HColumnDescriptor nonexistentHcd = new HColumnDescriptor(nonexistentColumn);
     Exception exception = null;
     try {
-      this.admin.addColumn(TableName.valueOf(nonexistent), nonexistentHcd);
+      this.admin.addColumn(nonexistentTable, nonexistentHcd);
     } catch (IOException e) {
       exception = e;
     }
@@ -163,7 +161,7 @@ public class TestAdmin {
     exception = null;
     try {
-      this.admin.deleteTable(TableName.valueOf(nonexistent));
+      this.admin.deleteTable(nonexistentTable);
     } catch (IOException e) {
       exception = e;
     }
@@ -171,7 +169,7 @@ public class TestAdmin {
     exception = null;
     try {
-      this.admin.deleteColumn(TableName.valueOf(nonexistent), Bytes.toBytes(nonexistent));
+      this.admin.deleteColumn(nonexistentTable, nonexistentColumn);
     } catch (IOException e) {
       exception = e;
     }
@@ -179,7 +177,7 @@ public class TestAdmin {
     exception = null;
     try {
-      this.admin.disableTable(TableName.valueOf(nonexistent));
+      this.admin.disableTable(nonexistentTable);
     } catch (IOException e) {
       exception = e;
     }
@@ -187,7 +185,7 @@ public class TestAdmin {
     exception = null;
     try {
-      this.admin.enableTable(TableName.valueOf(nonexistent));
+      this.admin.enableTable(nonexistentTable);
     } catch (IOException e) {
       exception = e;
     }
@@ -195,7 +193,7 @@ public class TestAdmin {
     exception = null;
     try {
-      this.admin.modifyColumn(TableName.valueOf(nonexistent), nonexistentHcd);
+      this.admin.modifyColumn(nonexistentTable, nonexistentHcd);
     } catch (IOException e) {
       exception = e;
     }
@@ -203,7 +201,7 @@ public class TestAdmin {
     exception = null;
     try {
-      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(nonexistent));
+      HTableDescriptor htd = new HTableDescriptor(nonexistentTable);
       htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
       this.admin.modifyTable(htd.getTableName(), htd);
     } catch (IOException e) {
@@ -1759,6 +1757,10 @@ public class TestAdmin {
   @Test (timeout=300000)
   public void testGetRegion() throws Exception {
+    // We use actual HBaseAdmin instance instead of going via Admin interface in
+    // here because makes use of an internal HBA method (TODO: Fix.).
+    HBaseAdmin rawAdmin = new HBaseAdmin(TEST_UTIL.getConfiguration());
     final String name = "testGetRegion";
     LOG.info("Started " + name);
     final byte [] nameBytes = Bytes.toBytes(name);
@@ -1768,9 +1770,9 @@ public class TestAdmin {
     HRegionLocation regionLocation = t.getRegionLocation("mmm");
     HRegionInfo region = regionLocation.getRegionInfo();
     byte[] regionName = region.getRegionName();
-    Pair<HRegionInfo, ServerName> pair = rawAdminInstance.getRegion(regionName);
+    Pair<HRegionInfo, ServerName> pair = rawAdmin.getRegion(regionName);
     assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
-    pair = rawAdminInstance.getRegion(region.getEncodedNameAsBytes());
+    pair = rawAdmin.getRegion(region.getEncodedNameAsBytes());
     assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
   }
 }
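The testGetRegion hunk is the one place the conversion deliberately keeps the concrete class: getRegion is internal to HBaseAdmin, so the test constructs one directly. A sketch of that escape hatch (the getRegion call stays in a comment because it is not part of the public Admin surface):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class RawAdminEscapeHatch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Construct the concrete class only when an internal method is required;
        // ordinary administration should go through the Admin interface instead.
        HBaseAdmin rawAdmin = new HBaseAdmin(conf);
        try {
          // e.g. rawAdmin.getRegion(regionName) -- internal, not on Admin.
        } finally {
          rawAdmin.close();
        }
      }
    }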
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -50,7 +51,7 @@ public class TestClientOperationInterrupt {
   private static final Log LOG = LogFactory.getLog(TestClientOperationInterrupt.class);
   private static HBaseTestingUtility util;
-  private static final byte[] tableName = Bytes.toBytes("test");
+  private static final TableName tableName = TableName.valueOf("test");
   private static final byte[] dummy = Bytes.toBytes("dummy");
   private static final byte[] row1 = Bytes.toBytes("r1");
   private static final byte[] test = Bytes.toBytes("test");
@@ -73,7 +74,7 @@ public class TestClientOperationInterrupt {
     util = new HBaseTestingUtility(conf);
     util.startMiniCluster();
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     if (admin.tableExists(tableName)) {
       if (admin.isTableEnabled(tableName)) {
         admin.disableTable(tableName);
@@ -62,7 +62,7 @@ public class TestCloneSnapshotFromClient {
   private int snapshot0Rows;
   private int snapshot1Rows;
   private TableName tableName;
-  private HBaseAdmin admin;
+  private Admin admin;
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -148,7 +148,7 @@ public class TestCloneSnapshotFromClient {
   @Test(expected=SnapshotDoesNotExistException.class)
   public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
     String snapshotName = "random-snapshot-" + System.currentTimeMillis();
-    String tableName = "random-table-" + System.currentTimeMillis();
+    TableName tableName = TableName.valueOf("random-table-" + System.currentTimeMillis());
     admin.cloneSnapshot(snapshotName, tableName);
   }
@@ -5450,7 +5450,7 @@ public class TestFromClientSide {
   }
   private void checkTableIsLegal(HTableDescriptor htd) throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     admin.createTable(htd);
     assertTrue(admin.tableExists(htd.getTableName()));
     admin.disableTable(htd.getTableName());
@@ -5458,7 +5458,7 @@ public class TestFromClientSide {
   }
   private void checkTableIsIllegal(HTableDescriptor htd) throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     try {
       admin.createTable(htd);
       fail();
@@ -405,7 +405,7 @@ public class TestFromClientSide3 {
   @Test
   public void testGetEmptyRow() throws Exception {
     //Create a table and put in 1 row
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("test")));
     desc.addFamily(new HColumnDescriptor(FAMILY));
     admin.createTable(desc);
@@ -66,7 +66,7 @@ public class TestRestoreSnapshotFromClient {
   private int snapshot0Rows;
   private int snapshot1Rows;
   private TableName tableName;
-  private HBaseAdmin admin;
+  private Admin admin;
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -189,7 +189,7 @@ public class TestSnapshotCloneIndependence {
     FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
     Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     final long startTime = System.currentTimeMillis();
     final TableName localTableName =
       TableName.valueOf(STRING_TABLE_NAME + startTime);
@@ -210,7 +210,7 @@ public class TestSnapshotCloneIndependence {
     if (!online) {
       admin.enableTable(localTableName);
     }
-    byte[] cloneTableName = Bytes.toBytes("test-clone-" + localTableName);
+    TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
     admin.cloneSnapshot(snapshotName, cloneTableName);
     HTable clonedTable = new HTable(UTIL.getConfiguration(), cloneTableName);
@@ -267,7 +267,7 @@ public class TestSnapshotCloneIndependence {
     Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
     // Create a table
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     final long startTime = System.currentTimeMillis();
     final TableName localTableName =
       TableName.valueOf(STRING_TABLE_NAME + startTime);
@@ -286,7 +286,7 @@ public class TestSnapshotCloneIndependence {
       admin.enableTable(localTableName);
     }
-    byte[] cloneTableName = Bytes.toBytes("test-clone-" + localTableName);
+    TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
     // Clone the snapshot
     byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
@@ -323,7 +323,7 @@ public class TestSnapshotCloneIndependence {
     Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
     // Create a table
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     final long startTime = System.currentTimeMillis();
     final TableName localTableName =
       TableName.valueOf(STRING_TABLE_NAME + startTime);
@@ -339,7 +339,7 @@ public class TestSnapshotCloneIndependence {
     if (!online) {
       admin.enableTable(localTableName);
     }
-    byte[] cloneTableName = Bytes.toBytes("test-clone-" + localTableName);
+    TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
     // Clone the snapshot
     byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
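Across the snapshot tests, clone targets move from byte[] to TableName while snapshot names stay String or byte[]. A hedged sketch combining the calls the hunks above actually use (the table and snapshot names are hypothetical, and an existing, reachable source table is assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HConnectionManager;

    public class SnapshotCloneExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HConnection connection = HConnectionManager.createConnection(conf);
        Admin admin = connection.getAdmin();
        try {
          TableName source = TableName.valueOf("source_table");            // hypothetical
          TableName clone = TableName.valueOf("test-clone-source_table");  // hypothetical
          String snapshotName = "snap" + System.currentTimeMillis();
          // Snapshot the existing source table, then clone it to a new TableName.
          admin.snapshot(snapshotName, source);
          admin.cloneSnapshot(snapshotName, clone);
        } finally {
          admin.close();
          connection.close();
        }
      }
    }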
@@ -125,7 +125,7 @@ public class TestSnapshotFromClient {
    */
   @Test (timeout=300000)
   public void testMetaTablesSnapshot() throws Exception {
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     byte[] snapshotName = Bytes.toBytes("metaSnapshot");
     try {
@@ -143,7 +143,7 @@ public class TestSnapshotFromClient {
    */
   @Test (timeout=300000)
   public void testSnapshotDeletionWithRegex() throws Exception {
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);
@@ -179,7 +179,7 @@ public class TestSnapshotFromClient {
    */
   @Test (timeout=300000)
   public void testOfflineTableSnapshot() throws Exception {
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);
@@ -232,7 +232,7 @@ public class TestSnapshotFromClient {
   @Test (timeout=300000)
   public void testSnapshotFailsOnNonExistantTable() throws Exception {
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);
     String tableName = "_not_a_table";
@@ -241,7 +241,7 @@ public class TestSnapshotFromClient {
     boolean fail = false;
     do {
       try {
-        admin.getTableDescriptor(Bytes.toBytes(tableName));
+        admin.getTableDescriptor(TableName.valueOf(tableName));
         fail = true;
         LOG.error("Table:" + tableName + " already exists, checking a new name");
         tableName = tableName+"!";
@@ -252,7 +252,7 @@ public class TestSnapshotFromClient {
     // snapshot the non-existant table
     try {
-      admin.snapshot("fail", tableName);
+      admin.snapshot("fail", TableName.valueOf(tableName));
       fail("Snapshot succeeded even though there is not table.");
     } catch (SnapshotCreationException e) {
       LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage());
@@ -263,7 +263,7 @@ public class TestSnapshotFromClient {
   public void testOfflineTableSnapshotWithEmptyRegions() throws Exception {
     // test with an empty table with one region
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);
@@ -83,7 +83,7 @@ public class TestSnapshotMetadata {
   private static final int BLOCK_SIZE = 98;
   private static final int MAX_VERSIONS = 8;
-  private HBaseAdmin admin;
+  private Admin admin;
   private String originalTableDescription;
   private HTableDescriptor originalTableDescriptor;
   TableName originalTableName;
@@ -185,7 +185,7 @@ public class TestSnapshotMetadata {
   public void testDescribeMatchesAfterClone() throws Exception {
     // Clone the original table
     final String clonedTableNameAsString = "clone" + originalTableName;
-    final byte[] clonedTableName = Bytes.toBytes(clonedTableNameAsString);
+    final TableName clonedTableName = TableName.valueOf(clonedTableNameAsString);
     final String snapshotNameAsString = "snapshot" + originalTableName
       + System.currentTimeMillis();
     final byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
@@ -88,7 +88,7 @@ public class TestTableSnapshotScanner {
     } else {
       util.createTable(tableName, FAMILIES);
     }
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     // put some stuff in the table
     HTable table = new HTable(util.getConfiguration(), tableName);
@@ -51,7 +51,7 @@ public class TestConstraint {
       .getLog(TestConstraint.class);
   private static HBaseTestingUtility util;
-  private static final byte[] tableName = Bytes.toBytes("test");
+  private static final TableName tableName = TableName.valueOf("test");
   private static final byte[] dummy = Bytes.toBytes("dummy");
   private static final byte[] row1 = Bytes.toBytes("r1");
   private static final byte[] test = Bytes.toBytes("test");
@@ -72,7 +72,7 @@ public class TestConstraint {
   public void testConstraintPasses() throws Exception {
     // create the table
     // it would be nice if this was also a method on the util
-    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor desc = new HTableDescriptor(tableName);
     for (byte[] family : new byte[][] { dummy, test }) {
       desc.addFamily(new HColumnDescriptor(family));
     }
@@ -103,7 +103,7 @@ public class TestConstraint {
     // create the table
     // it would be nice if this was also a method on the util
-    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor desc = new HTableDescriptor(tableName);
     for (byte[] family : new byte[][] { dummy, test }) {
       desc.addFamily(new HColumnDescriptor(family));
     }
@@ -140,7 +140,7 @@ public class TestConstraint {
   @Test
   public void testDisableConstraint() throws Throwable {
     // create the table
-    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor desc = new HTableDescriptor(tableName);
     // add a family to the table
     for (byte[] family : new byte[][] { dummy, test }) {
       desc.addFamily(new HColumnDescriptor(family));
@@ -175,7 +175,7 @@ public class TestConstraint {
   @Test
   public void testDisableConstraints() throws Throwable {
     // create the table
-    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor desc = new HTableDescriptor(tableName);
     // add a family to the table
     for (byte[] family : new byte[][] { dummy, test }) {
       desc.addFamily(new HColumnDescriptor(family));
@@ -207,7 +207,7 @@ public class TestConstraint {
   @Test
   public void testIsUnloaded() throws Exception {
     // create the table
-    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor desc = new HTableDescriptor(tableName);
     // add a family to the table
     for (byte[] family : new byte[][] { dummy, test }) {
       desc.addFamily(new HColumnDescriptor(family));
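With tableName now held as a TableName constant, every HTableDescriptor construction site in TestConstraint drops its TableName.valueOf wrapper. The same pattern in isolation, as a minimal sketch with the test's family names:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorFromTableName {
      public static void main(String[] args) {
        // Parse the name once; construct descriptors from the TableName directly.
        TableName tableName = TableName.valueOf("test");
        HTableDescriptor desc = new HTableDescriptor(tableName);
        for (byte[] family : new byte[][] { Bytes.toBytes("dummy"), Bytes.toBytes("test") }) {
          desc.addFamily(new HColumnDescriptor(family));
        }
        System.out.println(desc);
      }
    }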
@@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.TestServerCustomProtocol;
@@ -55,7 +56,7 @@ public class TestClassLoading {
   private static MiniDFSCluster cluster;
-  static final String tableName = "TestClassLoading";
+  static final TableName tableName = TableName.valueOf("TestClassLoading");
   static final String cpName1 = "TestCP1";
   static final String cpName2 = "TestCP2";
   static final String cpName3 = "TestCP3";
@@ -137,7 +138,7 @@ public class TestClassLoading {
     LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2);
     // create a table that references the coprocessors
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor("test"));
     // without configuration values
     htd.setValue("COPROCESSOR$1", jarFileOnHDFS1.toString() + "|" + cpName1 +
@@ -145,7 +146,7 @@ public class TestClassLoading {
     // with configuration values
     htd.setValue("COPROCESSOR$2", jarFileOnHDFS2.toString() + "|" + cpName2 +
       "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(tableName)) {
       if (admin.isTableEnabled(tableName)) {
         admin.disableTable(tableName);
@@ -166,7 +167,7 @@ public class TestClassLoading {
     MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
     for (HRegion region:
         hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
-      if (region.getRegionNameAsString().startsWith(tableName)) {
+      if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) {
         foundTableRegion = true;
         CoprocessorEnvironment env;
         env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
@@ -226,7 +227,7 @@ public class TestClassLoading {
     htd.addFamily(new HColumnDescriptor("test"));
     htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName3 + "|" +
       Coprocessor.PRIORITY_USER);
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     admin.createTable(htd);
     waitForTable(htd.getTableName());
@@ -252,7 +253,7 @@ public class TestClassLoading {
     htd.addFamily(new HColumnDescriptor("test"));
     htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName4 + "|" +
       Coprocessor.PRIORITY_USER);
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     admin.createTable(htd);
     waitForTable(htd.getTableName());
@@ -296,7 +297,7 @@ public class TestClassLoading {
       " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v ";
     // create a table that references the jar
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor("test"));
     // add 3 coprocessors by setting htd attributes directly.
@@ -314,7 +315,7 @@ public class TestClassLoading {
     htd.addCoprocessor(cpName6, new Path(getLocalPath(jarFile6)),
       Coprocessor.PRIORITY_USER, kvs);
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(tableName)) {
       if (admin.isTableEnabled(tableName)) {
         admin.disableTable(tableName);
@@ -333,7 +334,7 @@ public class TestClassLoading {
     MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
     for (HRegion region:
         hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
-      if (region.getRegionNameAsString().startsWith(tableName)) {
+      if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) {
         found_1 = found_1 ||
           (region.getCoprocessorHost().findCoprocessor(cpName1) != null);
         found_2 = found_2 ||
@@ -398,7 +399,7 @@ public class TestClassLoading {
     LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);
     // create a table that references the coprocessors
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor("test"));
     // without configuration values
     htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 +
@@ -406,7 +407,7 @@ public class TestClassLoading {
     // with configuration values
     htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 +
       "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(tableName)) {
       if (admin.isTableEnabled(tableName)) {
         admin.disableTable(tableName);
@@ -422,7 +423,7 @@ public class TestClassLoading {
     MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
     for (HRegion region:
         hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
-      if (region.getRegionNameAsString().startsWith(tableName)) {
+      if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) {
         CoprocessorEnvironment env;
         env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
         if (env != null) {
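String-prefix checks against region names now go through TableName.getNameAsString(), as in the hunks above. A trivial sketch (the region-name string here is illustrative only, not taken from a live cluster):

    import org.apache.hadoop.hbase.TableName;

    public class RegionNamePrefixExample {
      public static void main(String[] args) {
        TableName tableName = TableName.valueOf("TestClassLoading");
        // Region names render as "<table>,<startkey>,<timestamp>.<encoded>.";
        // this sample string is made up for illustration.
        String regionNameAsString = "TestClassLoading,,1407170000000.deadbeef.";
        System.out.println(
            regionNameAsString.startsWith(tableName.getNameAsString()));  // true
      }
    }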
@@ -30,6 +30,7 @@ import java.util.Map;
 import java.util.NavigableMap;
 import java.util.TreeMap;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -283,7 +284,7 @@ public class TestCoprocessorEndpoint {
   @Test
   public void testMasterCoprocessorService() throws Throwable {
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     final TestProtos.EchoRequestProto request =
       TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build();
     TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service =
@@ -314,7 +315,7 @@ public class TestCoprocessorEndpoint {
   @Test
   public void testMasterCoprocessorError() throws Throwable {
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service =
       TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(admin.coprocessorService());
     try {
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -85,7 +86,7 @@ public class TestMasterCoprocessorExceptionWithAbort {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TEST_TABLE));
     htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
     try {
-      HBaseAdmin admin = UTIL.getHBaseAdmin();
+      Admin admin = UTIL.getHBaseAdmin();
       admin.createTable(htd);
       fail("BuggyMasterObserver failed to throw an exception.");
     } catch (IOException e) {
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -191,7 +192,7 @@ public class TestMasterCoprocessorExceptionWithRemove {
     boolean threwDNRE = false;
     try {
-      HBaseAdmin admin = UTIL.getHBaseAdmin();
+      Admin admin = UTIL.getHBaseAdmin();
       admin.createTable(htd1);
     } catch (IOException e) {
       if (e.getClass().getName().equals("org.apache.hadoop.hbase.DoNotRetryIOException")) {
@@ -218,7 +219,7 @@ public class TestMasterCoprocessorExceptionWithRemove {
     // by creating another table: should not have a problem this time.
     HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf(TEST_TABLE2));
     htd2.addFamily(new HColumnDescriptor(TEST_FAMILY2));
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     try {
       admin.createTable(htd2);
     } catch (IOException e) {

View File

@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -1012,9 +1013,8 @@ public class TestMasterObserver {
   private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
   private static byte[] TEST_SNAPSHOT = Bytes.toBytes("observed_snapshot");
-  private static TableName TEST_TABLE =
-      TableName.valueOf("observed_table");
-  private static byte[] TEST_CLONE = Bytes.toBytes("observed_clone");
+  private static TableName TEST_TABLE = TableName.valueOf("observed_table");
+  private static TableName TEST_CLONE = TableName.valueOf("observed_clone");
   private static byte[] TEST_FAMILY = Bytes.toBytes("fam1");
   private static byte[] TEST_FAMILY2 = Bytes.toBytes("fam2");
   private static byte[] TEST_FAMILY3 = Bytes.toBytes("fam3");
@@ -1073,7 +1073,7 @@ public class TestMasterObserver {
     // create a table
     HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
     htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     tableCreationLatch = new CountDownLatch(1);
     admin.createTable(htd);
@@ -1236,7 +1236,7 @@ public class TestMasterObserver {
     // create a table
     HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
     htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     tableCreationLatch = new CountDownLatch(1);
     admin.createTable(htd);
@@ -1294,7 +1294,7 @@ public class TestMasterObserver {
     // create a table
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     admin.createNamespace(NamespaceDescriptor.create(testNamespace).build());
     assertTrue("Test namespace should be created", cp.wasCreateNamespaceCalled());
@@ -1332,7 +1332,7 @@ public class TestMasterObserver {
     assertTrue("Test namespace should not be created", cp.preCreateNamespaceCalledOnly());
   }

-  private void modifyTableSync(HBaseAdmin admin, TableName tableName, HTableDescriptor htd)
+  private void modifyTableSync(Admin admin, TableName tableName, HTableDescriptor htd)
       throws IOException {
     admin.modifyTable(tableName, htd);
     //wait until modify table finishes
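
The hunk cuts modifyTableSync off at the comment; a hypothetical completion of the wait loop is sketched below (the timeout and poll interval are assumptions, not values from the test):

private void modifyTableSync(Admin admin, TableName tableName, HTableDescriptor htd)
    throws IOException {
  admin.modifyTable(tableName, htd); // asynchronous in this era of the API
  // Poll the master until the new descriptor is visible.
  for (int t = 0; t < 100; t++) {
    if (htd.equals(admin.getTableDescriptor(tableName))) {
      return;
    }
    try {
      Thread.sleep(100); // illustrative interval
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
  }
  throw new IOException("Table " + tableName + " was not modified in time");
}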

View File

@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -157,7 +158,7 @@ public class TestOpenTableInCoprocessor {
     other.addFamily(new HColumnDescriptor(family));

-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     admin.createTable(primary);
     admin.createTable(other);

View File

@@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -52,7 +54,7 @@ import org.junit.experimental.categories.Category;
 @Category(MediumTests.class)
 public class TestRegionObserverBypass {
   private static HBaseTestingUtility util;
-  private static final byte[] tableName = Bytes.toBytes("test");
+  private static final TableName tableName = TableName.valueOf("test");
   private static final byte[] dummy = Bytes.toBytes("dummy");
   private static final byte[] row1 = Bytes.toBytes("r1");
   private static final byte[] row2 = Bytes.toBytes("r2");
@@ -75,7 +77,7 @@ public class TestRegionObserverBypass {
   @Before
   public void setUp() throws Exception {
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     if (admin.tableExists(tableName)) {
       if (admin.isTableEnabled(tableName)) {
         admin.disableTable(tableName);
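
Alongside the Admin change, the table name constant becomes a TableName, which the Admin overloads above take directly. A small self-contained sketch of the TableName API this conversion relies on (names are illustrative):

import org.apache.hadoop.hbase.TableName;

public class TableNameExample {
  public static void main(String[] args) {
    TableName plain = TableName.valueOf("test");            // default namespace
    TableName qualified = TableName.valueOf("ns1", "test"); // explicit namespace
    System.out.println(plain.getNamespaceAsString());       // prints "default"
    System.out.println(qualified.getNameAsString());        // prints "ns1:test"
    byte[] bridge = plain.getName(); // bridge back to byte[]-based APIs
    System.out.println(bridge.length);
  }
}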

View File

@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
@@ -486,14 +487,14 @@ public class TestRegionObserverInterface {
    */
   @Test
   public void testCompactionOverride() throws Exception {
-    byte[] compactTable = Bytes.toBytes("TestCompactionOverride");
-    HBaseAdmin admin = util.getHBaseAdmin();
+    TableName compactTable = TableName.valueOf("TestCompactionOverride");
+    Admin admin = util.getHBaseAdmin();
     if (admin.tableExists(compactTable)) {
       admin.disableTable(compactTable);
       admin.deleteTable(compactTable);
     }

-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(compactTable));
+    HTableDescriptor htd = new HTableDescriptor(compactTable);
     htd.addFamily(new HColumnDescriptor(A));
     htd.addCoprocessor(EvenOnlyCompactor.class.getName());
     admin.createTable(htd);
@@ -515,7 +516,7 @@ public class TestRegionObserverInterface {
     // force a compaction
     long ts = System.currentTimeMillis();
-    admin.flush(compactTable);
+    admin.flush(compactTable.toBytes());
     // wait for flush
     for (int i=0; i<10; i++) {
       if (compactor.lastFlush >= ts) {
@@ -527,7 +528,7 @@ public class TestRegionObserverInterface {
     LOG.debug("Flush complete");
     ts = compactor.lastFlush;
-    admin.majorCompact(compactTable);
+    admin.majorCompact(compactTable.toBytes());
     // wait for compaction
     for (int i=0; i<30; i++) {
       if (compactor.lastCompaction >= ts) {
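
Note the toBytes() bridge in the last two hunks: compactTable is now a TableName, but this era's flush/majorCompact overloads still take byte[] (or String) names. A minimal sketch of the bridge (the class and method wrapper are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class FlushCompactExample {
  // Both calls are asynchronous requests; callers poll for completion,
  // as the test does with its compactor.lastFlush / lastCompaction checks.
  static void flushThenMajorCompact(Admin admin, TableName table)
      throws IOException, InterruptedException {
    admin.flush(table.toBytes());
    admin.majorCompact(table.toBytes());
  }
}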

View File

@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -254,7 +255,7 @@ public class TestRegionObserverScannerOpenHook {
     desc.addCoprocessor(NoDataFromCompaction.class.getName(), null, Coprocessor.PRIORITY_HIGHEST,
         null);
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     admin.createTable(desc);
     HTable table = new HTable(conf, desc.getTableName());

View File

@@ -33,6 +33,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;

+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
@@ -85,7 +86,7 @@ public class TestRowProcessorEndpoint {
   static final Log LOG = LogFactory.getLog(TestRowProcessorEndpoint.class);
-  private static final byte[] TABLE = Bytes.toBytes("testtable");
+  private static final TableName TABLE = TableName.valueOf("testtable");
   private final static byte[] ROW = Bytes.toBytes("testrow");
   private final static byte[] ROW2 = Bytes.toBytes("testrow2");
   private final static byte[] FAM = Bytes.toBytes("friendlist");

View File

@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
@@ -149,7 +150,7 @@ public abstract class TableSnapshotInputFormatTestBase {
     } else {
       util.createTable(tableName, FAMILIES);
     }
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();

     // put some stuff in the table
     HTable table = new HTable(util.getConfiguration(), tableName);

View File

@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
@@ -183,7 +184,7 @@ public class TestAssignmentListener {
   @Test(timeout=60000)
   public void testAssignmentListener() throws IOException, InterruptedException {
     AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     DummyAssignmentListener listener = new DummyAssignmentListener();
     am.registerListener(listener);

View File

@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
@@ -82,7 +83,7 @@ public class TestAssignmentManagerOnCluster {
   private final static byte[] FAMILY = Bytes.toBytes("FAMILY");
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   final static Configuration conf = TEST_UTIL.getConfiguration();
-  private static HBaseAdmin admin;
+  private static Admin admin;

   static void setupOnce() throws Exception {
     // Using the our load balancer to control region plans
@@ -302,7 +303,7 @@ public class TestAssignmentManagerOnCluster {
   public void testMoveRegionOfDeletedTable() throws Exception {
     TableName table =
         TableName.valueOf("testMoveRegionOfDeletedTable");
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     try {
       HRegionInfo hri = createTableAndGetOneRegion(table);
@@ -807,11 +808,11 @@ public class TestAssignmentManagerOnCluster {
    */
   @Test (timeout=60000)
   public void testAssignDisabledRegion() throws Exception {
-    String table = "testAssignDisabledRegion";
+    TableName table = TableName.valueOf("testAssignDisabledRegion");
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     MyMaster master = null;
     try {
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
+      HTableDescriptor desc = new HTableDescriptor(table);
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc);
@@ -839,7 +840,7 @@ public class TestAssignmentManagerOnCluster {
       am.unassign(hri, true);
       assertTrue(regionStates.isRegionOffline(hri));
     } finally {
-      TEST_UTIL.deleteTable(Bytes.toBytes(table));
+      TEST_UTIL.deleteTable(table);
     }
   }

View File

@@ -781,7 +781,7 @@ public class TestDistributedLogSplitting {
     makeHLog(hrs.getWAL(), regions, "table", "family", NUM_LOG_LINES, 100);

     LOG.info("Disabling table\n");
-    TEST_UTIL.getHBaseAdmin().disableTable(Bytes.toBytes("disableTable"));
+    TEST_UTIL.getHBaseAdmin().disableTable(TableName.valueOf("disableTable"));

     // abort RS
     LOG.info("Aborting region server: " + hrs.getServerName());
@@ -1361,7 +1361,7 @@ public class TestDistributedLogSplitting {
   HTable installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs,
       int existingRegions) throws Exception {
     // Create a table with regions
-    byte [] table = Bytes.toBytes(tname);
+    TableName table = TableName.valueOf(tname);
     byte [] family = Bytes.toBytes(fname);
     LOG.info("Creating table with " + nrs + " regions");
     HTable ht = TEST_UTIL.createTable(table, family);
@@ -1617,7 +1617,7 @@ public class TestDistributedLogSplitting {
     final HRegionServer destRS = hrs;
     // the RS doesn't have regions of the specified table so we need move one to this RS
     List<HRegionInfo> tableRegions =
-        TEST_UTIL.getHBaseAdmin().getTableRegions(Bytes.toBytes(tableName));
+        TEST_UTIL.getHBaseAdmin().getTableRegions(TableName.valueOf(tableName));
     final HRegionInfo hri = tableRegions.get(0);
     TEST_UTIL.getHBaseAdmin().move(hri.getEncodedNameAsBytes(),
         Bytes.toBytes(destRS.getServerName().getServerName()));
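
The last hunk shows the TableName-typed region lookup feeding a move. A sketch of that idiom as a helper (the wrapper class and method are illustrative; the calls mirror the diff):

import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class MoveRegionExample {
  // Look up a table's regions by TableName, then move the first region to
  // the chosen destination server.
  static void moveFirstRegion(Admin admin, String tableName, ServerName dest)
      throws Exception {
    List<HRegionInfo> regions = admin.getTableRegions(TableName.valueOf(tableName));
    HRegionInfo hri = regions.get(0);
    admin.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(dest.getServerName()));
  }
}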

View File

@@ -22,6 +22,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
@@ -47,7 +48,7 @@ public class TestMaster {
   private static final TableName TABLENAME =
       TableName.valueOf("TestMaster");
   private static final byte[] FAMILYNAME = Bytes.toBytes("fam");
-  private static HBaseAdmin admin;
+  private static Admin admin;

   @BeforeClass
   public static void beforeAllTests() throws Exception {
@@ -147,9 +148,9 @@ public class TestMaster {
   @Test
   public void testMoveThrowsPleaseHoldException() throws IOException {
-    byte[] tableName = Bytes.toBytes("testMoveThrowsPleaseHoldException");
+    TableName tableName = TableName.valueOf("testMoveThrowsPleaseHoldException");
     HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     HColumnDescriptor hcd = new HColumnDescriptor("value");
     htd.addFamily(hcd);

View File

@@ -67,7 +67,7 @@ public class TestMasterRestartAfterDisablingTable {
     HMaster master = cluster.getMaster();

     // Create a table with regions
-    byte[] table = Bytes.toBytes("tableRestart");
+    TableName table = TableName.valueOf("tableRestart");
     byte[] family = Bytes.toBytes("family");
     log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
     HTable ht = TEST_UTIL.createTable(table, family);

View File

@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -76,7 +77,7 @@ public class TestRollingRestart {
     HMaster master = cluster.getMaster();

     // Create a table with regions
-    byte [] table = Bytes.toBytes("tableRestart");
+    TableName table = TableName.valueOf("tableRestart");
     byte [] family = Bytes.toBytes("family");
     log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
     HTable ht = TEST_UTIL.createTable(table, family);

View File

@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
@@ -114,7 +115,7 @@ public class TestTableLockManager {
     Future<Object> shouldFinish = executor.submit(new Callable<Object>() {
       @Override
       public Object call() throws Exception {
-        HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+        Admin admin = TEST_UTIL.getHBaseAdmin();
         admin.deleteColumn(TABLE_NAME, FAMILY);
         return null;
       }
@@ -123,7 +124,7 @@ public class TestTableLockManager {
     deleteColumn.await();

     try {
-      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+      Admin admin = TEST_UTIL.getHBaseAdmin();
       admin.addColumn(TABLE_NAME, new HColumnDescriptor(NEW_FAMILY));
       fail("Was expecting TableLockTimeoutException");
     } catch (LockTimeoutException ex) {
@@ -166,7 +167,7 @@ public class TestTableLockManager {
     Future<Object> alterTableFuture = executor.submit(new Callable<Object>() {
       @Override
       public Object call() throws Exception {
-        HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+        Admin admin = TEST_UTIL.getHBaseAdmin();
         admin.addColumn(TABLE_NAME, new HColumnDescriptor(NEW_FAMILY));
         LOG.info("Added new column family");
         HTableDescriptor tableDesc = admin.getTableDescriptor(TABLE_NAME);
@@ -177,7 +178,7 @@ public class TestTableLockManager {
     Future<Object> disableTableFuture = executor.submit(new Callable<Object>() {
       @Override
       public Object call() throws Exception {
-        HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+        Admin admin = TEST_UTIL.getHBaseAdmin();
         admin.disableTable(TABLE_NAME);
         assertTrue(admin.isTableDisabled(TABLE_NAME));
         admin.deleteTable(TABLE_NAME);
@@ -243,7 +244,7 @@ public class TestTableLockManager {
   public void testDelete() throws Exception {
     prepareMiniCluster();

-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     admin.disableTable(TABLE_NAME);
     admin.deleteTable(TABLE_NAME);
@@ -327,7 +328,7 @@ public class TestTableLockManager {
     loadTool.setConf(TEST_UTIL.getConfiguration());
     int numKeys = 10000;
     final TableName tableName = TableName.valueOf("testTableReadLock");
-    final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    final Admin admin = TEST_UTIL.getHBaseAdmin();
     final HTableDescriptor desc = new HTableDescriptor(tableName);
     final byte[] family = Bytes.toBytes("test_cf");
     desc.addFamily(new HColumnDescriptor(family));
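
Every Callable in this test now fetches the shared Admin from the test utility instead of constructing an HBaseAdmin per thread. A sketch of that shape (the wrapper class and method are illustrative, not from the test):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class ConcurrentDdlExample {
  // Submit a schema change from a worker thread; the Admin is shared, so
  // the worker does not manage its own connection lifecycle.
  static Future<Object> addColumnAsync(final Admin admin, final TableName table,
      final byte[] family, ExecutorService executor) {
    return executor.submit(new Callable<Object>() {
      @Override
      public Object call() throws Exception {
        admin.addColumn(table, new HColumnDescriptor(family));
        return null;
      }
    });
  }
}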

View File

@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -278,7 +279,7 @@ public class TestSnapshotFromMaster {
    */
   @Test(timeout = 300000)
   public void testSnapshotHFileArchiving() throws Exception {
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);

View File

@@ -69,10 +69,10 @@ public class TestCreateTableHandler {
   @Test (timeout=300000)
   public void testCreateTableCalledTwiceAndFirstOneInProgress() throws Exception {
-    final byte[] tableName = Bytes.toBytes("testCreateTableCalledTwiceAndFirstOneInProgress");
+    final TableName tableName = TableName.valueOf("testCreateTableCalledTwiceAndFirstOneInProgress");
     final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     final HMaster m = cluster.getMaster();
-    final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
+    final HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addFamily(new HColumnDescriptor(FAMILYNAME));
     final HRegionInfo[] hRegionInfos = new HRegionInfo[] { new HRegionInfo(desc.getTableName(), null,
         null) };
@@ -96,10 +96,10 @@ public class TestCreateTableHandler {
   @Test (timeout=300000)
   public void testCreateTableWithSplitRegion() throws Exception {
-    final byte[] tableName = Bytes.toBytes("testCreateTableWithSplitRegion");
+    final TableName tableName = TableName.valueOf("testCreateTableWithSplitRegion");
     final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     final HMaster m = cluster.getMaster();
-    final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
+    final HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addFamily(new HColumnDescriptor(FAMILYNAME));
     byte[] splitPoint = Bytes.toBytes("split-point");
     long ts = System.currentTimeMillis();
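
With the name parsed up front, the descriptor and the region info are built from the same TableName, with no valueOf round-trip. A compact sketch (wrapper names are illustrative):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class DescriptorExample {
  // Build a single-region layout for a table: descriptor plus one region
  // spanning the whole key space (null start and end keys).
  static HRegionInfo[] singleRegion(TableName tableName, byte[] family) {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(family));
    return new HRegionInfo[] { new HRegionInfo(desc.getTableName(), null, null) };
  }
}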

View File

@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -97,7 +98,7 @@ public class TestTableDeleteFamilyHandler {
   @Test
   public void deleteColumnFamilyWithMultipleRegions() throws Exception {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME);

     FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
@@ -135,7 +136,7 @@ public class TestTableDeleteFamilyHandler {
     // TEST - Disable and delete the column family
     admin.disableTable(TABLENAME);
-    admin.deleteColumn(TABLENAME.getName(), "cf2");
+    admin.deleteColumn(TABLENAME, Bytes.toBytes("cf2"));

     // 5 - Check if only 2 column families exist in the descriptor
     HTableDescriptor afterhtd = admin.getTableDescriptor(TABLENAME);

View File

@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -80,7 +81,7 @@ public class TestTableDescriptorModification {
   @Test
   public void testModifyTable() throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     // Create a table with one family
     HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
     baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
@@ -103,7 +104,7 @@ public class TestTableDescriptorModification {
   @Test
   public void testAddColumn() throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     // Create a table with two families
     HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
     baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
@@ -123,7 +124,7 @@ public class TestTableDescriptorModification {
   @Test
   public void testDeleteColumn() throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     // Create a table with two families
     HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
     baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
@@ -144,7 +145,7 @@ public class TestTableDescriptorModification {
   private void verifyTableDescriptor(final TableName tableName,
       final byte[]... families) throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();

     // Verify descriptor from master
     HTableDescriptor htd = admin.getTableDescriptor(tableName);

View File

@@ -93,7 +93,8 @@ public class TestNamespaceUpgrade {
       {"1","2","3","4","5","6","7","8","9"};
   private final static String currentKeys[] =
       {"1","2","3","4","5","6","7","8","9","A"};
-  private final static String tables[] = {"foo", "ns1.foo","ns.two.foo"};
+  private final static TableName tables[] =
+      {TableName.valueOf("foo"), TableName.valueOf("ns1.foo"), TableName.valueOf("ns.two.foo")};

   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -134,7 +135,7 @@ public class TestNamespaceUpgrade {
     doFsCommand(shell, new String [] {"-lsr", "/"});
     TEST_UTIL.startMiniHBaseCluster(1, 1);

-    for(String table: tables) {
+    for(TableName table: tables) {
       int count = 0;
       for(Result res: new HTable(TEST_UTIL.getConfiguration(), table).getScanner(new Scan())) {
         assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
@@ -151,7 +152,7 @@ public class TestNamespaceUpgrade {
       count++;
     }
     assertEquals(3, count);
-    assertFalse(TEST_UTIL.getHBaseAdmin().tableExists("_acl_"));
+    assertFalse(TEST_UTIL.getHBaseAdmin().tableExists(TableName.valueOf("_acl_")));

    //verify ACL table was compacted
    List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(secureTable.getName());
@@ -198,8 +199,8 @@ public class TestNamespaceUpgrade {
   public void testSnapshots() throws IOException, InterruptedException {
     String snapshots[][] = {snapshot1Keys, snapshot2Keys};
     for(int i = 1; i <= snapshots.length; i++) {
-      for(String table: tables) {
-        TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot"+i, table+"_clone"+i);
+      for(TableName table: tables) {
+        TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot"+i, TableName.valueOf(table+"_clone"+i));
         FSUtils.logFileSystemState(FileSystem.get(TEST_UTIL.getConfiguration()),
             FSUtils.getRootDir(TEST_UTIL.getConfiguration()),
             LOG);
@@ -217,14 +218,15 @@ public class TestNamespaceUpgrade {
   public void testRenameUsingSnapshots() throws Exception {
     String newNS = "newNS";
     TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(newNS).build());
-    for(String table: tables) {
+    for(TableName table: tables) {
       int count = 0;
       for(Result res: new HTable(TEST_UTIL.getConfiguration(), table).getScanner(new
           Scan())) {
         assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
       }
       TEST_UTIL.getHBaseAdmin().snapshot(table + "_snapshot3", table);
-      final String newTableName = newNS + TableName.NAMESPACE_DELIM + table + "_clone3";
+      final TableName newTableName =
+          TableName.valueOf(newNS + TableName.NAMESPACE_DELIM + table + "_clone3");
       TEST_UTIL.getHBaseAdmin().cloneSnapshot(table + "_snapshot3", newTableName);
       Thread.sleep(1000);
       count = 0;
@@ -234,14 +236,14 @@ public class TestNamespaceUpgrade {
       }
       FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath()
          , LOG);
-      Assert.assertEquals(newTableName, currentKeys.length, count);
-      TEST_UTIL.getHBaseAdmin().flush(newTableName);
-      TEST_UTIL.getHBaseAdmin().majorCompact(newTableName);
+      Assert.assertEquals(newTableName + "", currentKeys.length, count);
+      TEST_UTIL.getHBaseAdmin().flush(newTableName.toBytes());
+      TEST_UTIL.getHBaseAdmin().majorCompact(newTableName.toBytes());
       TEST_UTIL.waitFor(30000, new Waiter.Predicate<IOException>() {
         @Override
         public boolean evaluate() throws IOException {
           try {
-            return TEST_UTIL.getHBaseAdmin().getCompactionState(newTableName) ==
+            return TEST_UTIL.getHBaseAdmin().getCompactionState(newTableName.toBytes()) ==
                 AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
           } catch (InterruptedException e) {
             throw new IOException(e);
@@ -252,10 +254,11 @@ public class TestNamespaceUpgrade {
     String nextNS = "nextNS";
     TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(nextNS).build());
-    for(String table: tables) {
-      String srcTable = newNS + TableName.NAMESPACE_DELIM + table + "_clone3";
+    for(TableName table: tables) {
+      TableName srcTable = TableName.valueOf(newNS + TableName.NAMESPACE_DELIM + table + "_clone3");
       TEST_UTIL.getHBaseAdmin().snapshot(table + "_snapshot4", srcTable);
-      String newTableName = nextNS + TableName.NAMESPACE_DELIM + table + "_clone4";
+      TableName newTableName =
+          TableName.valueOf(nextNS + TableName.NAMESPACE_DELIM + table + "_clone4");
      TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot4", newTableName);
      FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(),
          LOG);
@@ -264,7 +267,7 @@ public class TestNamespaceUpgrade {
           Scan())) {
         assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
       }
-      Assert.assertEquals(newTableName, currentKeys.length, count);
+      Assert.assertEquals(newTableName + "", currentKeys.length, count);
     }
   }
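
The rename-via-snapshot sequence above now targets TableName-typed destinations. A condensed sketch of the idiom, assuming the TableName overloads of snapshot and cloneSnapshot used in the diff (the namespace and qualifier names are illustrative):

import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class CloneIntoNamespaceExample {
  // Snapshot a source table, then clone it under a new namespace, which is
  // how the test "renames" a table across namespaces.
  static void renameViaSnapshot(Admin admin, TableName src, String ns, String newQualifier)
      throws Exception {
    admin.createNamespace(NamespaceDescriptor.create(ns).build());
    String snapshot = src.getQualifierAsString() + "_snapshot";
    admin.snapshot(snapshot, src);
    TableName dest = TableName.valueOf(ns + TableName.NAMESPACE_DELIM + newQualifier);
    admin.cloneSnapshot(snapshot, dest);
  }
}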

View File

@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.junit.AfterClass;
 import static org.junit.Assert.assertArrayEquals;
@@ -59,7 +60,7 @@ public class TestProcedureManager {
   @Test
   public void testSimpleProcedureManager() throws IOException {
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();

     byte[] result = admin.execProcedureWithRet(SimpleMasterProcedureManager.SIMPLE_SIGNATURE,
         "mytest", new HashMap<String, String>());

View File

@@ -113,7 +113,7 @@ public class TestEncryptionKeyRotation {
     hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf,
       conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()),
       secondCFKey));
-    TEST_UTIL.getHBaseAdmin().modifyColumn(htd.getName(), hcd);
+    TEST_UTIL.getHBaseAdmin().modifyColumn(htd.getTableName(), hcd);
     Thread.sleep(5000); // Need a predicate for online schema change

     // And major compact

View File

@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
@@ -223,7 +224,7 @@ public class TestEndToEndSplitTransaction {
     HTable table;
     TableName tableName;
     byte[] family;
-    HBaseAdmin admin;
+    Admin admin;
     HRegionServer rs;

     RegionSplitter(HTable table) throws IOException {
@@ -398,7 +399,7 @@ public class TestEndToEndSplitTransaction {

   /* some utility methods for split tests */

-  public static void flushAndBlockUntilDone(HBaseAdmin admin, HRegionServer rs, byte[] regionName)
+  public static void flushAndBlockUntilDone(Admin admin, HRegionServer rs, byte[] regionName)
       throws IOException, InterruptedException {
     log("flushing region: " + Bytes.toStringBinary(regionName));
     admin.flush(regionName);
@@ -409,7 +410,7 @@ public class TestEndToEndSplitTransaction {
     }
   }

-  public static void compactAndBlockUntilDone(HBaseAdmin admin, HRegionServer rs, byte[] regionName)
+  public static void compactAndBlockUntilDone(Admin admin, HRegionServer rs, byte[] regionName)
       throws IOException, InterruptedException {
     log("Compacting region: " + Bytes.toStringBinary(regionName));
     admin.majorCompact(regionName);
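
Both helpers above now accept any Admin. The hunk truncates the flush helper's body; a hypothetical completion is sketched here (the memstore poll reflects the 0.99-era HRegion API and is an assumption, as is the poll interval):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushBlockExample {
  public static void flushAndBlockUntilDone(Admin admin, HRegionServer rs, byte[] regionName)
      throws IOException, InterruptedException {
    System.out.println("flushing region: " + Bytes.toStringBinary(regionName));
    admin.flush(regionName); // request the flush
    // Poll the hosting server until the region's memstore drains (assumed
    // accessor; the test's own wait condition is not shown in the hunk).
    while (rs.getOnlineRegion(regionName).getMemstoreSize().get() > 0) {
      Thread.sleep(100);
    }
  }
}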

View File

@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
@@ -58,22 +59,21 @@ public class TestHRegionOnCluster {
     TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);

     try {
-      final byte[] TABLENAME = Bytes
-          .toBytes("testDataCorrectnessReplayingRecoveredEdits");
+      final TableName TABLENAME = TableName.valueOf("testDataCorrectnessReplayingRecoveredEdits");
       final byte[] FAMILY = Bytes.toBytes("family");
       MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
       HMaster master = cluster.getMaster();

       // Create table
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLENAME));
+      HTableDescriptor desc = new HTableDescriptor(TABLENAME);
       desc.addFamily(new HColumnDescriptor(FAMILY));
-      HBaseAdmin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
+      Admin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
       hbaseAdmin.createTable(desc);

       assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));

       // Put data: r1->v1
-      Log.info("Loading r1 to v1 into " + Bytes.toString(TABLENAME));
+      Log.info("Loading r1 to v1 into " + TABLENAME);
       HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
       putDataAndVerify(table, "r1", FAMILY, "v1", 1);
@@ -95,7 +95,7 @@ public class TestHRegionOnCluster {
       } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);

       // Put data: r2->v2
-      Log.info("Loading r2 to v2 into " + Bytes.toString(TABLENAME));
+      Log.info("Loading r2 to v2 into " + TABLENAME);
       putDataAndVerify(table, "r2", FAMILY, "v2", 2);

       TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
@@ -108,7 +108,7 @@ public class TestHRegionOnCluster {
       } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);

       // Put data: r3->v3
-      Log.info("Loading r3 to v3 into " + Bytes.toString(TABLENAME));
+      Log.info("Loading r3 to v3 into " + TABLENAME);
       putDataAndVerify(table, "r3", FAMILY, "v3", 3);

       // Kill target server
@@ -125,7 +125,7 @@ public class TestHRegionOnCluster {
       cluster.getRegionServerThreads().get(originServerNum).join();

       // Put data: r4->v4
-      Log.info("Loading r4 to v4 into " + Bytes.toString(TABLENAME));
+      Log.info("Loading r4 to v4 into " + TABLENAME);
       putDataAndVerify(table, "r4", FAMILY, "v4", 4);
     } finally {

View File

@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
@@ -91,7 +92,7 @@ public class TestRegionMergeTransactionOnCluster {
   static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

   private static HMaster master;
-  private static HBaseAdmin admin;
+  private static Admin admin;

   static void setupOnce() throws Exception {
     // Start a cluster

View File

@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -57,7 +58,7 @@ import org.junit.experimental.categories.Category;
  * w.r.t. essential column family optimization
  */
 public class TestSCVFWithMiniCluster {
-  private static final String HBASE_TABLE_NAME = "TestSCVFWithMiniCluster";
+  private static final TableName HBASE_TABLE_NAME = TableName.valueOf("TestSCVFWithMiniCluster");

   private static final byte[] FAMILY_A = Bytes.toBytes("a");
   private static final byte[] FAMILY_B = Bytes.toBytes("b");
@@ -77,7 +78,7 @@ public class TestSCVFWithMiniCluster {
     util.startMiniCluster(1);

-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getHBaseAdmin();
     destroy(admin, HBASE_TABLE_NAME);
     create(admin, HBASE_TABLE_NAME, FAMILY_A, FAMILY_B);
     admin.close();
@@ -215,9 +216,9 @@ public class TestSCVFWithMiniCluster {
     verify(scan);
   }

-  private static void create(HBaseAdmin admin, String tableName, byte[]... families)
+  private static void create(Admin admin, TableName tableName, byte[]... families)
       throws IOException {
-    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor desc = new HTableDescriptor(tableName);
     for (byte[] family : families) {
       HColumnDescriptor colDesc = new HColumnDescriptor(family);
       colDesc.setMaxVersions(1);
@@ -231,7 +232,7 @@ public class TestSCVFWithMiniCluster {
     }
   }

-  private static void destroy(HBaseAdmin admin, String tableName) throws IOException {
+  private static void destroy(Admin admin, TableName tableName) throws IOException {
     try {
       admin.disableTable(tableName);
       admin.deleteTable(tableName);
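
The create/destroy helpers now take an Admin and a TableName end to end. Their shape, per the hunks above, with the cleanup exception handling filled in as an assumption (the hunk ends before the catch clause):

import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;

public class SchemaHelpers {
  // Create a table with single-version column families, per the diff.
  static void create(Admin admin, TableName tableName, byte[]... families)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor colDesc = new HColumnDescriptor(family);
      colDesc.setMaxVersions(1);
      desc.addFamily(colDesc);
    }
    admin.createTable(desc);
  }

  // Drop the table if present; a missing table is assumed to be tolerated.
  static void destroy(Admin admin, TableName tableName) throws IOException {
    try {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    } catch (TableNotFoundException e) {
      // table did not exist yet; nothing to clean up (assumed behavior)
    }
  }
}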

View File

@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -114,7 +115,7 @@ public class TestTags {
       // colDesc.setDataBlockEncoding(DataBlockEncoding.NONE);
       colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);
       desc.addFamily(colDesc);
-      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+      Admin admin = TEST_UTIL.getHBaseAdmin();
       admin.createTable(desc);
       byte[] value = Bytes.toBytes("value");
       table = new HTable(TEST_UTIL.getConfiguration(), tableName);
@@ -192,7 +193,7 @@ public class TestTags {
       // colDesc.setDataBlockEncoding(DataBlockEncoding.NONE);
       colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);
       desc.addFamily(colDesc);
-      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+      Admin admin = TEST_UTIL.getHBaseAdmin();
       admin.createTable(desc);
       table = new HTable(TEST_UTIL.getConfiguration(), tableName);
@@ -295,7 +296,7 @@ public class TestTags {
       colDesc.setBlockCacheEnabled(true);
       colDesc.setDataBlockEncoding(encoding);
       desc.addFamily(colDesc);
-      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+      Admin admin = TEST_UTIL.getHBaseAdmin();
       admin.createTable(desc);
       try {
         table = new HTable(TEST_UTIL.getConfiguration(), tableName);


@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
@@ -59,7 +60,7 @@ import org.junit.experimental.categories.Category;
 public class TestLogRollAbort {
   private static final Log LOG = LogFactory.getLog(TestLogRolling.class);
   private static MiniDFSCluster dfsCluster;
-  private static HBaseAdmin admin;
+  private static Admin admin;
   private static MiniHBaseCluster cluster;
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();


@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -84,7 +85,7 @@ public class TestLogRolling {
   private byte[] value;
   private FileSystem fs;
   private MiniDFSCluster dfsCluster;
-  private HBaseAdmin admin;
+  private Admin admin;
   private MiniHBaseCluster cluster;
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();


@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.rest.client.Client;
 import org.apache.hadoop.hbase.rest.client.Cluster;
@@ -101,8 +102,8 @@ public class RowResourceBase {
   @Before
   public void beforeMethod() throws Exception {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
-    if (admin.tableExists(TABLE)) {
+    Admin admin = TEST_UTIL.getHBaseAdmin();
+    if (admin.tableExists(TableName.valueOf(TABLE))) {
       TEST_UTIL.deleteTable(Bytes.toBytes(TABLE));
     }
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
@@ -113,8 +114,8 @@ public class RowResourceBase {
   @After
   public void afterMethod() throws Exception {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
-    if (admin.tableExists(TABLE)) {
+    Admin admin = TEST_UTIL.getHBaseAdmin();
+    if (admin.tableExists(TableName.valueOf(TABLE))) {
       TEST_UTIL.deleteTable(Bytes.toBytes(TABLE));
     }
   }


@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -50,7 +51,7 @@ import org.junit.experimental.categories.Category;
 @Category(MediumTests.class)
 public class TestGzipFilter {
-  private static final String TABLE = "TestGzipFilter";
+  private static final TableName TABLE = TableName.valueOf("TestGzipFilter");
   private static final String CFA = "a";
   private static final String COLUMN_1 = CFA + ":1";
   private static final String COLUMN_2 = CFA + ":2";
@@ -68,11 +69,11 @@ public class TestGzipFilter {
     REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
     client = new Client(new Cluster().add("localhost",
       REST_TEST_UTIL.getServletPort()));
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(TABLE)) {
       return;
     }
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+    HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(new HColumnDescriptor(CFA));
     admin.createTable(htd);
   }
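Note: the REST tests use the other half of the migration: the String table constant becomes a TableName constant, so every Admin call can use it directly and the one-time setup stays idempotent. A sketch of the promoted-constant pattern, with hypothetical class and table names:

import java.io.IOException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class ExampleRestSetup {
  // Parsed once; usable directly with tableExists(), createTable(), etc.
  private static final TableName TABLE = TableName.valueOf("ExampleTable");
  private static final String CFA = "a";

  // Idempotent: only creates the table when it does not exist yet.
  static void ensureTable(Admin admin) throws IOException {
    if (admin.tableExists(TABLE)) {
      return;
    }
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    htd.addFamily(new HColumnDescriptor(CFA));
    admin.createTable(htd);
  }
}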


@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.rest;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.rest.client.Client;
 import org.apache.hadoop.hbase.rest.client.Cluster;
@@ -49,7 +50,7 @@ import static org.junit.Assert.assertEquals;
 @Category(MediumTests.class)
 public class TestMultiRowResource {
-  private static final String TABLE = "TestRowResource";
+  private static final TableName TABLE = TableName.valueOf("TestRowResource");
   private static final String CFA = "a";
   private static final String CFB = "b";
   private static final String COLUMN_1 = CFA + ":1";
@@ -82,11 +83,11 @@ public class TestMultiRowResource {
     marshaller = context.createMarshaller();
     unmarshaller = context.createUnmarshaller();
     client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(TABLE)) {
       return;
     }
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+    HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(new HColumnDescriptor(CFA));
     htd.addFamily(new HColumnDescriptor(CFB));
     admin.createTable(htd);


@@ -33,6 +33,7 @@ import javax.xml.bind.Unmarshaller;
 import org.apache.commons.httpclient.Header;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
@@ -55,7 +56,7 @@ import org.junit.experimental.categories.Category;
 @Category(MediumTests.class)
 public class TestScannerResource {
-  private static final String TABLE = "TestScannerResource";
+  private static final TableName TABLE = TableName.valueOf("TestScannerResource");
   private static final String NONEXISTENT_TABLE = "ThisTableDoesNotExist";
   private static final String CFA = "a";
   private static final String CFB = "b";
@@ -73,7 +74,7 @@ public class TestScannerResource {
   private static int expectedRows2;
   private static Configuration conf;

-  static int insertData(Configuration conf, String tableName, String column, double prob)
+  static int insertData(Configuration conf, TableName tableName, String column, double prob)
       throws IOException {
     Random rng = new Random();
     int count = 0;
@@ -163,11 +164,11 @@ public class TestScannerResource {
       ScannerModel.class);
     marshaller = context.createMarshaller();
     unmarshaller = context.createUnmarshaller();
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(TABLE)) {
       return;
     }
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+    HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(new HColumnDescriptor(CFA));
     htd.addFamily(new HColumnDescriptor(CFB));
     admin.createTable(htd);


@@ -33,6 +33,7 @@ import javax.xml.bind.Unmarshaller;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -75,7 +76,7 @@ public class TestScannersWithFilters {
   private static final Log LOG = LogFactory.getLog(TestScannersWithFilters.class);

-  private static final String TABLE = "TestScannersWithFilters";
+  private static final TableName TABLE = TableName.valueOf("TestScannersWithFilters");

   private static final byte [][] ROWS_ONE = {
     Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"),
@@ -128,9 +129,9 @@ public class TestScannersWithFilters {
     unmarshaller = context.createUnmarshaller();
     client = new Client(new Cluster().add("localhost",
       REST_TEST_UTIL.getServletPort()));
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (!admin.tableExists(TABLE)) {
-      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+      HTableDescriptor htd = new HTableDescriptor(TABLE);
       htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
       htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
       admin.createTable(htd);


@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -66,7 +67,7 @@ import org.junit.experimental.categories.Category;
 @Category(MediumTests.class)
 public class TestScannersWithLabels {
-  private static final String TABLE = "TestScannersWithLabels";
+  private static final TableName TABLE = TableName.valueOf("TestScannersWithLabels");
   private static final String CFA = "a";
   private static final String CFB = "b";
   private static final String COLUMN_1 = CFA + ":1";
@@ -86,7 +87,7 @@ public class TestScannersWithLabels {
   private static Unmarshaller unmarshaller;
   private static Configuration conf;

-  private static int insertData(String tableName, String column, double prob) throws IOException {
+  private static int insertData(TableName tableName, String column, double prob) throws IOException {
     Random rng = new Random();
     int count = 0;
     HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
@@ -142,11 +143,11 @@ public class TestScannersWithLabels {
       ScannerModel.class);
     marshaller = context.createMarshaller();
     unmarshaller = context.createUnmarshaller();
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(TABLE)) {
       return;
     }
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+    HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(new HColumnDescriptor(CFA));
     htd.addFamily(new HColumnDescriptor(CFB));
     admin.createTable(htd);


@@ -29,6 +29,8 @@ import javax.xml.bind.JAXBException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.rest.client.Client;
 import org.apache.hadoop.hbase.rest.client.Cluster;
@@ -95,8 +97,8 @@ public class TestSchemaResource {
     TableSchemaModel model;
     Response response;
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
-    assertFalse(admin.tableExists(TABLE1));
+    Admin admin = TEST_UTIL.getHBaseAdmin();
+    assertFalse(admin.tableExists(TableName.valueOf(TABLE1)));

     // create the table
     model = testTableSchemaModel.buildTestModel(TABLE1);
@@ -133,7 +135,7 @@ public class TestSchemaResource {
     // delete the table and make sure HBase concurs
     response = client.delete(schemaPath);
     assertEquals(response.getCode(), 200);
-    assertFalse(admin.tableExists(TABLE1));
+    assertFalse(admin.tableExists(TableName.valueOf(TABLE1)));
   }

   @Test
@@ -142,8 +144,8 @@ public class TestSchemaResource {
     TableSchemaModel model;
     Response response;
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
-    assertFalse(admin.tableExists(TABLE2));
+    Admin admin = TEST_UTIL.getHBaseAdmin();
+    assertFalse(admin.tableExists(TableName.valueOf(TABLE2)));

     // create the table
     model = testTableSchemaModel.buildTestModel(TABLE2);
@@ -184,7 +186,7 @@ public class TestSchemaResource {
     // delete the table and make sure HBase concurs
     response = client.delete(schemaPath);
     assertEquals(response.getCode(), 200);
-    assertFalse(admin.tableExists(TABLE2));
+    assertFalse(admin.tableExists(TableName.valueOf(TABLE2)));
   }
 }
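Note: TestSchemaResource shows the opposite choice: TABLE1 and TABLE2 stay Strings (they also feed REST paths and schema models), so the conversion happens at each Admin call site instead. A sketch of that call-site-wrapping pattern, with hypothetical names:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class ExampleSchemaCheck {
  // Kept as a String because it is also used to build REST resource paths.
  private static final String TABLE1 = "ExampleTable1";

  static boolean tableAbsent(Admin admin) throws IOException {
    // Admin.tableExists() wants a TableName, so wrap at the call site.
    return !admin.tableExists(TableName.valueOf(TABLE1));
  }
}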


@@ -31,6 +31,7 @@ import javax.xml.bind.JAXBException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
@@ -56,7 +57,7 @@ import org.junit.experimental.categories.Category;
 public class TestTableResource {
   private static final Log LOG = LogFactory.getLog(TestTableResource.class);

-  private static String TABLE = "TestTableResource";
+  private static TableName TABLE = TableName.valueOf("TestTableResource");
   private static String COLUMN_FAMILY = "test";
   private static String COLUMN = COLUMN_FAMILY + ":qualifier";
   private static Map<HRegionInfo, ServerName> regionMap;
@@ -78,11 +79,11 @@ public class TestTableResource {
       TableInfoModel.class,
       TableListModel.class,
       TableRegionModel.class);
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(TABLE)) {
       return;
     }
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+    HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
     admin.createTable(htd);
     HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
@@ -106,7 +107,7 @@ public class TestTableResource {
     Map<HRegionInfo, ServerName> m = table.getRegionLocations();
     assertEquals(m.size(), 1);
     // tell the master to split the table
-    admin.split(TABLE);
+    admin.split(TABLE.toBytes());
     // give some time for the split to happen

     long timeout = System.currentTimeMillis() + (15 * 1000);
@@ -139,7 +140,7 @@ public class TestTableResource {
     assertTrue(tables.hasNext());
     while (tables.hasNext()) {
       TableModel table = tables.next();
-      if (table.getName().equals(TABLE)) {
+      if (table.getName().equals(TABLE.getNameAsString())) {
         found = true;
         break;
       }
@@ -148,7 +149,7 @@ public class TestTableResource {
   }

   void checkTableInfo(TableInfoModel model) {
-    assertEquals(model.getName(), TABLE);
+    assertEquals(model.getName(), TABLE.getNameAsString());
     Iterator<TableRegionModel> regions = model.getRegions().iterator();
     assertTrue(regions.hasNext());
     while (regions.hasNext()) {
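Note: once TABLE is a TableName, the remaining byte[]- and String-based call sites bridge through its converters, as in admin.split(TABLE.toBytes()) and TABLE.getNameAsString() above. A sketch of both bridges (the wrapper method is hypothetical, and it is declared as throws Exception because the exact checked exceptions of the byte[] split overload are not visible in this diff):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class ExampleNameBridges {
  static void splitAndVerify(Admin admin, TableName table, String reportedName)
      throws Exception {
    // Legacy byte[] entry point: convert with toBytes().
    admin.split(table.toBytes());
    // String comparisons: convert with getNameAsString().
    if (!reportedName.equals(table.getNameAsString())) {
      throw new IllegalStateException("unexpected table name: " + reportedName);
    }
  }
}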


@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.ParseFilter;
@@ -74,7 +75,7 @@ import org.xml.sax.XMLReader;
 @Category(MediumTests.class)
 public class TestTableScan {
-  private static final String TABLE = "TestScanResource";
+  private static final TableName TABLE = TableName.valueOf("TestScanResource");
   private static final String CFA = "a";
   private static final String CFB = "b";
   private static final String COLUMN_1 = CFA + ":1";
@@ -96,9 +97,9 @@ public class TestTableScan {
     REST_TEST_UTIL.startServletContainer(conf);
     client = new Client(new Cluster().add("localhost",
       REST_TEST_UTIL.getServletPort()));
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (!admin.tableExists(TABLE)) {
-      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+      HTableDescriptor htd = new HTableDescriptor(TABLE);
       htd.addFamily(new HColumnDescriptor(CFA));
       htd.addFamily(new HColumnDescriptor(CFB));
       admin.createTable(htd);


@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -58,7 +59,7 @@ import org.junit.experimental.categories.Category;
 @Category(MediumTests.class)
 public class TestRemoteTable {
-  private static final String TABLE = "TestRemoteTable";
+  private static final TableName TABLE = TableName.valueOf("TestRemoteTable");
   private static final byte[] ROW_1 = Bytes.toBytes("testrow1");
   private static final byte[] ROW_2 = Bytes.toBytes("testrow2");
   private static final byte[] ROW_3 = Bytes.toBytes("testrow3");
@@ -88,12 +89,12 @@ public class TestRemoteTable {
   @Before
   public void before() throws Exception {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(TABLE)) {
       if (admin.isTableEnabled(TABLE)) admin.disableTable(TABLE);
       admin.deleteTable(TABLE);
     }
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+    HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(new HColumnDescriptor(COLUMN_1).setMaxVersions(3));
     htd.addFamily(new HColumnDescriptor(COLUMN_2).setMaxVersions(3));
     htd.addFamily(new HColumnDescriptor(COLUMN_3).setMaxVersions(3));
@@ -116,7 +117,7 @@ public class TestRemoteTable {
     remoteTable = new RemoteHTable(
       new Client(new Cluster().add("localhost",
         REST_TEST_UTIL.getServletPort())),
-      TEST_UTIL.getConfiguration(), TABLE);
+      TEST_UTIL.getConfiguration(), TABLE.toBytes());
   }

   @After


@@ -49,10 +49,13 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
@@ -205,7 +208,7 @@ public class TestAccessController extends SecureTestUtil {
   @Before
   public void setUp() throws Exception {
     // Create the test table (owner added to the _acl_ table)
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName());
     HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
     hcd.setMaxVersions(100);
@@ -911,7 +914,7 @@ public class TestAccessController extends SecureTestUtil {
     HTable table = new HTable(conf, tableName);
     try {
-      HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+      Admin admin = TEST_UTIL.getHBaseAdmin();
       TEST_UTIL.waitTableEnabled(admin, tableName.getName());
       LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
       loader.doBulkLoad(loadPath, table);
@@ -1031,7 +1034,7 @@ public class TestAccessController extends SecureTestUtil {
     final byte[] qualifier = Bytes.toBytes("q");

     // create table
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(tableName)) {
       admin.disableTable(tableName);
       admin.deleteTable(tableName);
@@ -1305,7 +1308,7 @@ public class TestAccessController extends SecureTestUtil {
     final byte[] qualifier = Bytes.toBytes("q");

     // create table
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(tableName)) {
       admin.disableTable(tableName);
       admin.deleteTable(tableName);
@@ -1419,7 +1422,7 @@ public class TestAccessController extends SecureTestUtil {
     final byte[] qualifier = Bytes.toBytes("q");

     // create table
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(tableName)) {
       admin.disableTable(tableName);
       admin.deleteTable(tableName);
@@ -1884,7 +1887,7 @@ public class TestAccessController extends SecureTestUtil {
       Permission.Action.ADMIN, Permission.Action.CREATE, Permission.Action.READ,
       Permission.Action.WRITE);

-    final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    final Admin admin = TEST_UTIL.getHBaseAdmin();
     HTableDescriptor htd = new HTableDescriptor(TEST_TABLE2);
     htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
     admin.createTable(htd);
@@ -1955,7 +1958,7 @@ public class TestAccessController extends SecureTestUtil {
     AccessTestAction listTablesAction = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+        Admin admin = TEST_UTIL.getHBaseAdmin();
         try {
           admin.listTables();
         } finally {
@@ -1968,7 +1971,7 @@ public class TestAccessController extends SecureTestUtil {
     AccessTestAction getTableDescAction = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+        Admin admin = TEST_UTIL.getHBaseAdmin();
         try {
           admin.getTableDescriptor(TEST_TABLE.getTableName());
         } finally {
@@ -1997,12 +2000,14 @@ public class TestAccessController extends SecureTestUtil {
     AccessTestAction deleteTableAction = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+        HConnection unmanagedConnection = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
+        Admin admin = unmanagedConnection.getAdmin();
         try {
           admin.disableTable(TEST_TABLE.getTableName());
           admin.deleteTable(TEST_TABLE.getTableName());
         } finally {
           admin.close();
+          unmanagedConnection.close();
         }
         return null;
       }
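Note: this file contains the core replacement for the removed constructor usage: instead of new HBaseAdmin(conf), the code creates an unmanaged HConnection and asks it for an Admin, closing both when done. A standalone sketch of that lifecycle, assuming only the calls visible in the hunk plus HBaseConfiguration.create() (the class and method names are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;

public class ExampleAdminLifecycle {
  static void dropTable(Configuration conf, TableName table) throws Exception {
    // Unmanaged connection: the caller owns its lifecycle.
    HConnection connection = HConnectionManager.createConnection(conf);
    Admin admin = connection.getAdmin();
    try {
      admin.disableTable(table);
      admin.deleteTable(table);
    } finally {
      // Close the Admin first, then the connection that produced it.
      admin.close();
      connection.close();
    }
  }

  public static void main(String[] args) throws Exception {
    dropTable(HBaseConfiguration.create(), TableName.valueOf("example"));
  }
}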


@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -123,7 +124,7 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil {
   @Before
   public void setUp() throws Exception {
     // Create the test table (owner added to the _acl_ table)
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName());
     HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY1);
     hcd.setMaxVersions(4);


@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -124,7 +125,7 @@ public class TestCellACLs extends SecureTestUtil {
   @Before
   public void setUp() throws Exception {
     // Create the test table (owner added to the _acl_ table)
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName());
     HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
     hcd.setMaxVersions(4);


@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
@@ -111,7 +112,7 @@ public class TestScanEarlyTermination extends SecureTestUtil {
   @Before
   public void setUp() throws Exception {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName());
     htd.setOwner(USER_OWNER);
     HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY1);


@@ -39,6 +39,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.LargeTests;
@@ -292,7 +293,7 @@ public class TestTablePermissions {
         .add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v1")));
     table.put(new Put(Bytes.toBytes("row2"))
         .add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v2")));
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getHBaseAdmin();
     admin.split(TEST_TABLE.getName());

     // wait for split


@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -725,7 +726,7 @@ public class TestVisibilityLabels {
   @Test
   public void testUserShouldNotDoDDLOpOnLabelsTable() throws Exception {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    Admin admin = TEST_UTIL.getHBaseAdmin();
     try {
       admin.disableTable(LABELS_TABLE_NAME);
       fail("Lables table should not get disabled by user.");


@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -518,7 +519,7 @@ public class TestVisibilityLabelsWithDeletes {
     TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
     HTable table = null;
     try {
-      HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
+      Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
       HColumnDescriptor colDesc = new HColumnDescriptor(fam);
       colDesc.setMaxVersions(5);
       HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -600,7 +601,7 @@ public class TestVisibilityLabelsWithDeletes {
     TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
     HTable table = null;
     try {
-      HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
+      Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
       HColumnDescriptor colDesc = new HColumnDescriptor(fam);
       colDesc.setMaxVersions(5);
       HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -682,7 +683,7 @@ public class TestVisibilityLabelsWithDeletes {
     TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
     HTable table = null;
     try {
-      HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
+      Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
       HColumnDescriptor colDesc = new HColumnDescriptor(fam);
       colDesc.setMaxVersions(5);
       HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -738,7 +739,7 @@ public class TestVisibilityLabelsWithDeletes {
     TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
     HTable table = null;
     try {
-      HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
+      Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
       HColumnDescriptor colDesc = new HColumnDescriptor(fam);
       colDesc.setMaxVersions(5);
       HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -927,7 +928,7 @@ public class TestVisibilityLabelsWithDeletes {
   private HTable doPuts(TableName tableName) throws IOException, InterruptedIOException,
       RetriesExhaustedWithDetailsException, InterruptedException {
     HTable table;
-    HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
+    Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
     HColumnDescriptor colDesc = new HColumnDescriptor(fam);
     colDesc.setMaxVersions(5);
     HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -969,7 +970,7 @@ public class TestVisibilityLabelsWithDeletes {
   private HTable doPutsWithDiffCols(TableName tableName) throws IOException,
       InterruptedIOException, RetriesExhaustedWithDetailsException, InterruptedException {
     HTable table;
-    HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
+    Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
     HColumnDescriptor colDesc = new HColumnDescriptor(fam);
     colDesc.setMaxVersions(5);
     HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -1004,7 +1005,7 @@ public class TestVisibilityLabelsWithDeletes {
   private HTable doPutsWithoutVisibility(TableName tableName) throws IOException,
       InterruptedIOException, RetriesExhaustedWithDetailsException, InterruptedException {
     HTable table;
-    HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
+    Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
     HColumnDescriptor colDesc = new HColumnDescriptor(fam);
     colDesc.setMaxVersions(5);
     HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -1454,7 +1455,7 @@ public class TestVisibilityLabelsWithDeletes {
     TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
     HTable table = null;
     try {
-      HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
+      Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
       HColumnDescriptor colDesc = new HColumnDescriptor(fam);
       colDesc.setMaxVersions(5);
       HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -1507,7 +1508,7 @@ public class TestVisibilityLabelsWithDeletes {
     TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
     HTable table = null;
     try {
-      HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
+      Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
       HColumnDescriptor colDesc = new HColumnDescriptor(fam);
       colDesc.setMaxVersions(5);
       HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -2916,7 +2917,7 @@ public class TestVisibilityLabelsWithDeletes {
     TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
     HTable table = null;
     try {
-      HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
+      Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
      HColumnDescriptor colDesc = new HColumnDescriptor(fam);
       colDesc.setMaxVersions(5);
       HTableDescriptor desc = new HTableDescriptor(tableName);


@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -84,7 +85,7 @@ public class SnapshotTestingUtils {
    * @throws IOException
    *           if the admin operation fails
    */
-  public static void assertNoSnapshots(HBaseAdmin admin) throws IOException {
+  public static void assertNoSnapshots(Admin admin) throws IOException {
     assertEquals("Have some previous snapshots", 0, admin.listSnapshots()
         .size());
   }
@@ -94,7 +95,7 @@ public class SnapshotTestingUtils {
    * name and table match the passed in parameters.
    */
   public static List<SnapshotDescription> assertExistsMatchingSnapshot(
-      HBaseAdmin admin, String snapshotName, TableName tableName)
+      Admin admin, String snapshotName, TableName tableName)
       throws IOException {
     // list the snapshot
     List<SnapshotDescription> snapshots = admin.listSnapshots();
@@ -114,7 +115,7 @@ public class SnapshotTestingUtils {
   /**
    * Make sure that there is only one snapshot returned from the master
    */
-  public static void assertOneSnapshotThatMatches(HBaseAdmin admin,
+  public static void assertOneSnapshotThatMatches(Admin admin,
       SnapshotDescription snapshot) throws IOException {
     assertOneSnapshotThatMatches(admin, snapshot.getName(),
         TableName.valueOf(snapshot.getTable()));
@@ -125,7 +126,7 @@ public class SnapshotTestingUtils {
    * name and table match the passed in parameters.
    */
   public static List<SnapshotDescription> assertOneSnapshotThatMatches(
-      HBaseAdmin admin, String snapshotName, TableName tableName)
+      Admin admin, String snapshotName, TableName tableName)
       throws IOException {
     // list the snapshot
     List<SnapshotDescription> snapshots = admin.listSnapshots();
@@ -142,7 +143,7 @@ public class SnapshotTestingUtils {
    * name and table match the passed in parameters.
    */
   public static List<SnapshotDescription> assertOneSnapshotThatMatches(
-      HBaseAdmin admin, byte[] snapshot, TableName tableName) throws IOException {
+      Admin admin, byte[] snapshot, TableName tableName) throws IOException {
     return assertOneSnapshotThatMatches(admin, Bytes.toString(snapshot),
         tableName);
   }
@@ -153,7 +154,7 @@ public class SnapshotTestingUtils {
    */
   public static void confirmSnapshotValid(
       SnapshotDescription snapshotDescriptor, TableName tableName,
-      byte[] testFamily, Path rootDir, HBaseAdmin admin, FileSystem fs)
+      byte[] testFamily, Path rootDir, Admin admin, FileSystem fs)
       throws IOException {
     ArrayList nonEmptyTestFamilies = new ArrayList(1);
     nonEmptyTestFamilies.add(testFamily);
@@ -166,7 +167,7 @@ public class SnapshotTestingUtils {
    */
   public static void confirmEmptySnapshotValid(
       SnapshotDescription snapshotDescriptor, TableName tableName,
-      byte[] testFamily, Path rootDir, HBaseAdmin admin, FileSystem fs)
+      byte[] testFamily, Path rootDir, Admin admin, FileSystem fs)
       throws IOException {
     ArrayList emptyTestFamilies = new ArrayList(1);
     emptyTestFamilies.add(testFamily);
@@ -183,7 +184,7 @@ public class SnapshotTestingUtils {
   public static void confirmSnapshotValid(
       SnapshotDescription snapshotDescriptor, TableName tableName,
       List<byte[]> nonEmptyTestFamilies, List<byte[]> emptyTestFamilies,
-      Path rootDir, HBaseAdmin admin, FileSystem fs) throws IOException {
+      Path rootDir, Admin admin, FileSystem fs) throws IOException {
     final Configuration conf = admin.getConfiguration();

     // check snapshot dir
@@ -265,14 +266,14 @@ public class SnapshotTestingUtils {
    * Take snapshot with maximum of numTries attempts, ignoring CorruptedSnapshotException
    * except for the last CorruptedSnapshotException
    */
-  public static void snapshot(HBaseAdmin admin,
+  public static void snapshot(Admin admin,
       final String snapshotName, final String tableName,
       SnapshotDescription.Type type, int numTries) throws IOException {
     int tries = 0;
     CorruptedSnapshotException lastEx = null;
     while (tries++ < numTries) {
       try {
-        admin.snapshot(snapshotName, tableName, type);
+        admin.snapshot(snapshotName, TableName.valueOf(tableName), type);
         return;
       } catch (CorruptedSnapshotException cse) {
         LOG.warn("Got CorruptedSnapshotException", cse);
@@ -282,12 +283,12 @@ public class SnapshotTestingUtils {
     throw lastEx;
   }

-  public static void cleanupSnapshot(HBaseAdmin admin, byte[] tableName)
+  public static void cleanupSnapshot(Admin admin, byte[] tableName)
       throws IOException {
     SnapshotTestingUtils.cleanupSnapshot(admin, Bytes.toString(tableName));
   }

-  public static void cleanupSnapshot(HBaseAdmin admin, String snapshotName)
+  public static void cleanupSnapshot(Admin admin, String snapshotName)
       throws IOException {
     // delete the taken snapshot
     admin.deleteSnapshot(snapshotName);
@@ -356,7 +357,7 @@ public class SnapshotTestingUtils {
    * not empty. Note that this will leave the table disabled
    * in the case of an offline snapshot.
    */
-  public static void createSnapshotAndValidate(HBaseAdmin admin,
+  public static void createSnapshotAndValidate(Admin admin,
       TableName tableName, String familyName, String snapshotNameString,
       Path rootDir, FileSystem fs, boolean onlineSnapshot)
       throws Exception {
@@ -370,7 +371,7 @@ public class SnapshotTestingUtils {
    * Take a snapshot of the specified table and verify the given families.
    * Note that this will leave the table disabled in the case of an offline snapshot.
    */
-  public static void createSnapshotAndValidate(HBaseAdmin admin,
+  public static void createSnapshotAndValidate(Admin admin,
       TableName tableName, List<byte[]> nonEmptyFamilyNames, List<byte[]> emptyFamilyNames,
       String snapshotNameString, Path rootDir, FileSystem fs, boolean onlineSnapshot)
       throws Exception {
@@ -702,7 +703,7 @@ public class SnapshotTestingUtils {
     table.put(put);
   }

-  public static void deleteAllSnapshots(final HBaseAdmin admin)
+  public static void deleteAllSnapshots(final Admin admin)
      throws IOException {
     // Delete all the snapshots
     for (SnapshotDescription snapshot: admin.listSnapshots()) {
@@ -729,7 +730,7 @@ public class SnapshotTestingUtils {
     }
   }

-  public static void verifyReplicasCameOnline(TableName tableName, HBaseAdmin admin,
+  public static void verifyReplicasCameOnline(TableName tableName, Admin admin,
      int regionReplication) throws IOException {
     List<HRegionInfo> regions = admin.getTableRegions(tableName);
     HashSet<HRegionInfo> set = new HashSet<HRegionInfo>();
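Note: typing these utilities against the Admin interface means any Admin implementation can drive them; the tests keep passing the HBaseAdmin returned by the testing utility, which implements Admin. A small usage-style sketch mirroring deleteAllSnapshots() above (the protobuf-generated SnapshotDescription import is an assumption about this era of the codebase, and the class name is hypothetical):

import java.io.IOException;

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

public class ExampleSnapshotCleanup {
  // Accepts the interface, not the concrete HBaseAdmin class.
  static void deleteAllSnapshots(Admin admin) throws IOException {
    for (SnapshotDescription snapshot : admin.listSnapshots()) {
      admin.deleteSnapshot(snapshot.getName());
    }
  }
}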


@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
@ -77,7 +78,7 @@ public class TestExportSnapshot {
private byte[] snapshotName; private byte[] snapshotName;
private int tableNumFiles; private int tableNumFiles;
private TableName tableName; private TableName tableName;
private HBaseAdmin admin; private Admin admin;
public static void setUpBaseConf(Configuration conf) { public static void setUpBaseConf(Configuration conf) {
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
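The fixture change is the same idea at field level: the test declares the interface type while the testing utility keeps returning its concrete admin. A sketch of the resulting setup, assuming the stock JUnit wiring these tests use:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.Admin;
    import org.junit.Before;

    public class AdminTypedFixtureSketch {
      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
      private Admin admin;  // interface type instead of HBaseAdmin

      @Before
      public void setup() throws Exception {
        // getHBaseAdmin() still hands back the concrete class; the assignment
        // compiles because HBaseAdmin implements Admin after this change.
        admin = UTIL.getHBaseAdmin();
      }
    }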


@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.ScannerCallable; import org.apache.hadoop.hbase.client.ScannerCallable;
@ -77,11 +78,9 @@ public class TestFlushSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestFlushSnapshotFromClient.class); private static final Log LOG = LogFactory.getLog(TestFlushSnapshotFromClient.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static final int NUM_RS = 2; private static final int NUM_RS = 2;
private static final String STRING_TABLE_NAME = "test";
private static final byte[] TEST_FAM = Bytes.toBytes("fam"); private static final byte[] TEST_FAM = Bytes.toBytes("fam");
private static final byte[] TEST_QUAL = Bytes.toBytes("q"); private static final byte[] TEST_QUAL = Bytes.toBytes("q");
private static final TableName TABLE_NAME = private static final TableName TABLE_NAME = TableName.valueOf("test");
TableName.valueOf(STRING_TABLE_NAME);
private final int DEFAULT_NUM_ROWS = 100; private final int DEFAULT_NUM_ROWS = 100;
/** /**
@ -142,7 +141,7 @@ public class TestFlushSnapshotFromClient {
*/ */
@Test (timeout=300000) @Test (timeout=300000)
public void testFlushTableSnapshot() throws Exception { public void testFlushTableSnapshot() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin(); Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots // make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin); SnapshotTestingUtils.assertNoSnapshots(admin);
@ -157,7 +156,7 @@ public class TestFlushSnapshotFromClient {
// take a snapshot of the enabled table // take a snapshot of the enabled table
String snapshotString = "offlineTableSnapshot"; String snapshotString = "offlineTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString); byte[] snapshot = Bytes.toBytes(snapshotString);
admin.snapshot(snapshotString, STRING_TABLE_NAME, SnapshotDescription.Type.FLUSH); admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.FLUSH);
LOG.debug("Snapshot completed."); LOG.debug("Snapshot completed.");
// make sure we have the snapshot // make sure we have the snapshot
@ -181,7 +180,7 @@ public class TestFlushSnapshotFromClient {
*/ */
@Test(timeout=30000) @Test(timeout=30000)
public void testSkipFlushTableSnapshot() throws Exception { public void testSkipFlushTableSnapshot() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin(); Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots // make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin); SnapshotTestingUtils.assertNoSnapshots(admin);
@ -196,7 +195,7 @@ public class TestFlushSnapshotFromClient {
// take a snapshot of the enabled table // take a snapshot of the enabled table
String snapshotString = "skipFlushTableSnapshot"; String snapshotString = "skipFlushTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString); byte[] snapshot = Bytes.toBytes(snapshotString);
admin.snapshot(snapshotString, STRING_TABLE_NAME, SnapshotDescription.Type.SKIPFLUSH); admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.SKIPFLUSH);
LOG.debug("Snapshot completed."); LOG.debug("Snapshot completed.");
// make sure we have the snapshot // make sure we have the snapshot
@ -225,7 +224,7 @@ public class TestFlushSnapshotFromClient {
*/ */
@Test (timeout=300000) @Test (timeout=300000)
public void testFlushTableSnapshotWithProcedure() throws Exception { public void testFlushTableSnapshotWithProcedure() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin(); Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots // make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin); SnapshotTestingUtils.assertNoSnapshots(admin);
@ -241,7 +240,7 @@ public class TestFlushSnapshotFromClient {
String snapshotString = "offlineTableSnapshot"; String snapshotString = "offlineTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString); byte[] snapshot = Bytes.toBytes(snapshotString);
Map<String, String> props = new HashMap<String, String>(); Map<String, String> props = new HashMap<String, String>();
props.put("table", STRING_TABLE_NAME); props.put("table", TABLE_NAME.getNameAsString());
admin.execProcedure(SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION, admin.execProcedure(SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION,
snapshotString, props); snapshotString, props);
@ -265,19 +264,19 @@ public class TestFlushSnapshotFromClient {
@Test (timeout=300000) @Test (timeout=300000)
public void testSnapshotFailsOnNonExistantTable() throws Exception { public void testSnapshotFailsOnNonExistantTable() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin(); Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots // make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin); SnapshotTestingUtils.assertNoSnapshots(admin);
String tableName = "_not_a_table"; TableName tableName = TableName.valueOf("_not_a_table");
// make sure the table doesn't exist // make sure the table doesn't exist
boolean fail = false; boolean fail = false;
do { do {
try { try {
admin.getTableDescriptor(Bytes.toBytes(tableName)); admin.getTableDescriptor(tableName);
fail = true; fail = true;
LOG.error("Table:" + tableName + " already exists, checking a new name"); LOG.error("Table:" + tableName + " already exists, checking a new name");
tableName = tableName+"!"; tableName = TableName.valueOf(tableName+"!");
} catch (TableNotFoundException e) { } catch (TableNotFoundException e) {
fail = false; fail = false;
} }
@ -294,7 +293,7 @@ public class TestFlushSnapshotFromClient {
@Test(timeout = 300000) @Test(timeout = 300000)
public void testAsyncFlushSnapshot() throws Exception { public void testAsyncFlushSnapshot() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin(); Admin admin = UTIL.getHBaseAdmin();
SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("asyncSnapshot") SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("asyncSnapshot")
.setTable(TABLE_NAME.getNameAsString()) .setTable(TABLE_NAME.getNameAsString())
.setType(SnapshotDescription.Type.FLUSH) .setType(SnapshotDescription.Type.FLUSH)
@ -316,7 +315,7 @@ public class TestFlushSnapshotFromClient {
@Test (timeout=300000) @Test (timeout=300000)
public void testSnapshotStateAfterMerge() throws Exception { public void testSnapshotStateAfterMerge() throws Exception {
int numRows = DEFAULT_NUM_ROWS; int numRows = DEFAULT_NUM_ROWS;
HBaseAdmin admin = UTIL.getHBaseAdmin(); Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots // make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin); SnapshotTestingUtils.assertNoSnapshots(admin);
// load the table so we have some data // load the table so we have some data
@ -324,12 +323,12 @@ public class TestFlushSnapshotFromClient {
// Take a snapshot // Take a snapshot
String snapshotBeforeMergeName = "snapshotBeforeMerge"; String snapshotBeforeMergeName = "snapshotBeforeMerge";
admin.snapshot(snapshotBeforeMergeName, STRING_TABLE_NAME, SnapshotDescription.Type.FLUSH); admin.snapshot(snapshotBeforeMergeName, TABLE_NAME, SnapshotDescription.Type.FLUSH);
// Clone the table // Clone the table
String cloneBeforeMergeName = "cloneBeforeMerge"; TableName cloneBeforeMergeName = TableName.valueOf("cloneBeforeMerge");
admin.cloneSnapshot(snapshotBeforeMergeName, cloneBeforeMergeName); admin.cloneSnapshot(snapshotBeforeMergeName, cloneBeforeMergeName);
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TableName.valueOf(cloneBeforeMergeName)); SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneBeforeMergeName);
// Merge two regions // Merge two regions
List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME); List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
@ -351,13 +350,13 @@ public class TestFlushSnapshotFromClient {
assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size()); assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());
// Clone the table // Clone the table
String cloneAfterMergeName = "cloneAfterMerge"; TableName cloneAfterMergeName = TableName.valueOf("cloneAfterMerge");
admin.cloneSnapshot(snapshotBeforeMergeName, cloneAfterMergeName); admin.cloneSnapshot(snapshotBeforeMergeName, cloneAfterMergeName);
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TableName.valueOf(cloneAfterMergeName)); SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneAfterMergeName);
SnapshotTestingUtils.verifyRowCount(UTIL, TABLE_NAME, numRows); SnapshotTestingUtils.verifyRowCount(UTIL, TABLE_NAME, numRows);
SnapshotTestingUtils.verifyRowCount(UTIL, TableName.valueOf(cloneBeforeMergeName), numRows); SnapshotTestingUtils.verifyRowCount(UTIL, cloneBeforeMergeName, numRows);
SnapshotTestingUtils.verifyRowCount(UTIL, TableName.valueOf(cloneAfterMergeName), numRows); SnapshotTestingUtils.verifyRowCount(UTIL, cloneAfterMergeName, numRows);
// test that we can delete the snapshot // test that we can delete the snapshot
UTIL.deleteTable(cloneAfterMergeName); UTIL.deleteTable(cloneAfterMergeName);
@ -367,7 +366,7 @@ public class TestFlushSnapshotFromClient {
@Test (timeout=300000) @Test (timeout=300000)
public void testTakeSnapshotAfterMerge() throws Exception { public void testTakeSnapshotAfterMerge() throws Exception {
int numRows = DEFAULT_NUM_ROWS; int numRows = DEFAULT_NUM_ROWS;
HBaseAdmin admin = UTIL.getHBaseAdmin(); Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots // make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin); SnapshotTestingUtils.assertNoSnapshots(admin);
// load the table so we have some data // load the table so we have some data
@ -393,16 +392,16 @@ public class TestFlushSnapshotFromClient {
// Take a snapshot // Take a snapshot
String snapshotName = "snapshotAfterMerge"; String snapshotName = "snapshotAfterMerge";
SnapshotTestingUtils.snapshot(admin, snapshotName, STRING_TABLE_NAME, SnapshotTestingUtils.snapshot(admin, snapshotName, TABLE_NAME.getNameAsString(),
SnapshotDescription.Type.FLUSH, 3); SnapshotDescription.Type.FLUSH, 3);
// Clone the table // Clone the table
String cloneName = "cloneMerge"; TableName cloneName = TableName.valueOf("cloneMerge");
admin.cloneSnapshot(snapshotName, cloneName); admin.cloneSnapshot(snapshotName, cloneName);
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TableName.valueOf(cloneName)); SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneName);
SnapshotTestingUtils.verifyRowCount(UTIL, TABLE_NAME, numRows); SnapshotTestingUtils.verifyRowCount(UTIL, TABLE_NAME, numRows);
SnapshotTestingUtils.verifyRowCount(UTIL, TableName.valueOf(cloneName), numRows); SnapshotTestingUtils.verifyRowCount(UTIL, cloneName, numRows);
// test that we can delete the snapshot // test that we can delete the snapshot
UTIL.deleteTable(cloneName); UTIL.deleteTable(cloneName);
@ -414,7 +413,7 @@ public class TestFlushSnapshotFromClient {
@Test (timeout=300000) @Test (timeout=300000)
public void testFlushCreateListDestroy() throws Exception { public void testFlushCreateListDestroy() throws Exception {
LOG.debug("------- Starting Snapshot test -------------"); LOG.debug("------- Starting Snapshot test -------------");
HBaseAdmin admin = UTIL.getHBaseAdmin(); Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots // make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin); SnapshotTestingUtils.assertNoSnapshots(admin);
// load the table so we have some data // load the table so we have some data
@ -423,8 +422,7 @@ public class TestFlushSnapshotFromClient {
String snapshotName = "flushSnapshotCreateListDestroy"; String snapshotName = "flushSnapshotCreateListDestroy";
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
SnapshotTestingUtils.createSnapshotAndValidate(admin, SnapshotTestingUtils.createSnapshotAndValidate(admin, TABLE_NAME, Bytes.toString(TEST_FAM),
TableName.valueOf(STRING_TABLE_NAME), Bytes.toString(TEST_FAM),
snapshotName, rootDir, fs, true); snapshotName, rootDir, fs, true);
} }
@ -435,12 +433,10 @@ public class TestFlushSnapshotFromClient {
*/ */
@Test(timeout=300000) @Test(timeout=300000)
public void testConcurrentSnapshottingAttempts() throws IOException, InterruptedException { public void testConcurrentSnapshottingAttempts() throws IOException, InterruptedException {
final String STRING_TABLE2_NAME = STRING_TABLE_NAME + "2"; final TableName TABLE2_NAME = TableName.valueOf(TABLE_NAME + "2");
final TableName TABLE2_NAME =
TableName.valueOf(STRING_TABLE2_NAME);
int ssNum = 20; int ssNum = 20;
HBaseAdmin admin = UTIL.getHBaseAdmin(); Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots // make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin); SnapshotTestingUtils.assertNoSnapshots(admin);
// create second testing table // create second testing table
@ -460,7 +456,7 @@ public class TestFlushSnapshotFromClient {
@Override @Override
public void run() { public void run() {
try { try {
HBaseAdmin admin = UTIL.getHBaseAdmin(); Admin admin = UTIL.getHBaseAdmin();
LOG.info("Submitting snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss)); LOG.info("Submitting snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss));
admin.takeSnapshotAsync(ss); admin.takeSnapshotAsync(ss);
} catch (Exception e) { } catch (Exception e) {
@ -541,7 +537,7 @@ public class TestFlushSnapshotFromClient {
private void waitRegionsAfterMerge(final long numRegionsAfterMerge) private void waitRegionsAfterMerge(final long numRegionsAfterMerge)
throws IOException, InterruptedException { throws IOException, InterruptedException {
HBaseAdmin admin = UTIL.getHBaseAdmin(); Admin admin = UTIL.getHBaseAdmin();
// Verify that there's one region less // Verify that there's one region less
long startTime = System.currentTimeMillis(); long startTime = System.currentTimeMillis();
while (admin.getTableRegions(TABLE_NAME).size() != numRegionsAfterMerge) { while (admin.getTableRegions(TABLE_NAME).size() != numRegionsAfterMerge) {
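String-keyed snapshot and clone calls give way to TableName throughout this test. A condensed sketch of the updated call sequence; the snapshot and clone names are illustrative, and the protobuf-generated SnapshotDescription import is assumed from the surrounding tests:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

    public class SnapshotByTableNameSketch {
      static void snapshotAndClone(Admin admin) throws Exception {
        TableName table = TableName.valueOf("test");
        // Snapshot the table by TableName rather than by raw String.
        admin.snapshot("demoSnapshot", table, SnapshotDescription.Type.FLUSH);
        // Clone targets are TableName too.
        admin.cloneSnapshot("demoSnapshot", TableName.valueOf("demoClone"));
        // Snapshot names themselves remain plain Strings.
        admin.deleteSnapshot("demoSnapshot");
      }
    }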


@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterFileSystem;
@ -60,7 +61,7 @@ public class TestRestoreFlushSnapshotFromClient {
private int snapshot0Rows; private int snapshot0Rows;
private int snapshot1Rows; private int snapshot1Rows;
private TableName tableName; private TableName tableName;
private HBaseAdmin admin; private Admin admin;
@BeforeClass @BeforeClass
public static void setUpBeforeClass() throws Exception { public static void setUpBeforeClass() throws Exception {
@ -162,7 +163,7 @@ public class TestRestoreFlushSnapshotFromClient {
@Test(expected=SnapshotDoesNotExistException.class) @Test(expected=SnapshotDoesNotExistException.class)
public void testCloneNonExistentSnapshot() throws IOException, InterruptedException { public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
String snapshotName = "random-snapshot-" + System.currentTimeMillis(); String snapshotName = "random-snapshot-" + System.currentTimeMillis();
String tableName = "random-table-" + System.currentTimeMillis(); TableName tableName = TableName.valueOf("random-table-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName, tableName); admin.cloneSnapshot(snapshotName, tableName);
} }


@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Get;
@ -261,7 +262,7 @@ public class TestHBaseFsck {
* This method is used to undeploy a region -- close it and attempt to * This method is used to undeploy a region -- close it and attempt to
* remove its state from the Master. * remove its state from the Master.
*/ */
private void undeployRegion(HBaseAdmin admin, ServerName sn, private void undeployRegion(Admin admin, ServerName sn,
HRegionInfo hri) throws IOException, InterruptedException { HRegionInfo hri) throws IOException, InterruptedException {
try { try {
HBaseFsckRepair.closeRegionSilentlyAndWait(admin, sn, hri); HBaseFsckRepair.closeRegionSilentlyAndWait(admin, sn, hri);
@ -482,7 +483,7 @@ public class TestHBaseFsck {
Path tableinfo = null; Path tableinfo = null;
try { try {
setupTable(table); setupTable(table);
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); Admin admin = TEST_UTIL.getHBaseAdmin();
Path hbaseTableDir = FSUtils.getTableDir( Path hbaseTableDir = FSUtils.getTableDir(
FSUtils.getRootDir(conf), table); FSUtils.getRootDir(conf), table);
@ -585,8 +586,7 @@ public class TestHBaseFsck {
/** /**
* Get region info from local cluster. * Get region info from local cluster.
*/ */
Map<ServerName, List<String>> getDeployedHRIs( Map<ServerName, List<String>> getDeployedHRIs(final Admin admin) throws IOException {
final HBaseAdmin admin) throws IOException {
ClusterStatus status = admin.getClusterStatus(); ClusterStatus status = admin.getClusterStatus();
Collection<ServerName> regionServers = status.getServers(); Collection<ServerName> regionServers = status.getServers();
Map<ServerName, List<String>> mm = Map<ServerName, List<String>> mm =
@ -645,7 +645,7 @@ public class TestHBaseFsck {
// different regions with the same start/endkeys since it doesn't // different regions with the same start/endkeys since it doesn't
// differentiate on ts/regionId! We actually need to recheck // differentiate on ts/regionId! We actually need to recheck
// deployments! // deployments!
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); Admin admin = TEST_UTIL.getHBaseAdmin();
while (findDeployedHSI(getDeployedHRIs(admin), hriDupe) == null) { while (findDeployedHSI(getDeployedHRIs(admin), hriDupe) == null) {
Thread.sleep(250); Thread.sleep(250);
} }
@ -803,7 +803,7 @@ public class TestHBaseFsck {
} }
} }
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); Admin admin = TEST_UTIL.getHBaseAdmin();
HBaseFsckRepair.closeRegionSilentlyAndWait(admin, HBaseFsckRepair.closeRegionSilentlyAndWait(admin,
cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI()); cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI());
admin.offline(regionName); admin.offline(regionName);
@ -1388,7 +1388,7 @@ public class TestHBaseFsck {
HRegionInfo hri = location.getRegionInfo(); HRegionInfo hri = location.getRegionInfo();
// do a regular split // do a regular split
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); Admin admin = TEST_UTIL.getHBaseAdmin();
byte[] regionName = location.getRegionInfo().getRegionName(); byte[] regionName = location.getRegionInfo().getRegionName();
admin.split(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM")); admin.split(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
TestEndToEndSplitTransaction.blockUntilRegionSplit( TestEndToEndSplitTransaction.blockUntilRegionSplit(
@ -1438,7 +1438,7 @@ public class TestHBaseFsck {
HRegionInfo hri = location.getRegionInfo(); HRegionInfo hri = location.getRegionInfo();
// do a regular split // do a regular split
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); Admin admin = TEST_UTIL.getHBaseAdmin();
byte[] regionName = location.getRegionInfo().getRegionName(); byte[] regionName = location.getRegionInfo().getRegionName();
admin.split(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM")); admin.split(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
TestEndToEndSplitTransaction.blockUntilRegionSplit( TestEndToEndSplitTransaction.blockUntilRegionSplit(
@ -1831,7 +1831,7 @@ public class TestHBaseFsck {
assertEquals(hfcc.getMissing().size(), missing); assertEquals(hfcc.getMissing().size(), missing);
// its been fixed, verify that we can enable // its been fixed, verify that we can enable
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); Admin admin = TEST_UTIL.getHBaseAdmin();
admin.enableTableAsync(table); admin.enableTableAsync(table);
while (!admin.isTableEnabled(table)) { while (!admin.isTableEnabled(table)) {
try { try {
@ -2211,7 +2211,14 @@ public class TestHBaseFsck {
HRegionInfo hri = metaLocation.getRegionInfo(); HRegionInfo hri = metaLocation.getRegionInfo();
if (unassign) { if (unassign) {
LOG.info("Undeploying meta region " + hri + " from server " + hsa); LOG.info("Undeploying meta region " + hri + " from server " + hsa);
undeployRegion(new HBaseAdmin(conf), hsa, hri); HConnection unmanagedConnection = HConnectionManager.createConnection(conf);
Admin admin = unmanagedConnection.getAdmin();
try {
undeployRegion(admin, hsa, hri);
} finally {
admin.close();
unmanagedConnection.close();
}
} }
if (regionInfoOnly) { if (regionInfoOnly) {
@ -2291,7 +2298,7 @@ public class TestHBaseFsck {
assertNotEquals(region1, region2); assertNotEquals(region1, region2);
// do a region merge // do a region merge
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); Admin admin = TEST_UTIL.getHBaseAdmin();
admin.mergeRegions(region1.getEncodedNameAsBytes(), admin.mergeRegions(region1.getEncodedNameAsBytes(),
region2.getEncodedNameAsBytes(), false); region2.getEncodedNameAsBytes(), false);

Some files were not shown because too many files have changed in this diff.