HBASE-26096 Cleanup the deprecated methods in HBTU related classes and format code (#3503)

Signed-off-by: Xiaolin Ha <haxiaolin@apache.org>
Signed-off-by: Yulin Niu <niuyulin@apache.org>
Duo Zhang 2021-07-29 10:18:38 +08:00 committed by GitHub
parent 332d9d13b1
commit 5f0950558f
12 changed files with 756 additions and 1344 deletions


@@ -49,26 +49,19 @@ public class HBaseCommonTestingUtil {
    * Compression algorithms to use in parameterized JUnit 4 tests
    */
   public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
-    Arrays.asList(new Object[][] {
-      { Compression.Algorithm.NONE },
-      { Compression.Algorithm.GZ }
-    });
+    Arrays.asList(new Object[][] { { Compression.Algorithm.NONE }, { Compression.Algorithm.GZ } });
   /**
    * This is for unit tests parameterized with a two booleans.
    */
   public static final List<Object[]> BOOLEAN_PARAMETERIZED =
-    Arrays.asList(new Object[][] {
-      {false},
-      {true}
-    });
+    Arrays.asList(new Object[][] { { false }, { true } });
   /**
    * Compression algorithms to use in testing
    */
-  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
-    Compression.Algorithm.NONE, Compression.Algorithm.GZ
-  };
+  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS =
+    { Compression.Algorithm.NONE, Compression.Algorithm.GZ };
   protected final Configuration conf;
@@ -82,7 +75,6 @@ public class HBaseCommonTestingUtil {
   /**
    * Returns this classes's instance of {@link Configuration}.
-   *
    * @return Instance of Configuration.
    */
   public Configuration getConfiguration() {
@@ -92,8 +84,7 @@ public class HBaseCommonTestingUtil {
   /**
    * System property key to get base test directory value
    */
-  public static final String BASE_TEST_DIRECTORY_KEY =
-    "test.build.data.basedirectory";
+  public static final String BASE_TEST_DIRECTORY_KEY = "test.build.data.basedirectory";
   /**
    * Default base directory for test output.
@@ -127,13 +118,11 @@ public class HBaseCommonTestingUtil {
   /**
    * Sets up a directory for a test to use.
-   *
    * @return New directory path, if created.
    */
   protected Path setupDataTestDir() {
     if (this.dataTestDir != null) {
-      LOG.warn("Data test dir already setup in " +
-        dataTestDir.getAbsolutePath());
+      LOG.warn("Data test dir already setup in " + dataTestDir.getAbsolutePath());
       return null;
     }
     Path testPath = getRandomDir();
@@ -151,7 +140,7 @@ public class HBaseCommonTestingUtil {
   }
   /**
-   * Returns A dir with a random (uuid) name under the test dir
+   * Returns a dir with a random (uuid) name under the test dir
    * @see #getBaseTestDir()
    */
   public Path getRandomDir() {
@@ -159,8 +148,7 @@ public class HBaseCommonTestingUtil {
   }
   public static UUID getRandomUUID() {
-    return new UUID(ThreadLocalRandom.current().nextLong(),
-      ThreadLocalRandom.current().nextLong());
+    return new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong());
   }
   protected void createSubDir(String propertyName, Path parent, String subDirName) {
@@ -212,8 +200,7 @@ public class HBaseCommonTestingUtil {
    * @see #setupDataTestDir()
    */
   private Path getBaseTestDir() {
-    String PathName = System.getProperty(
-      BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);
+    String PathName = System.getProperty(BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);
     return new Path(PathName);
   }
@@ -248,8 +235,7 @@ public class HBaseCommonTestingUtil {
   /**
    * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
    */
-  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
-    throws E {
+  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate) throws E {
     return Waiter.waitFor(this.conf, timeout, predicate);
   }
@@ -264,8 +250,8 @@ public class HBaseCommonTestingUtil {
   /**
    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
    */
-  public <E extends Exception> long waitFor(long timeout, long interval,
-    boolean failIfTimeout, Predicate<E> predicate) throws E {
+  public <E extends Exception> long waitFor(long timeout, long interval, boolean failIfTimeout,
+    Predicate<E> predicate) throws E {
     return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
   }
@@ -331,12 +317,11 @@ public class HBaseCommonTestingUtil {
   }
   /**
-   * Returns a random port. These ports cannot be registered with IANA and are
-   * intended for dynamic allocation (see http://bit.ly/dynports).
+   * Returns a random port. These ports cannot be registered with IANA and are intended for
+   * dynamic allocation (see http://bit.ly/dynports).
    */
   private int randomPort() {
-    return MIN_RANDOM_PORT
-      + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
+    return MIN_RANDOM_PORT + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
   }
   interface AvailablePortChecker {
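
The constants and waitFor wrappers reformatted above are consumed directly by JUnit 4 tests. A minimal sketch of that usage, assuming HBaseCommonTestingUtil still has a public no-argument constructor and that Waiter.Predicate can be written as a lambda (the test class and assertion below are illustrative, not part of this patch):

import java.util.List;
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@RunWith(Parameterized.class)
public class CompressionParamSketchTest {

  // One run per algorithm (NONE, GZ) as published by HBaseCommonTestingUtil.
  @Parameterized.Parameters(name = "compression={0}")
  public static List<Object[]> params() {
    return HBaseCommonTestingUtil.COMPRESSION_ALGORITHMS_PARAMETERIZED;
  }

  @Parameterized.Parameter
  public Compression.Algorithm algorithm;

  private final HBaseCommonTestingUtil util = new HBaseCommonTestingUtil();

  @Test
  public void waitForWrapsWaiter() throws Exception {
    // waitFor(timeout, predicate) delegates to Waiter.waitFor with the utility's Configuration;
    // the predicate here is trivially true, so this returns almost immediately.
    long waited = util.waitFor(30_000, () -> algorithm != null);
    Assert.assertTrue(waited >= 0);
  }
}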


@@ -211,7 +211,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
     TableDescriptor desc = admin.getDescriptor(t);
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(desc);
     builder.setCoprocessor(SlowMeCoproScanOperations.class.getName());
-    HBaseTestingUtil.modifyTableSync(admin, builder.build());
+    admin.modifyTable(builder.build());
   }
   @Test
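
The hunk above drops the deprecated HBaseTestingUtil.modifyTableSync helper in favour of calling Admin.modifyTable directly. A hedged sketch of that replacement pattern as a standalone helper (the class and method names here are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

final class ModifyTableSketch {
  // Adds a coprocessor to an existing table via Admin.modifyTable, mirroring the hunk above.
  static void addCoprocessor(Connection connection, TableName tableName, String coprocClass)
    throws IOException {
    try (Admin admin = connection.getAdmin()) {
      TableDescriptor current = admin.getDescriptor(tableName);
      TableDescriptor updated =
        TableDescriptorBuilder.newBuilder(current).setCoprocessor(coprocClass).build();
      // Blocking modify; this is what replaces the removed modifyTableSync helper.
      admin.modifyTable(updated);
    }
  }
}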


@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.logging.Log4jUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
@@ -73,7 +74,7 @@ public abstract class MultiTableInputFormatTestBase {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     // switch TIF to log at DEBUG level
-    TEST_UTIL.enableDebug(MultiTableInputFormatBase.class);
+    Log4jUtils.enableDebug(MultiTableInputFormatBase.class);
     // start mini hbase cluster
     TEST_UTIL.startMiniCluster(3);
     // create and fill table
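
Debug logging is now enabled through the Log4jUtils facade instead of the removed HBTU helper. A short sketch of the same pattern in an arbitrary test class (the test class name below is illustrative):

import org.apache.hadoop.hbase.logging.Log4jUtils;
import org.apache.hadoop.hbase.mapreduce.MultiTableInputFormatBase;
import org.junit.BeforeClass;

public class SomeDebugLoggingTest {
  @BeforeClass
  public static void setupLogging() {
    // Raise the input format's logger to DEBUG for this test run, as the hunks above now do.
    Log4jUtils.enableDebug(MultiTableInputFormatBase.class);
  }
}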


@@ -22,6 +22,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.logging.Log4jUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
 import org.apache.hadoop.mapreduce.Job;
@@ -43,7 +44,7 @@ public class TestMultiTableInputFormat extends MultiTableInputFormatTestBase {
   @BeforeClass
   public static void setupLogging() {
-    TEST_UTIL.enableDebug(MultiTableInputFormat.class);
+    Log4jUtils.enableDebug(MultiTableInputFormat.class);
   }
   @Override


@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.logging.Log4jUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
@@ -53,8 +54,8 @@ public class TestMultiTableSnapshotInputFormat extends MultiTableInputFormatTest
   @BeforeClass
   public static void setUpSnapshots() throws Exception {
-    TEST_UTIL.enableDebug(MultiTableSnapshotInputFormat.class);
-    TEST_UTIL.enableDebug(MultiTableSnapshotInputFormatImpl.class);
+    Log4jUtils.enableDebug(MultiTableSnapshotInputFormat.class);
+    Log4jUtils.enableDebug(MultiTableSnapshotInputFormatImpl.class);
     // take a snapshot of every table we have.
     for (String tableName : TABLES) {


@@ -28,28 +28,27 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 /**
- * This class defines methods that can help with managing HBase clusters
- * from unit tests and system tests. There are 3 types of cluster deployments:
+ * This class defines methods that can help with managing HBase clusters from unit tests and system
+ * tests. There are 3 types of cluster deployments:
  * <ul>
  * <li><b>SingleProcessHBaseCluster:</b> each server is run in the same JVM in separate threads,
  * used by unit tests</li>
  * <li><b>DistributedHBaseCluster:</b> the cluster is pre-deployed, system and integration tests can
- * interact with the cluster. </li>
- * <li><b>ProcessBasedLocalHBaseCluster:</b> each server is deployed locally but in separate
- * JVMs. </li>
+ * interact with the cluster.</li>
+ * <li><b>ProcessBasedLocalHBaseCluster:</b> each server is deployed locally but in separate JVMs.
+ * </li>
  * </ul>
  * <p>
- * HBaseCluster unifies the way tests interact with the cluster, so that the same test can
- * be run against a mini-cluster during unit test execution, or a distributed cluster having
- * tens/hundreds of nodes during execution of integration tests.
- *
+ * HBaseCluster unifies the way tests interact with the cluster, so that the same test can be run
+ * against a mini-cluster during unit test execution, or a distributed cluster having tens/hundreds
+ * of nodes during execution of integration tests.
  * <p>
  * HBaseCluster exposes client-side public interfaces to tests, so that tests does not assume
- * running in a particular mode. Not all the tests are suitable to be run on an actual cluster,
- * and some tests will still need to mock stuff and introspect internal state. For those use
- * cases from unit tests, or if more control is needed, you can use the subclasses directly.
- * In that sense, this class does not abstract away <strong>every</strong> interface that
- * SingleProcessHBaseCluster or DistributedHBaseCluster provide.
+ * running in a particular mode. Not all the tests are suitable to be run on an actual cluster, and
+ * some tests will still need to mock stuff and introspect internal state. For those use cases from
+ * unit tests, or if more control is needed, you can use the subclasses directly. In that sense,
+ * this class does not abstract away <strong>every</strong> interface that SingleProcessHBaseCluster
+ * or DistributedHBaseCluster provide.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.PHOENIX)
 @InterfaceStability.Evolving
@@ -85,24 +84,23 @@ public abstract class HBaseClusterInterface implements Closeable, Configurable {
   public abstract ClusterMetrics getClusterMetrics() throws IOException;
   /**
-   * Returns a ClusterStatus for this HBase cluster as observed at the
-   * starting of the HBaseCluster
+   * Returns a ClusterStatus for this HBase cluster as observed at the starting of the HBaseCluster
    */
   public ClusterMetrics getInitialClusterMetrics() throws IOException {
     return initialClusterStatus;
   }
   /**
-   * Starts a new region server on the given hostname or if this is a mini/local cluster,
-   * starts a region server locally.
+   * Starts a new region server on the given hostname or if this is a mini/local cluster, starts a
+   * region server locally.
    * @param hostname the hostname to start the regionserver on
    * @throws IOException if something goes wrong
    */
   public abstract void startRegionServer(String hostname, int port) throws IOException;
   /**
-   * Kills the region server process if this is a distributed cluster, otherwise
-   * this causes the region server to exit doing basic clean up only.
+   * Kills the region server process if this is a distributed cluster, otherwise this causes the
+   * region server to exit doing basic clean up only.
    * @throws IOException if something goes wrong
    */
   public abstract void killRegionServer(ServerName serverName) throws IOException;
@@ -110,9 +108,9 @@ public abstract class HBaseClusterInterface implements Closeable, Configurable {
   /**
    * Keeping track of killed servers and being able to check if a particular server was killed makes
    * it possible to do fault tolerance testing for dead servers in a deterministic way. A concrete
-   * example of such case is - killing servers and waiting for all regions of a particular table
-   * to be assigned. We can check for server column in META table and that its value is not one
-   * of the killed servers.
+   * example of such case is - killing servers and waiting for all regions of a particular table to
+   * be assigned. We can check for server column in META table and that its value is not one of the
+   * killed servers.
    */
   public abstract boolean isKilledRS(ServerName serverName);
@@ -137,8 +135,8 @@ public abstract class HBaseClusterInterface implements Closeable, Configurable {
       }
       Threads.sleep(100);
     }
-    throw new IOException("did timeout " + timeout + "ms waiting for region server to start: "
-      + hostname);
+    throw new IOException(
+      "did timeout " + timeout + "ms waiting for region server to start: " + hostname);
   }
   /**
@@ -163,23 +161,23 @@ public abstract class HBaseClusterInterface implements Closeable, Configurable {
   public abstract void resumeRegionServer(ServerName serverName) throws IOException;
   /**
-   * Starts a new zookeeper node on the given hostname or if this is a mini/local cluster,
-   * silently logs warning message.
+   * Starts a new zookeeper node on the given hostname or if this is a mini/local cluster, silently
+   * logs warning message.
    * @param hostname the hostname to start the regionserver on
    * @throws IOException if something goes wrong
    */
   public abstract void startZkNode(String hostname, int port) throws IOException;
   /**
-   * Kills the zookeeper node process if this is a distributed cluster, otherwise,
-   * this causes master to exit doing basic clean up only.
+   * Kills the zookeeper node process if this is a distributed cluster, otherwise, this causes
+   * master to exit doing basic clean up only.
    * @throws IOException if something goes wrong
    */
   public abstract void killZkNode(ServerName serverName) throws IOException;
   /**
-   * Stops the region zookeeper if this is a distributed cluster, otherwise
-   * silently logs warning message.
+   * Stops the region zookeeper if this is a distributed cluster, otherwise silently logs warning
+   * message.
    * @throws IOException if something goes wrong
    */
   public abstract void stopZkNode(ServerName serverName) throws IOException;
@@ -188,33 +186,30 @@ public abstract class HBaseClusterInterface implements Closeable, Configurable {
    * Wait for the specified zookeeper node to join the cluster
    * @throws IOException if something goes wrong or timeout occurs
    */
-  public abstract void waitForZkNodeToStart(ServerName serverName, long timeout)
-    throws IOException;
+  public abstract void waitForZkNodeToStart(ServerName serverName, long timeout) throws IOException;
   /**
    * Wait for the specified zookeeper node to stop the thread / process.
    * @throws IOException if something goes wrong or timeout occurs
    */
-  public abstract void waitForZkNodeToStop(ServerName serverName, long timeout)
-    throws IOException;
+  public abstract void waitForZkNodeToStop(ServerName serverName, long timeout) throws IOException;
   /**
-   * Starts a new datanode on the given hostname or if this is a mini/local cluster,
-   * silently logs warning message.
+   * Starts a new datanode on the given hostname or if this is a mini/local cluster, silently logs
+   * warning message.
    * @throws IOException if something goes wrong
    */
   public abstract void startDataNode(ServerName serverName) throws IOException;
   /**
-   * Kills the datanode process if this is a distributed cluster, otherwise,
-   * this causes master to exit doing basic clean up only.
+   * Kills the datanode process if this is a distributed cluster, otherwise, this causes master to
+   * exit doing basic clean up only.
    * @throws IOException if something goes wrong
    */
   public abstract void killDataNode(ServerName serverName) throws IOException;
   /**
-   * Stops the datanode if this is a distributed cluster, otherwise
-   * silently logs warning message.
+   * Stops the datanode if this is a distributed cluster, otherwise silently logs warning message.
    * @throws IOException if something goes wrong
    */
   public abstract void stopDataNode(ServerName serverName) throws IOException;
@@ -268,16 +263,16 @@ public abstract class HBaseClusterInterface implements Closeable, Configurable {
     throws IOException;
   /**
-   * Starts a new master on the given hostname or if this is a mini/local cluster,
-   * starts a master locally.
+   * Starts a new master on the given hostname or if this is a mini/local cluster, starts a master
+   * locally.
    * @param hostname the hostname to start the master on
    * @throws IOException if something goes wrong
    */
   public abstract void startMaster(String hostname, int port) throws IOException;
   /**
-   * Kills the master process if this is a distributed cluster, otherwise,
-   * this causes master to exit doing basic clean up only.
+   * Kills the master process if this is a distributed cluster, otherwise, this causes master to
+   * exit doing basic clean up only.
    * @throws IOException if something goes wrong
    */
   public abstract void killMaster(ServerName serverName) throws IOException;
@@ -292,31 +287,23 @@ public abstract class HBaseClusterInterface implements Closeable, Configurable {
    * Wait for the specified master to stop the thread / process.
    * @throws IOException if something goes wrong or timeout occurs
    */
-  public abstract void waitForMasterToStop(ServerName serverName, long timeout)
-    throws IOException;
+  public abstract void waitForMasterToStop(ServerName serverName, long timeout) throws IOException;
   /**
-   * Blocks until there is an active master and that master has completed
-   * initialization.
-   *
-   * @return true if an active master becomes available. false if there are no
-   * masters left.
+   * Blocks until there is an active master and that master has completed initialization.
+   * @return true if an active master becomes available. false if there are no masters left.
    * @throws IOException if something goes wrong or timeout occurs
    */
-  public boolean waitForActiveAndReadyMaster()
-    throws IOException {
+  public boolean waitForActiveAndReadyMaster() throws IOException {
     return waitForActiveAndReadyMaster(Long.MAX_VALUE);
   }
   /**
-   * Blocks until there is an active master and that master has completed
-   * initialization.
+   * Blocks until there is an active master and that master has completed initialization.
    * @param timeout the timeout limit in ms
-   * @return true if an active master becomes available. false if there are no
-   * masters left.
+   * @return true if an active master becomes available. false if there are no masters left.
    */
-  public abstract boolean waitForActiveAndReadyMaster(long timeout)
-    throws IOException;
+  public abstract boolean waitForActiveAndReadyMaster(long timeout) throws IOException;
   /**
    * Wait for HBase Cluster to shut down.
@@ -329,10 +316,9 @@ public abstract class HBaseClusterInterface implements Closeable, Configurable {
   public abstract void shutdown() throws IOException;
   /**
-   * Restores the cluster to it's initial state if this is a real cluster,
-   * otherwise does nothing.
-   * This is a best effort restore. If the servers are not reachable, or insufficient
-   * permissions, etc. restoration might be partial.
+   * Restores the cluster to it's initial state if this is a real cluster, otherwise does nothing.
+   * This is a best effort restore. If the servers are not reachable, or insufficient permissions,
+   * etc. restoration might be partial.
    * @return whether restoration is complete
    */
   public boolean restoreInitialStatus() throws IOException {
@@ -340,10 +326,9 @@ public abstract class HBaseClusterInterface implements Closeable, Configurable {
   }
   /**
-   * Restores the cluster to given state if this is a real cluster,
-   * otherwise does nothing.
-   * This is a best effort restore. If the servers are not reachable, or insufficient
-   * permissions, etc. restoration might be partial.
+   * Restores the cluster to given state if this is a real cluster, otherwise does nothing. This is
+   * a best effort restore. If the servers are not reachable, or insufficient permissions, etc.
+   * restoration might be partial.
    * @return whether restoration is complete
    */
   public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws IOException {
@@ -368,16 +353,16 @@ public abstract class HBaseClusterInterface implements Closeable, Configurable {
     throws IOException;
   /**
-   * @return whether we are interacting with a distributed cluster as opposed to an
-   * in-process mini/local cluster.
+   * @return whether we are interacting with a distributed cluster as opposed to an in-process
+   * mini/local cluster.
    */
   public boolean isDistributedCluster() {
     return false;
   }
   /**
-   * Closes all the resources held open for this cluster. Note that this call does not shutdown
-   * the cluster.
+   * Closes all the resources held open for this cluster. Note that this call does not shutdown the
+   * cluster.
    * @see #shutdown()
    */
   @Override
@@ -385,8 +370,6 @@ public abstract class HBaseClusterInterface implements Closeable, Configurable {
   /**
    * Wait for the namenode.
-   *
-   * @throws InterruptedException
    */
   public void waitForNamenodeAvailable() throws InterruptedException {
   }
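
Because HBaseClusterInterface is the common surface for mini and distributed clusters, fault-injection tests can be written once against it. A hedged sketch using only methods that appear in the hunks above (the timeout value and helper class are illustrative, and the import assumes the class lives in org.apache.hadoop.hbase like the other testing utilities):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseClusterInterface;
import org.apache.hadoop.hbase.ServerName;

final class ClusterRestartProbe {
  // Kill one region server, wait for it to stop, then restore the initial cluster state.
  static void killAndRestore(HBaseClusterInterface cluster, ServerName victim) throws IOException {
    cluster.killRegionServer(victim);
    cluster.waitForRegionServerToStop(victim, 60_000);
    // Best-effort restore; per the javadoc above, it may be partial on a real distributed cluster.
    cluster.restoreInitialStatus();
  }
}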


@@ -97,7 +97,7 @@ public final class MiniClusterRule extends ExternalResource {
   }
   /**
-   * @return the underlying instance of {@link HBaseTestingUtil}
+   * Returns the underlying instance of {@link HBaseTestingUtil}
    */
   public HBaseTestingUtil getTestingUtility() {
     return testingUtility;


@@ -47,10 +47,9 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 /**
- * This class creates a single process HBase cluster.
- * each server. The master uses the 'default' FileSystem. The RegionServers,
- * if we are running on DistributedFilesystem, create a FileSystem instance
- * each and will close down their instance on the way out.
+ * This class creates a single process HBase cluster. each server. The master uses the 'default'
+ * FileSystem. The RegionServers, if we are running on DistributedFilesystem, create a FileSystem
+ * instance each and will close down their instance on the way out.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.PHOENIX)
 @InterfaceStability.Evolving
@@ -119,18 +118,17 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   }
   /**
-   * Subclass so can get at protected methods (none at moment). Also, creates
-   * a FileSystem instance per instantiation. Adds a shutdown own FileSystem
-   * on the way out. Shuts down own Filesystem only, not All filesystems as
-   * the FileSystem system exit hook does.
+   * Subclass so can get at protected methods (none at moment). Also, creates a FileSystem instance
+   * per instantiation. Adds a shutdown own FileSystem on the way out. Shuts down own Filesystem
+   * only, not All filesystems as the FileSystem system exit hook does.
    */
   public static class MiniHBaseClusterRegionServer extends HRegionServer {
     private Thread shutdownThread = null;
     private User user = null;
     /**
-     * List of RegionServers killed so far. ServerName also comprises startCode of a server,
-     * so any restarted instances of the same server will have different ServerName and will not
-     * coincide with past dead ones. So there's no need to cleanup this list.
+     * List of RegionServers killed so far. ServerName also comprises startCode of a server, so any
+     * restarted instances of the same server will have different ServerName and will not coincide
+     * with past dead ones. So there's no need to cleanup this list.
      */
     static Set<ServerName> killedServers = new HashSet<>();
@@ -141,8 +139,8 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
     }
     @Override
-    protected void handleReportForDutyResponse(
-      final RegionServerStartupResponse c) throws IOException {
+    protected void handleReportForDutyResponse(final RegionServerStartupResponse c)
+      throws IOException {
       super.handleReportForDutyResponse(c);
       // Run this thread to shutdown our filesystem on way out.
       this.shutdownThread = new SingleFileSystemShutdownThread(getFileSystem());
@@ -196,15 +194,17 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   }
   /**
-   * Alternate shutdown hook.
-   * Just shuts down the passed fs, not all as default filesystem hook does.
+   * Alternate shutdown hook. Just shuts down the passed fs, not all as default filesystem hook
+   * does.
    */
   static class SingleFileSystemShutdownThread extends Thread {
     private final FileSystem fs;
     SingleFileSystemShutdownThread(final FileSystem fs) {
       super("Shutdown of " + fs);
       this.fs = fs;
     }
     @Override
     public void run() {
       try {
@@ -240,8 +240,7 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
       if (rsPorts != null) {
         rsConf.setInt(HConstants.REGIONSERVER_PORT, rsPorts.get(i));
       }
-      User user = HBaseTestingUtil.getDifferentUser(rsConf,
-        ".hfs."+index++);
+      User user = HBaseTestingUtil.getDifferentUser(rsConf, ".hfs." + index++);
       hbaseCluster.addRegionServer(rsConf, i, user);
     }
@@ -296,7 +295,7 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   @Override
   public void waitForRegionServerToStop(ServerName serverName, long timeout) throws IOException {
-    //ignore timeout for now
+    // ignore timeout for now
     waitOnRegionServer(getRegionServerIndex(serverName));
   }
@@ -392,7 +391,7 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   @Override
   public void waitForMasterToStop(ServerName serverName, long timeout) throws IOException {
-    //ignore timeout for now
+    // ignore timeout for now
     waitOnMaster(getMasterIndex(serverName));
   }
@@ -400,20 +399,18 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
    * Starts a region server thread running
    * @return New RegionServerThread
    */
-  public JVMClusterUtil.RegionServerThread startRegionServer()
-    throws IOException {
+  public JVMClusterUtil.RegionServerThread startRegionServer() throws IOException {
     final Configuration newConf = HBaseConfiguration.create(conf);
     return startRegionServer(newConf);
   }
   private JVMClusterUtil.RegionServerThread startRegionServer(Configuration configuration)
     throws IOException {
-    User rsUser =
-      HBaseTestingUtil.getDifferentUser(configuration, ".hfs."+index++);
+    User rsUser = HBaseTestingUtil.getDifferentUser(configuration, ".hfs." + index++);
     JVMClusterUtil.RegionServerThread t = null;
     try {
-      t = hbaseCluster.addRegionServer(
-        configuration, hbaseCluster.getRegionServers().size(), rsUser);
+      t =
+        hbaseCluster.addRegionServer(configuration, hbaseCluster.getRegionServers().size(), rsUser);
       t.start();
       t.waitForServerOnline();
     } catch (InterruptedException ie) {
@@ -423,10 +420,9 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   }
   /**
-   * Starts a region server thread and waits until its processed by master. Throws an exception
-   * when it can't start a region server or when the region server is not processed by master
-   * within the timeout.
-   *
+   * Starts a region server thread and waits until its processed by master. Throws an exception when
+   * it can't start a region server or when the region server is not processed by master within the
+   * timeout.
    * @return New RegionServerThread
    */
   public JVMClusterUtil.RegionServerThread startRegionServerAndWait(long timeout)
@@ -463,7 +459,6 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   /**
    * Shut down the specified region server cleanly
-   *
    * @param serverNumber Used as index into a list.
    * @return the region server that was stopped
    */
@@ -481,8 +476,7 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
    */
   public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber,
     final boolean shutdownFS) {
-    JVMClusterUtil.RegionServerThread server =
-      hbaseCluster.getRegionServers().get(serverNumber);
+    JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber);
     LOG.info("Stopping " + server.toString());
     server.getRegionServer().stop("Stopping rs " + serverNumber);
     return server;
@@ -493,8 +487,7 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
    * @param serverNumber Used as index into a list.
    */
   public JVMClusterUtil.RegionServerThread suspendRegionServer(int serverNumber) {
-    JVMClusterUtil.RegionServerThread server =
-      hbaseCluster.getRegionServers().get(serverNumber);
+    JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber);
     LOG.info("Suspending {}", server.toString());
     server.suspend();
     return server;
@@ -505,8 +498,7 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
    * @param serverNumber Used as index into a list.
    */
   public JVMClusterUtil.RegionServerThread resumeRegionServer(int serverNumber) {
-    JVMClusterUtil.RegionServerThread server =
-      hbaseCluster.getRegionServers().get(serverNumber);
+    JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber);
     LOG.info("Resuming {}", server.toString());
     server.resume();
     return server;
@@ -520,16 +512,13 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
     return this.hbaseCluster.waitOnRegionServer(serverNumber);
   }
   /**
    * Starts a master thread running
-   *
    * @return New RegionServerThread
    */
   public JVMClusterUtil.MasterThread startMaster() throws IOException {
     Configuration c = HBaseConfiguration.create(conf);
-    User user =
-      HBaseTestingUtil.getDifferentUser(c, ".hfs."+index++);
+    User user = HBaseTestingUtil.getDifferentUser(c, ".hfs." + index++);
     JVMClusterUtil.MasterThread t = null;
     try {
@@ -556,7 +545,7 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
    * @return the active MasterThread, null if none is active.
    */
   public MasterThread getMasterThread() {
-    for (MasterThread mt: hbaseCluster.getLiveMasters()) {
+    for (MasterThread mt : hbaseCluster.getLiveMasters()) {
       if (mt.getMaster().isActiveMaster()) {
         return mt;
       }
@@ -585,7 +574,6 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   /**
    * Shut down the specified master cleanly
-   *
    * @param serverNumber Used as index into a list.
    * @return the region server that was stopped
    */
@@ -601,10 +589,8 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
    * test and you shut down one before end of the test.
    * @return the master that was stopped
    */
-  public JVMClusterUtil.MasterThread stopMaster(int serverNumber,
-    final boolean shutdownFS) {
-    JVMClusterUtil.MasterThread server =
-      hbaseCluster.getMasters().get(serverNumber);
+  public JVMClusterUtil.MasterThread stopMaster(int serverNumber, final boolean shutdownFS) {
+    JVMClusterUtil.MasterThread server = hbaseCluster.getMasters().get(serverNumber);
     LOG.info("Stopping " + server.toString());
     server.getMaster().stop("Stopping master " + serverNumber);
     return server;
@@ -619,24 +605,18 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   }
   /**
-   * Blocks until there is an active master and that master has completed
-   * initialization.
-   *
-   * @return true if an active master becomes available. false if there are no
-   * masters left.
+   * Blocks until there is an active master and that master has completed initialization.
+   * @return true if an active master becomes available. false if there are no masters left.
    */
   @Override
   public boolean waitForActiveAndReadyMaster(long timeout) throws IOException {
-    List<JVMClusterUtil.MasterThread> mts;
     long start = EnvironmentEdgeManager.currentTime();
-    while (!(mts = getMasterThreads()).isEmpty()
-      && (EnvironmentEdgeManager.currentTime() - start) < timeout) {
-      for (JVMClusterUtil.MasterThread mt : mts) {
+    while (EnvironmentEdgeManager.currentTime() - start < timeout) {
+      for (JVMClusterUtil.MasterThread mt : getMasterThreads()) {
         if (mt.getMaster().isActiveMaster() && mt.getMaster().isInitialized()) {
           return true;
         }
       }
       Threads.sleep(100);
     }
     return false;
@@ -722,9 +702,8 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
    * Call flushCache on all regions on all participating regionservers.
    */
   public void compact(boolean major) throws IOException {
-    for (JVMClusterUtil.RegionServerThread t:
-      this.hbaseCluster.getRegionServers()) {
-      for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) {
+    for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) {
+      for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
         r.compact(major);
       }
     }
@@ -734,10 +713,9 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
    * Call flushCache on all regions of the specified table.
    */
   public void compact(TableName tableName, boolean major) throws IOException {
-    for (JVMClusterUtil.RegionServerThread t:
-      this.hbaseCluster.getRegionServers()) {
-      for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) {
-        if(r.getTableDescriptor().getTableName().equals(tableName)) {
+    for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) {
+      for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
+        if (r.getTableDescriptor().getTableName().equals(tableName)) {
           r.compact(major);
         }
       }
@@ -760,7 +738,7 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   }
   /**
-   * @return List of live region server threads (skips the aborted and the killed)
+   * Returns List of live region server threads (skips the aborted and the killed)
    */
   public List<JVMClusterUtil.RegionServerThread> getLiveRegionServerThreads() {
     return this.hbaseCluster.getLiveRegionServers();
@@ -775,10 +753,8 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   }
   public HRegionServer getRegionServer(ServerName serverName) {
-    return hbaseCluster.getRegionServers().stream()
-      .map(t -> t.getRegionServer())
-      .filter(r -> r.getServerName().equals(serverName))
-      .findFirst().orElse(null);
+    return hbaseCluster.getRegionServers().stream().map(t -> t.getRegionServer())
+      .filter(r -> r.getServerName().equals(serverName)).findFirst().orElse(null);
   }
   public List<HRegion> getRegions(byte[] tableName) {
@@ -791,7 +767,7 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
       HRegionServer hrs = rst.getRegionServer();
       for (Region region : hrs.getOnlineRegionsLocalContext()) {
         if (region.getTableDescriptor().getTableName().equals(tableName)) {
-          ret.add((HRegion)region);
+          ret.add((HRegion) region);
         }
       }
     }
@@ -809,12 +785,12 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   /**
    * Get the location of the specified region
    * @param regionName Name of the region in bytes
-   * @return Index into List of {@link SingleProcessHBaseCluster#getRegionServerThreads()}
-   * of HRS carrying hbase:meta. Returns -1 if none found.
+   * @return Index into List of {@link SingleProcessHBaseCluster#getRegionServerThreads()} of HRS
+   * carrying hbase:meta. Returns -1 if none found.
    */
   public int getServerWith(byte[] regionName) {
     int index = 0;
-    for (JVMClusterUtil.RegionServerThread rst: getRegionServerThreads()) {
+    for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
       HRegionServer hrs = rst.getRegionServer();
       if (!hrs.isStopped()) {
         Region region = hrs.getOnlineRegion(regionName);
@@ -864,8 +840,8 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   }
   /**
-   * Do a simulated kill all masters and regionservers. Useful when it is
-   * impossible to bring the mini-cluster back for clean shutdown.
+   * Do a simulated kill all masters and regionservers. Useful when it is impossible to bring the
+   * mini-cluster back for clean shutdown.
    */
   public void killAll() {
     // Do backups first.
@@ -897,18 +873,17 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
       HRegionServer hrs = rst.getRegionServer();
       for (Region region : hrs.getRegions(tableName)) {
         if (region.getTableDescriptor().getTableName().equals(tableName)) {
-          ret.add((HRegion)region);
+          ret.add((HRegion) region);
         }
       }
     }
     return ret;
   }
   protected int getRegionServerIndex(ServerName serverName) {
-    //we have a small number of region servers, this should be fine for now.
+    // we have a small number of region servers, this should be fine for now.
     List<RegionServerThread> servers = getRegionServerThreads();
-    for (int i=0; i < servers.size(); i++) {
+    for (int i = 0; i < servers.size(); i++) {
      if (servers.get(i).getRegionServer().getServerName().equals(serverName)) {
        return i;
      }
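
For the in-process cluster, tests typically roll servers through the methods touched above. A hedged sketch of stopping region server 0 and starting a replacement (the helper class is illustrative; stopRegionServer(int) and waitOnRegionServer(int) are assumed to behave as their javadoc above describes):

import java.io.IOException;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.util.JVMClusterUtil;

final class MiniClusterRollSketch {
  // Stop one region server cleanly, wait for its thread to exit, then start a fresh one.
  static JVMClusterUtil.RegionServerThread rollFirstRegionServer(SingleProcessHBaseCluster cluster)
    throws IOException {
    cluster.stopRegionServer(0);
    cluster.waitOnRegionServer(0); // blocks until the stopped server's thread has finished
    // startRegionServer() waits for the new server to come online before returning.
    return cluster.startRegionServer();
  }
}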


@@ -45,8 +45,8 @@ import org.apache.yetus.audience.InterfaceStability;
 @InterfaceStability.Evolving
 public final class StartTestingClusterOption {
   /**
-   * Number of masters to start up. We'll start this many hbase masters. If numMasters > 1, you
-   * can find the active/primary master with {@link SingleProcessHBaseCluster#getMaster()}.
+   * Number of masters to start up. We'll start this many hbase masters. If numMasters > 1, you can
+   * find the active/primary master with {@link SingleProcessHBaseCluster#getMaster()}.
    */
   private final int numMasters;
@@ -63,9 +63,8 @@ public final class StartTestingClusterOption {
   private final Class<? extends HMaster> masterClass;
   /**
-   * Number of region servers to start up.
-   * If this value is > 1, then make sure config "hbase.regionserver.info.port" is -1
-   * (i.e. no ui per regionserver) otherwise bind errors.
+   * Number of region servers to start up. If this value is > 1, then make sure config
+   * "hbase.regionserver.info.port" is -1 (i.e. no ui per regionserver) otherwise bind errors.
    */
   private final int numRegionServers;
   /**
@@ -174,15 +173,15 @@ public final class StartTestingClusterOption {
   @Override
   public String toString() {
-    return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", masterClass=" + masterClass
-      + ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts)
-      + ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes
-      + ", dataNodeHosts=" + Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers
-      + ", createRootDir=" + createRootDir + ", createWALDir=" + createWALDir + '}';
+    return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", masterClass=" + masterClass +
+      ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts) +
+      ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes + ", dataNodeHosts=" +
+      Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers + ", createRootDir=" +
+      createRootDir + ", createWALDir=" + createWALDir + '}';
   }
   /**
-   * @return a new builder.
+   * Returns a new builder.
    */
   public static Builder builder() {
     return new Builder();
@@ -190,7 +189,7 @@ public final class StartTestingClusterOption {
   /**
    * Builder pattern for creating an {@link StartTestingClusterOption}.
-   *
+   * <p/>
    * The default values of its fields should be considered public and constant. Changing the default
    * values may cause other tests fail.
    */
@@ -214,7 +213,7 @@ public final class StartTestingClusterOption {
     if (dataNodeHosts != null && dataNodeHosts.length != 0) {
       numDataNodes = dataNodeHosts.length;
     }
-    return new StartTestingClusterOption(numMasters,numAlwaysStandByMasters, masterClass,
+    return new StartTestingClusterOption(numMasters, numAlwaysStandByMasters, masterClass,
       numRegionServers, rsPorts, rsClass, numDataNodes, dataNodeHosts, numZkServers,
      createRootDir, createWALDir);
   }
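
StartTestingClusterOption is built through its Builder rather than constructed directly. A sketch of the intended usage, assuming the Builder exposes setters named after the fields above (numMasters, numRegionServers, numDataNodes), as its 2.x predecessor StartMiniClusterOption does, and that HBaseTestingUtil.startMiniCluster accepts the option object:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartTestingClusterOption;

final class MiniClusterBootstrapSketch {
  // Start a mini cluster with explicit sizing instead of the plain numeric overloads.
  static HBaseTestingUtil start() throws Exception {
    StartTestingClusterOption option = StartTestingClusterOption.builder()
      .numMasters(2)        // active master plus one backup
      .numRegionServers(3)  // remember: info.port must be -1 when > 1, per the javadoc above
      .numDataNodes(3)
      .build();
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster(option);
    return util;
  }
}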


@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -108,9 +109,10 @@ public class BaseTestHBaseFsck {
    * Debugging method to dump the contents of meta.
    */
   protected void dumpMeta(TableName tableName) throws IOException {
-    List<byte[]> metaRows = TEST_UTIL.getMetaTableRows(tableName);
-    for (byte[] row : metaRows) {
-      LOG.info(Bytes.toString(row));
+    List<RegionInfo> regions =
+      MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tableName);
+    for (RegionInfo region : regions) {
+      LOG.info(region.getRegionNameAsString());
     }
   }
@@ -210,7 +212,6 @@ public class BaseTestHBaseFsck {
       LOG.info(hri.toString() + hsa.toString());
     }
-    TEST_UTIL.getMetaTableRows(htd.getTableName());
     LOG.info("*** After delete:");
     dumpMeta(htd.getTableName());
   }
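
The rewritten dumpMeta reads region info through MetaTableAccessor instead of the removed getMetaTableRows helper. The same pattern as a standalone sketch (the class name is illustrative, and it prints to stdout where the test base uses its logger):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;

final class MetaDumpSketch {
  // Log every region of a table straight from hbase:meta, as the rewritten dumpMeta does.
  static void dumpRegions(Connection connection, TableName tableName) throws IOException {
    List<RegionInfo> regions = MetaTableAccessor.getTableRegions(connection, tableName);
    for (RegionInfo region : regions) {
      System.out.println(region.getRegionNameAsString());
    }
  }
}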


@@ -54,7 +54,7 @@ public class HBaseZKTestingUtil extends HBaseCommonTestingUtil {
   }
   /**
-   * @return Where the cluster will write data on the local subsystem. Creates it if it does not
+   * Returns Where the cluster will write data on the local subsystem. Creates it if it does not
    * exist already. A subdir of {@code HBaseCommonTestingUtility#getBaseTestDir()}
    */
   Path getClusterTestDir() {
@@ -159,7 +159,7 @@ public class HBaseZKTestingUtil extends HBaseCommonTestingUtil {
    * users. Don't close it, it will be closed automatically when the cluster shutdowns
    * @return The ZKWatcher instance.
    */
-  public synchronized ZKWatcher getZooKeeperWatcher() throws IOException {
+  public ZKWatcher getZooKeeperWatcher() throws IOException {
     if (zooKeeperWatcher == null) {
       zooKeeperWatcher = new ZKWatcher(conf, "testing utility", new Abortable() {
         @Override
@@ -177,27 +177,7 @@ public class HBaseZKTestingUtil extends HBaseCommonTestingUtil {
   }
   /**
-   * Gets a ZKWatcher.
-   */
-  public static ZKWatcher getZooKeeperWatcher(HBaseZKTestingUtil testUtil) throws IOException {
-    return new ZKWatcher(testUtil.getConfiguration(), "unittest", new Abortable() {
-      boolean aborted = false;
-      @Override
-      public void abort(String why, Throwable e) {
-        aborted = true;
-        throw new RuntimeException("Fatal ZK error, why=" + why, e);
-      }
-      @Override
-      public boolean isAborted() {
-        return aborted;
-      }
-    });
-  }
-  /**
-   * @return True if we removed the test dirs
+   * Returns true if we removed the test dirs
    */
   @Override
   public boolean cleanupTestDir() {
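
With the static getZooKeeperWatcher(HBaseZKTestingUtil) helper gone, tests are expected to reuse the utility's own watcher, which the cluster closes on shutdown. A minimal sketch (the helper class is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseZKTestingUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

final class SharedWatcherSketch {
  // Use the utility's shared watcher; do not close it, cluster shutdown takes care of that.
  static ZKWatcher watcherFor(HBaseZKTestingUtil util) throws IOException {
    return util.getZooKeeperWatcher();
  }
}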