HBASE-21071 HBaseTestingUtility::startMiniCluster() to use builder pattern

Signed-off-by: Duo Zhang <zhangduo@apache.org>
Signed-off-by: Michael Stack <stack@apache.org>
This commit is contained in:
Mingliang Liu 2018-08-20 21:42:34 -07:00 committed by Michael Stack
parent 9e2732edbb
commit 6dd5383033
73 changed files with 798 additions and 310 deletions

View File

@ -65,7 +65,6 @@ public class TestRefreshHFilesEndpoint {
private static final Logger LOG = LoggerFactory.getLogger(TestRefreshHFilesEndpoint.class);
private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
private static final int NUM_MASTER = 1;
private static final int NUM_RS = 2;
private static final TableName TABLE_NAME = TableName.valueOf("testRefreshRegionHFilesEP");
private static final byte[] FAMILY = Bytes.toBytes("family");
@ -84,7 +83,7 @@ public class TestRefreshHFilesEndpoint {
CONF.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, RefreshHFilesEndpoint.class.getName());
cluster = HTU.startMiniCluster(NUM_MASTER, NUM_RS);
cluster = HTU.startMiniCluster(NUM_RS);
// Create table
table = HTU.createTable(TABLE_NAME, FAMILY, SPLIT_KEY);

View File

@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Result;
@ -57,7 +58,10 @@ public abstract class TableSnapshotInputFormatTestBase {
public void setupCluster() throws Exception {
  setupConf(UTIL.getConfiguration());
  // Start the cluster exactly once via the option builder. The superseded
  // UTIL.startMiniCluster(NUM_REGION_SERVERS, true) call is removed: leaving both in
  // would attempt a second cluster start, which HBaseTestingUtility rejects.
  StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numRegionServers(NUM_REGION_SERVERS).numDataNodes(NUM_REGION_SERVERS)
      .createRootDir(true).build();
  UTIL.startMiniCluster(option);
  rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
  fs = rootDir.getFileSystem(UTIL.getConfiguration());
}

View File

@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.HadoopShims;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
@ -612,7 +613,9 @@ public class TestHFileOutputFormat2 {
for (int i = 0; i < hostCount; ++i) {
hostnames[i] = "datanode_" + i;
}
util.startMiniCluster(1, hostCount, hostnames);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numRegionServers(hostCount).dataNodeHosts(hostnames).build();
util.startMiniCluster(option);
Map<String, Table> allTables = new HashMap<>(tableStr.size());
List<HFileOutputFormat2.TableInfo> tableInfo = new ArrayList<>(tableStr.size());

View File

@ -99,7 +99,7 @@ public class TestExportSnapshot {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  setUpBaseConf(TEST_UTIL.getConfiguration());
  // Single start: 3 slaves (3 region servers + 3 data nodes). The stale
  // startMiniCluster(1, 3) line is removed — a second start would fail because
  // the mini cluster is already running.
  TEST_UTIL.startMiniCluster(3);
  TEST_UTIL.startMiniMapReduceCluster();
}

View File

@ -47,7 +47,7 @@ public class TestMobExportSnapshot extends TestExportSnapshot {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  setUpBaseConf(TEST_UTIL.getConfiguration());
  // Single start: 3 slaves (3 region servers + 3 data nodes). The stale
  // startMiniCluster(1, 3) line is removed — starting twice is an error.
  TEST_UTIL.startMiniCluster(3);
  TEST_UTIL.startMiniMapReduceCluster();
}

View File

@ -53,7 +53,7 @@ public class TestMobSecureExportSnapshot extends TestMobExportSnapshot {
// setup configuration
SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
TEST_UTIL.startMiniCluster(1, 3);
TEST_UTIL.startMiniCluster(3);
TEST_UTIL.startMiniMapReduceCluster();
// Wait for the ACL table to become available

View File

@ -52,7 +52,7 @@ public class TestSecureExportSnapshot extends TestExportSnapshot {
// setup configuration
SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
TEST_UTIL.startMiniCluster(1, 3);
TEST_UTIL.startMiniCluster(3);
TEST_UTIL.startMiniMapReduceCluster();
// Wait for the ACL table to become available

View File

@ -89,7 +89,7 @@ public class TestStatusResource {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
conf = TEST_UTIL.getConfiguration();
TEST_UTIL.startMiniCluster(1, 1);
TEST_UTIL.startMiniCluster();
TEST_UTIL.createTable(TableName.valueOf("TestStatusResource"), Bytes.toBytes("D"));
TEST_UTIL.createTable(TableName.valueOf("TestStatusResource2"), Bytes.toBytes("D"));
REST_TEST_UTIL.startServletContainer(conf);

View File

@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
@ -83,7 +84,9 @@ public class TestRSGroupsOfflineMode {
TEST_UTIL.getConfiguration().set(
ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
"1");
TEST_UTIL.startMiniCluster(2, 3);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(2).numRegionServers(3).numDataNodes(3).build();
TEST_UTIL.startMiniCluster(option);
cluster = TEST_UTIL.getHBaseCluster();
master = ((MiniHBaseCluster)cluster).getMaster();
master.balanceSwitch(false);

View File

@ -765,197 +765,266 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
}
}
/**
 * Spins up a mini cluster of hbase, dfs and zookeeper, optionally placing the WAL in its own
 * freshly-created directory. Every other setting falls back to the defaults declared in
 * {@link StartMiniClusterOption.Builder}.
 * @param createWALDir whether a separate, new WAL directory should be created.
 * @return the started mini HBase cluster.
 * @see #shutdownMiniCluster()
 * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniCluster(boolean createWALDir) throws Exception {
  // Delegate to the option-based entry point; only the WAL-dir flag deviates from defaults.
  return startMiniCluster(StartMiniClusterOption.builder().createWALDir(createWALDir).build());
}
/**
 * Start up a minicluster of hbase, dfs, and zookeeper.
 * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
 * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
 * @param createRootDir Whether to create a new root or data directory path.
 * @return The mini HBase cluster created.
 * @see #shutdownMiniCluster()
 * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir)
    throws Exception {
  StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir).build();
  return startMiniCluster(option);
}
/**
 * Spins up a mini cluster of hbase, dfs and zookeeper with {@code numSlaves} slaves, optionally
 * recreating the root/data directory and the WAL directory. Remaining settings use the defaults
 * declared in {@link StartMiniClusterOption.Builder}.
 * @param numSlaves number of slaves; each slave is one region server plus one HDFS data node.
 * @param createRootDir whether a new root or data directory path should be created.
 * @param createWALDir whether a new WAL directory should be created.
 * @return the started mini HBase cluster.
 * @see #shutdownMiniCluster()
 * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir,
    boolean createWALDir) throws Exception {
  return startMiniCluster(StartMiniClusterOption.builder()
      .numRegionServers(numSlaves)
      .numDataNodes(numSlaves)
      .createRootDir(createRootDir)
      .createWALDir(createWALDir)
      .build());
}
/**
 * Start up a minicluster of hbase, dfs, and zookeeper.
 * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
 * @param numMasters Master node number.
 * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
 * @param createRootDir Whether to create a new root or data directory path.
 * @return The mini HBase cluster created.
 * @see #shutdownMiniCluster()
 * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, boolean createRootDir)
    throws Exception {
  // Builder setters ordered consistently with the sibling overloads:
  // masters, region servers, data nodes, then directory flags. (Setters are
  // independent, so the ordering is cosmetic only.)
  StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).numRegionServers(numSlaves).numDataNodes(numSlaves)
      .createRootDir(createRootDir).build();
  return startMiniCluster(option);
}
/**
 * Spins up a mini cluster of hbase, dfs and zookeeper with the given master and slave counts.
 * Remaining settings use the defaults declared in {@link StartMiniClusterOption.Builder}.
 * @param numMasters number of HBase masters.
 * @param numSlaves number of slaves; each slave is one region server plus one HDFS data node.
 * @return the started mini HBase cluster.
 * @see #shutdownMiniCluster()
 * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves) throws Exception {
  return startMiniCluster(StartMiniClusterOption.builder()
      .numMasters(numMasters)
      .numRegionServers(numSlaves)
      .numDataNodes(numSlaves)
      .build());
}
/**
 * Start up a minicluster of hbase, dfs, and zookeeper.
 * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
 * @param numMasters Master node number.
 * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
 * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
 *                      HDFS data node number.
 * @param createRootDir Whether to create a new root or data directory path.
 * @return The mini HBase cluster created.
 * @see #shutdownMiniCluster()
 * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
    boolean createRootDir) throws Exception {
  // Builder setters ordered consistently with the sibling overloads:
  // masters, region servers, data nodes/hosts, then directory flags.
  StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).numRegionServers(numSlaves)
      .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts)
      .createRootDir(createRootDir).build();
  return startMiniCluster(option);
}
/**
 * Spins up a mini cluster of hbase, dfs and zookeeper with the given master/slave counts and
 * explicit data node host names. Remaining settings use the defaults declared in
 * {@link StartMiniClusterOption.Builder}.
 * @param numMasters number of HBase masters.
 * @param numSlaves number of slaves; each slave is one region server plus one HDFS data node.
 * @param dataNodeHosts host names for the DataNodes; when non-null its length overrides the
 *                      HDFS data node count.
 * @return the started mini HBase cluster.
 * @see #shutdownMiniCluster()
 * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts)
    throws Exception {
  return startMiniCluster(StartMiniClusterOption.builder()
      .numMasters(numMasters)
      .numRegionServers(numSlaves)
      .numDataNodes(numSlaves)
      .dataNodeHosts(dataNodeHosts)
      .build());
}
/**
 * Spins up a mini cluster of hbase, dfs and zookeeper with independently-specified master,
 * region server and data node counts. Remaining settings use the defaults declared in
 * {@link StartMiniClusterOption.Builder}.
 * @param numMasters number of HBase masters.
 * @param numRegionServers number of region servers.
 * @param numDataNodes number of HDFS data nodes.
 * @return the started mini HBase cluster.
 * @see #shutdownMiniCluster()
 * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes)
    throws Exception {
  return startMiniCluster(StartMiniClusterOption.builder()
      .numMasters(numMasters)
      .numRegionServers(numRegionServers)
      .numDataNodes(numDataNodes)
      .build());
}
/**
 * Spins up a mini cluster of hbase, dfs and zookeeper with custom master/region-server classes.
 * Remaining settings use the defaults declared in {@link StartMiniClusterOption.Builder}.
 * @param numMasters number of HBase masters.
 * @param numSlaves number of slaves; each slave is one region server plus one HDFS data node.
 * @param dataNodeHosts host names for the DataNodes; when non-null its length overrides the
 *                      HDFS data node count.
 * @param masterClass class to instantiate as HMaster, or null for the default.
 * @param rsClass class to instantiate as HRegionServer, or null for the default.
 * @return the started mini HBase cluster.
 * @see #shutdownMiniCluster()
 * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
    Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
    throws Exception {
  return startMiniCluster(StartMiniClusterOption.builder()
      .numMasters(numMasters)
      .masterClass(masterClass)
      .numRegionServers(numSlaves)
      .rsClass(rsClass)
      .numDataNodes(numSlaves)
      .dataNodeHosts(dataNodeHosts)
      .build());
}
/**
 * Spins up a mini cluster of hbase, dfs and zookeeper with custom master/region-server classes
 * and independent node counts. Remaining settings use the defaults declared in
 * {@link StartMiniClusterOption.Builder}.
 * @param numMasters number of HBase masters.
 * @param numRegionServers number of region servers.
 * @param numDataNodes number of HDFS data nodes.
 * @param dataNodeHosts host names for the DataNodes; when non-null its length overrides the
 *                      HDFS data node count.
 * @param masterClass class to instantiate as HMaster, or null for the default.
 * @param rsClass class to instantiate as HRegionServer, or null for the default.
 * @return the started mini HBase cluster.
 * @see #shutdownMiniCluster()
 * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
    String[] dataNodeHosts, Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
    throws Exception {
  return startMiniCluster(StartMiniClusterOption.builder()
      .numMasters(numMasters)
      .masterClass(masterClass)
      .numRegionServers(numRegionServers)
      .rsClass(rsClass)
      .numDataNodes(numDataNodes)
      .dataNodeHosts(dataNodeHosts)
      .build());
}
/**
 * Spins up a fully-customized mini cluster of hbase, dfs and zookeeper: node counts, data node
 * host names, master/region-server classes, and root/WAL directory creation. Remaining settings
 * use the defaults declared in {@link StartMiniClusterOption.Builder}.
 * @param numMasters number of HBase masters.
 * @param numRegionServers number of region servers.
 * @param numDataNodes number of HDFS data nodes.
 * @param dataNodeHosts host names for the DataNodes; when non-null its length overrides the
 *                      HDFS data node count.
 * @param masterClass class to instantiate as HMaster, or null for the default.
 * @param rsClass class to instantiate as HRegionServer, or null for the default.
 * @param createRootDir whether a new root or data directory path should be created.
 * @param createWALDir whether a new WAL directory should be created.
 * @return the started mini HBase cluster.
 * @see #shutdownMiniCluster()
 * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
    String[] dataNodeHosts, Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
    boolean createWALDir) throws Exception {
  return startMiniCluster(StartMiniClusterOption.builder()
      .numMasters(numMasters)
      .masterClass(masterClass)
      .numRegionServers(numRegionServers)
      .rsClass(rsClass)
      .numDataNodes(numDataNodes)
      .dataNodeHosts(dataNodeHosts)
      .createRootDir(createRootDir)
      .createWALDir(createWALDir)
      .build());
}
/**
 * Spins up a mini cluster of hbase, dfs and zookeeper with the given slave count.
 * Remaining settings use the defaults declared in {@link StartMiniClusterOption.Builder}.
 * @param numSlaves number of slaves; each slave is one region server plus one HDFS data node.
 * @see #startMiniCluster(StartMiniClusterOption option)
 * @see #shutdownMiniDFSCluster()
 */
public MiniHBaseCluster startMiniCluster(int numSlaves) throws Exception {
  return startMiniCluster(StartMiniClusterOption.builder()
      .numRegionServers(numSlaves)
      .numDataNodes(numSlaves)
      .build());
}
/**
 * Start up a minicluster of hbase, dfs and zookeeper all using default options.
 * Option default values can be found in {@link StartMiniClusterOption.Builder}.
 * @return The mini HBase cluster created.
 * @see #startMiniCluster(StartMiniClusterOption option)
 * @see #shutdownMiniCluster()
 */
public MiniHBaseCluster startMiniCluster() throws Exception {
  // The superseded pre-builder delegation (startMiniCluster(1, 1)) is removed: leaving
  // both return statements in place made the builder-based one unreachable code.
  return startMiniCluster(StartMiniClusterOption.builder().build());
}
/**
* Start up a minicluster of hbase, dfs, and zookeeper where WAL's walDir is created separately.
* @throws Exception
* @return Mini hbase cluster instance created.
* Start up a mini cluster of hbase, optionally dfs and zookeeper if needed.
* It modifies Configuration. It homes the cluster data directory under a random
* subdirectory in a directory under System property test.build.data, to be cleaned up on exit.
* @see #shutdownMiniDFSCluster()
*/
public MiniHBaseCluster startMiniCluster(boolean withWALDir) throws Exception {
return startMiniCluster(1, 1, 1, null, null, null, false, withWALDir);
}
/**
* Start up a minicluster of hbase, dfs, and zookeeper.
* Set the <code>create</code> flag to create root or data directory path or not
* (will overwrite if dir already exists)
* @throws Exception
* @return Mini hbase cluster instance created.
* @see #shutdownMiniDFSCluster()
*/
public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create)
throws Exception {
return startMiniCluster(1, numSlaves, create);
}
/**
* Start up a minicluster of hbase, optionally dfs, and zookeeper.
* Modifies Configuration. Homes the cluster data directory under a random
* subdirectory in a directory under System property test.build.data.
* Directory is cleaned up on exit.
* @param numSlaves Number of slaves to start up. We'll start this many
* datanodes and regionservers. If numSlaves is > 1, then make sure
* hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
* bind errors.
* @throws Exception
* @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numSlaves)
throws Exception {
return startMiniCluster(1, numSlaves, false);
}
public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create, boolean withWALDir)
throws Exception {
return startMiniCluster(1, numSlaves, numSlaves, null, null, null, create, withWALDir);
}
/**
* Start minicluster. Whether to create a new root or data dir path even if such a path
* has been created earlier is decided based on flag <code>create</code>
* @throws Exception
* @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
final int numSlaves, boolean create)
throws Exception {
return startMiniCluster(numMasters, numSlaves, null, create);
}
/**
* start minicluster
* @throws Exception
* @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
final int numSlaves)
throws Exception {
return startMiniCluster(numMasters, numSlaves, null, false);
}
public MiniHBaseCluster startMiniCluster(final int numMasters,
final int numSlaves, final String[] dataNodeHosts, boolean create)
throws Exception {
return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
null, null, create, false);
}
/**
* Start up a minicluster of hbase, optionally dfs, and zookeeper.
* Modifies Configuration. Homes the cluster data directory under a random
* subdirectory in a directory under System property test.build.data.
* Directory is cleaned up on exit.
* @param numMasters Number of masters to start up. We'll start this many
* hbase masters. If numMasters > 1, you can find the active/primary master
* with {@link MiniHBaseCluster#getMaster()}.
* @param numSlaves Number of slaves to start up. We'll start this many
* regionservers. If dataNodeHosts == null, this also indicates the number of
* datanodes to start. If dataNodeHosts != null, the number of datanodes is
* based on dataNodeHosts.length.
* If numSlaves is > 1, then make sure
* hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
* bind errors.
* @param dataNodeHosts hostnames DNs to run on.
* This is useful if you want to run datanode on distinct hosts for things
* like HDFS block location verification.
* If you start MiniDFSCluster without host names,
* all instances of the datanodes will have the same host name.
* @throws Exception
* @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
final int numSlaves, final String[] dataNodeHosts) throws Exception {
return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
null, null);
}
/**
* Same as {@link #startMiniCluster(int, int)}, but with custom number of datanodes.
* @param numDataNodes Number of data nodes.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
final int numSlaves, final int numDataNodes) throws Exception {
return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
}
/**
* Start up a minicluster of hbase, optionally dfs, and zookeeper.
* Modifies Configuration. Homes the cluster data directory under a random
* subdirectory in a directory under System property test.build.data.
* Directory is cleaned up on exit.
* @param numMasters Number of masters to start up. We'll start this many
* hbase masters. If numMasters > 1, you can find the active/primary master
* with {@link MiniHBaseCluster#getMaster()}.
* @param numSlaves Number of slaves to start up. We'll start this many
* regionservers. If dataNodeHosts == null, this also indicates the number of
* datanodes to start. If dataNodeHosts != null, the number of datanodes is
* based on dataNodeHosts.length.
* If numSlaves is > 1, then make sure
* hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
* bind errors.
* @param dataNodeHosts hostnames DNs to run on.
* This is useful if you want to run datanode on distinct hosts for things
* like HDFS block location verification.
* If you start MiniDFSCluster without host names,
* all instances of the datanodes will have the same host name.
* @param masterClass The class to use as HMaster, or null for default
* @param regionserverClass The class to use as HRegionServer, or null for
* default
* @throws Exception
* @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
throws Exception {
return startMiniCluster(
numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
}
public MiniHBaseCluster startMiniCluster(final int numMasters,
final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
Class<? extends HMaster> masterClass,
Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
throws Exception {
return startMiniCluster(numMasters, numSlaves, numDataNodes, dataNodeHosts,
masterClass, regionserverClass, false, false);
}
/**
* Same as {@link #startMiniCluster(int, int, String[], Class, Class)}, but with custom
* number of datanodes.
* @param numDataNodes Number of data nodes.
* @param create Set this flag to create a new
* root or data directory path or not (will overwrite if exists already).
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
Class<? extends HMaster> masterClass,
Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
boolean create, boolean withWALDir)
throws Exception {
if (dataNodeHosts != null && dataNodeHosts.length != 0) {
numDataNodes = dataNodeHosts.length;
}
LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");
public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws Exception {
LOG.info("Starting up minicluster with option: {}", option);
// If we already put up a cluster, fail.
if (miniClusterRunning) {
@ -968,54 +1037,35 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
// Bring up mini dfs cluster. This spews a bunch of warnings about missing
// scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
if(this.dfsCluster == null) {
if (dfsCluster == null) {
LOG.info("STARTING DFS");
dfsCluster = startMiniDFSCluster(numDataNodes, dataNodeHosts);
} else LOG.info("NOT STARTING DFS");
dfsCluster = startMiniDFSCluster(option.getNumDataNodes(), option.getDataNodeHosts());
} else {
LOG.info("NOT STARTING DFS");
}
// Start up a zk cluster.
if (getZkCluster() == null) {
startMiniZKCluster();
startMiniZKCluster(option.getNumZkServers());
}
// Start the MiniHBaseCluster
return startMiniHBaseCluster(numMasters, numSlaves, null, masterClass,
regionserverClass, create, withWALDir);
}
public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
throws IOException, InterruptedException {
return startMiniHBaseCluster(numMasters, numSlaves, null);
}
public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves,
List<Integer> rsPorts) throws IOException, InterruptedException {
return startMiniHBaseCluster(numMasters, numSlaves, rsPorts, null, null, false, false);
return startMiniHBaseCluster(option);
}
/**
* Starts up mini hbase cluster. Usually used after call to
* {@link #startMiniCluster(int, int)} when doing stepped startup of clusters.
* Starts up mini hbase cluster.
* Usually you won't want this. You'll usually want {@link #startMiniCluster()}.
* @param rsPorts Ports that RegionServer should use; pass ports if you want to test cluster
* restart where for sure the regionservers come up on same address+port (but
* just with different startcode); by default mini hbase clusters choose new
* arbitrary ports on each cluster start.
* @param create Whether to create a
* root or data directory path or not; will overwrite if exists already.
* This is useful when doing stepped startup of clusters.
* @return Reference to the hbase mini hbase cluster.
* @throws IOException
* @throws InterruptedException
* @see #startMiniCluster()
* @see #startMiniCluster(StartMiniClusterOption)
* @see #shutdownMiniHBaseCluster()
*/
public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
final int numSlaves, List<Integer> rsPorts, Class<? extends HMaster> masterClass,
Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
boolean create, boolean withWALDir)
throws IOException, InterruptedException {
public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
throws IOException, InterruptedException {
// Now do the mini hbase cluster. Set the hbase.rootdir in config.
createRootDir(create);
if (withWALDir) {
createRootDir(option.isCreateRootDir());
if (option.isCreateWALDir()) {
createWALRootDir();
}
// Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is
@ -1025,16 +1075,17 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
// These settings will make the server waits until this exact number of
// regions servers are connected.
if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, option.getNumRegionServers());
}
if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, option.getNumRegionServers());
}
Configuration c = new Configuration(this.conf);
TraceUtil.initTracer(c);
this.hbaseCluster =
new MiniHBaseCluster(c, numMasters, numSlaves, rsPorts, masterClass, regionserverClass);
new MiniHBaseCluster(c, option.getNumMasters(), option.getNumRegionServers(),
option.getRsPorts(), option.getMasterClass(), option.getRsClass());
// Don't leave here till we've done a successful scan of the hbase:meta
Table t = getConnection().getTable(TableName.META_TABLE_NAME);
ResultScanner s = t.getScanner(new Scan());
@ -1045,9 +1096,83 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
t.close();
getAdmin(); // create immediately the hbaseAdmin
LOG.info("Minicluster is up; activeMaster=" + this.getHBaseCluster().getMaster());
LOG.info("Minicluster is up; activeMaster={}", getHBaseCluster().getMaster());
return (MiniHBaseCluster)this.hbaseCluster;
return (MiniHBaseCluster) hbaseCluster;
}
/**
 * Spins up only the mini hbase cluster, using default options for everything.
 * The defaults are declared in {@link StartMiniClusterOption.Builder}.
 * @see #startMiniHBaseCluster(StartMiniClusterOption)
 * @see #shutdownMiniHBaseCluster()
 */
public MiniHBaseCluster startMiniHBaseCluster() throws IOException, InterruptedException {
  StartMiniClusterOption defaults = StartMiniClusterOption.builder().build();
  return startMiniHBaseCluster(defaults);
}
/**
 * Spins up only the mini hbase cluster with the given master and region server counts.
 * Usually you won't want this; prefer {@link #startMiniCluster()}. Remaining settings use the
 * defaults declared in {@link StartMiniClusterOption.Builder}.
 * @param numMasters number of HBase masters.
 * @param numRegionServers number of region servers.
 * @return the started mini HBase cluster.
 * @see #shutdownMiniHBaseCluster()
 * @deprecated Use {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers)
    throws IOException, InterruptedException {
  return startMiniHBaseCluster(StartMiniClusterOption.builder()
      .numMasters(numMasters)
      .numRegionServers(numRegionServers)
      .build());
}
/**
 * Spins up only the mini hbase cluster with explicit master/region-server counts and fixed
 * region server ports. Usually you won't want this; prefer {@link #startMiniCluster()}.
 * Remaining settings use the defaults declared in {@link StartMiniClusterOption.Builder}.
 * @param numMasters number of HBase masters.
 * @param numRegionServers number of region servers.
 * @param rsPorts ports the RegionServers should bind to.
 * @return the started mini HBase cluster.
 * @see #shutdownMiniHBaseCluster()
 * @deprecated Use {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
    List<Integer> rsPorts) throws IOException, InterruptedException {
  return startMiniHBaseCluster(StartMiniClusterOption.builder()
      .numMasters(numMasters)
      .numRegionServers(numRegionServers)
      .rsPorts(rsPorts)
      .build());
}
/**
 * Spins up only the mini hbase cluster with full customization: node counts, fixed region
 * server ports, master/region-server classes, and root/WAL directory creation. Usually you
 * won't want this; prefer {@link #startMiniCluster()}. Remaining settings use the defaults
 * declared in {@link StartMiniClusterOption.Builder}.
 * @param numMasters number of HBase masters.
 * @param numRegionServers number of region servers.
 * @param rsPorts ports the RegionServers should bind to.
 * @param masterClass class to instantiate as HMaster, or null for the default.
 * @param rsClass class to instantiate as HRegionServer, or null for the default.
 * @param createRootDir whether a new root or data directory path should be created.
 * @param createWALDir whether a new WAL directory should be created.
 * @return the started mini HBase cluster.
 * @see #shutdownMiniHBaseCluster()
 * @deprecated Use {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
 */
@Deprecated
public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
    List<Integer> rsPorts, Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
    boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException {
  return startMiniHBaseCluster(StartMiniClusterOption.builder()
      .numMasters(numMasters)
      .masterClass(masterClass)
      .numRegionServers(numRegionServers)
      .rsClass(rsClass)
      .rsPorts(rsPorts)
      .createRootDir(createRootDir)
      .createWALDir(createWALDir)
      .build());
}
/**

View File

@ -0,0 +1,254 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.yetus.audience.InterfaceAudience;
/**
 * Options for starting up a mini cluster (including an hbase, dfs and zookeeper clusters) in test.
 * The options include HDFS options to build mini dfs cluster, Zookeeper options to build mini zk
 * cluster, and mostly HBase options to build mini hbase cluster.
 *
 * To create an object, use a {@link Builder}.
 * Example usage:
 * <pre>
 *   StartMiniClusterOption option = StartMiniClusterOption.builder()
 *       .numMasters(3).rsClass(MyRegionServer.class).createWALDir(true).build();
 * </pre>
 *
 * Default values can be found in {@link Builder}.
 */
@InterfaceAudience.Public
public final class StartMiniClusterOption {
  /**
   * Number of masters to start up. We'll start this many hbase masters. If numMasters > 1, you
   * can find the active/primary master with {@link MiniHBaseCluster#getMaster()}.
   */
  private final int numMasters;
  /**
   * The class to use as HMaster, or null for default.
   */
  private final Class<? extends HMaster> masterClass;
  /**
   * Number of region servers to start up.
   * If this value is > 1, then make sure config "hbase.regionserver.info.port" is -1
   * (i.e. no ui per regionserver) otherwise bind errors.
   */
  private final int numRegionServers;
  /**
   * Ports that RegionServer should use. Pass ports if you want to test cluster restart where for
   * sure the regionservers come up on same address+port (but just with different startcode); by
   * default mini hbase clusters choose new arbitrary ports on each cluster start.
   */
  private final List<Integer> rsPorts;
  /**
   * The class to use as HRegionServer, or null for default.
   */
  private final Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass;
  /**
   * Number of datanodes. Used to create mini DFS cluster. Superseded by {@link #dataNodeHosts}
   * size.
   */
  private final int numDataNodes;
  /**
   * The hostnames of DataNodes to run on. This is useful if you want to run datanode on distinct
   * hosts for things like HDFS block location verification. If you start MiniDFSCluster without
   * host names, all instances of the datanodes will have the same host name.
   */
  private final String[] dataNodeHosts;
  /**
   * Number of Zookeeper servers.
   */
  private final int numZkServers;
  /**
   * Whether to create a new root or data directory path. If true, the newly created data directory
   * will be configured as HBase rootdir. This will overwrite existing root directory config.
   */
  private final boolean createRootDir;
  /**
   * Whether to create a new WAL directory. If true, the newly created directory will be configured
   * as HBase wal.dir which is separate from HBase rootdir.
   */
  private final boolean createWALDir;
  /**
   * Private constructor. Use {@link Builder#build()}.
   */
  private StartMiniClusterOption(int numMasters, Class<? extends HMaster> masterClass,
      int numRegionServers, List<Integer> rsPorts,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, int numDataNodes,
      String[] dataNodeHosts, int numZkServers, boolean createRootDir, boolean createWALDir) {
    this.numMasters = numMasters;
    this.masterClass = masterClass;
    this.numRegionServers = numRegionServers;
    this.rsPorts = rsPorts;
    this.rsClass = rsClass;
    this.numDataNodes = numDataNodes;
    this.dataNodeHosts = dataNodeHosts;
    this.numZkServers = numZkServers;
    this.createRootDir = createRootDir;
    this.createWALDir = createWALDir;
  }
  public int getNumMasters() {
    return numMasters;
  }
  public Class<? extends HMaster> getMasterClass() {
    return masterClass;
  }
  public int getNumRegionServers() {
    return numRegionServers;
  }
  public List<Integer> getRsPorts() {
    return rsPorts;
  }
  public Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> getRsClass() {
    return rsClass;
  }
  public int getNumDataNodes() {
    return numDataNodes;
  }
  public String[] getDataNodeHosts() {
    return dataNodeHosts;
  }
  public int getNumZkServers() {
    return numZkServers;
  }
  public boolean isCreateRootDir() {
    return createRootDir;
  }
  public boolean isCreateWALDir() {
    return createWALDir;
  }
  @Override
  public String toString() {
    // Join rsPorts with an explicit separator so multi-port lists stay readable.
    return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", masterClass=" + masterClass
        + ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts, ",")
        + ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes
        + ", dataNodeHosts=" + Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers
        + ", createRootDir=" + createRootDir + ", createWALDir=" + createWALDir + '}';
  }
  /**
   * @return a new builder.
   */
  public static Builder builder() {
    return new Builder();
  }
  /**
   * Builder pattern for creating a {@link StartMiniClusterOption}.
   *
   * The default values of its fields should be considered public and constant. Changing the default
   * values may cause other tests fail.
   */
  public static final class Builder {
    private int numMasters = 1;
    private Class<? extends HMaster> masterClass = null;
    private int numRegionServers = 1;
    private List<Integer> rsPorts = null;
    private Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass = null;
    private int numDataNodes = 1;
    private String[] dataNodeHosts = null;
    private int numZkServers = 1;
    private boolean createRootDir = false;
    private boolean createWALDir = false;
    private Builder() {
    }
    public StartMiniClusterOption build() {
      // Explicit datanode hosts win over the numeric datanode count.
      if (dataNodeHosts != null && dataNodeHosts.length != 0) {
        numDataNodes = dataNodeHosts.length;
      }
      return new StartMiniClusterOption(numMasters, masterClass, numRegionServers, rsPorts, rsClass,
          numDataNodes, dataNodeHosts, numZkServers, createRootDir, createWALDir);
    }
    public Builder numMasters(int numMasters) {
      this.numMasters = numMasters;
      return this;
    }
    public Builder masterClass(Class<? extends HMaster> masterClass) {
      this.masterClass = masterClass;
      return this;
    }
    public Builder numRegionServers(int numRegionServers) {
      this.numRegionServers = numRegionServers;
      return this;
    }
    public Builder rsPorts(List<Integer> rsPorts) {
      this.rsPorts = rsPorts;
      return this;
    }
    public Builder rsClass(Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass) {
      this.rsClass = rsClass;
      return this;
    }
    public Builder numDataNodes(int numDataNodes) {
      this.numDataNodes = numDataNodes;
      return this;
    }
    public Builder dataNodeHosts(String[] dataNodeHosts) {
      this.dataNodeHosts = dataNodeHosts;
      return this;
    }
    public Builder numZkServers(int numZkServers) {
      this.numZkServers = numZkServers;
      return this;
    }
    public Builder createRootDir(boolean createRootDir) {
      this.createRootDir = createRootDir;
      return this;
    }
    public Builder createWALDir(boolean createWALDir) {
      this.createWALDir = createWALDir;
      return this;
    }
  }
}

View File

@ -66,7 +66,9 @@ public class TestClientClusterMetrics {
Configuration conf = HBaseConfiguration.create();
conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MyObserver.class.getName());
UTIL = new HBaseTestingUtility(conf);
UTIL.startMiniCluster(MASTERS, SLAVES);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(MASTERS).numRegionServers(SLAVES).numDataNodes(SLAVES).build();
UTIL.startMiniCluster(option);
CLUSTER = UTIL.getHBaseCluster();
CLUSTER.waitForActiveAndReadyMaster();
ADMIN = UTIL.getAdmin();

View File

@ -67,7 +67,9 @@ public class TestClientClusterStatus {
Configuration conf = HBaseConfiguration.create();
conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MyObserver.class.getName());
UTIL = new HBaseTestingUtility(conf);
UTIL.startMiniCluster(MASTERS, SLAVES);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(MASTERS).numRegionServers(SLAVES).numDataNodes(SLAVES).build();
UTIL.startMiniCluster(option);
CLUSTER = UTIL.getHBaseCluster();
CLUSTER.waitForActiveAndReadyMaster();
ADMIN = UTIL.getAdmin();

View File

@ -88,7 +88,10 @@ public class TestClientOperationTimeout {
TESTING_UTIL.getConfiguration().setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 500);
TESTING_UTIL.getConfiguration().setLong(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
TESTING_UTIL.startMiniCluster(1, 1, null, null, DelayedRegionServer.class);
// Set RegionServer class and use default values for other options.
StartMiniClusterOption option = StartMiniClusterOption.builder()
.rsClass(DelayedRegionServer.class).build();
TESTING_UTIL.startMiniCluster(option);
}
@Before

View File

@ -78,7 +78,7 @@ public class TestGlobalMemStoreSize {
LOG.info("Starting cluster");
Configuration conf = HBaseConfiguration.create();
TEST_UTIL = new HBaseTestingUtility(conf);
TEST_UTIL.startMiniCluster(1, regionServerNum);
TEST_UTIL.startMiniCluster(regionServerNum);
cluster = TEST_UTIL.getHBaseCluster();
LOG.info("Waiting for active/ready master");
cluster.waitForActiveAndReadyMaster();

View File

@ -47,7 +47,10 @@ public class TestLocalHBaseCluster {
*/
@Test
public void testLocalHBaseCluster() throws Exception {
TEST_UTIL.startMiniCluster(1, 1, null, MyHMaster.class, MyHRegionServer.class);
// Set Master class and RegionServer class, and use default values for other options.
StartMiniClusterOption option = StartMiniClusterOption.builder()
.masterClass(MyHMaster.class).rsClass(MyHRegionServer.class).build();
TEST_UTIL.startMiniCluster(option);
// Can we cast back to our master class?
try {
int val = ((MyHMaster)TEST_UTIL.getHBaseCluster().getMaster(0)).echo(42);

View File

@ -150,7 +150,9 @@ public class TestMultiVersions {
table.close();
UTIL.shutdownMiniHBaseCluster();
LOG.debug("HBase cluster shut down -- restarting");
UTIL.startMiniHBaseCluster(1, NUM_SLAVES);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numRegionServers(NUM_SLAVES).build();
UTIL.startMiniHBaseCluster(option);
// Make a new connection.
table = UTIL.getConnection().getTable(desc.getTableName());
// Overwrite previous value

View File

@ -88,7 +88,9 @@ public class TestZooKeeper {
@Before
public void setUp() throws Exception {
TEST_UTIL.startMiniHBaseCluster(2, 2);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(2).numRegionServers(2).build();
TEST_UTIL.startMiniHBaseCluster(option);
}
@After

View File

@ -63,7 +63,7 @@ public class TestAsyncClusterAdminApi2 extends TestAsyncAdminBase {
@Before
@Override
public void setUp() throws Exception {
TEST_UTIL.startMiniCluster(1, 3);
TEST_UTIL.startMiniCluster(3);
ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
admin = ASYNC_CONN.getAdmin();
}

View File

@ -52,7 +52,7 @@ public class TestGetScanPartialResult {
@BeforeClass
public static void setUp() throws Exception {
TEST_UTIL.startMiniCluster(1);
TEST_UTIL.startMiniCluster();
TEST_UTIL.createTable(TABLE, CF);
}

View File

@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.NoSuchProcedureException;
@ -82,7 +83,9 @@ public class TestSeparateClientZKCluster {
// reduce zk session timeout to easier trigger session expiration
TEST_UTIL.getConfiguration().setInt(HConstants.ZK_SESSION_TIMEOUT, ZK_SESSION_TIMEOUT);
// Start a cluster with 2 masters and 3 regionservers.
TEST_UTIL.startMiniCluster(2, 3);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(2).numRegionServers(3).numDataNodes(3).build();
TEST_UTIL.startMiniCluster(option);
}
@AfterClass

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
@ -74,7 +75,10 @@ public class TestTableSnapshotScanner {
public void setupCluster() throws Exception {
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(NUM_REGION_SERVERS, true);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numRegionServers(NUM_REGION_SERVERS).numDataNodes(NUM_REGION_SERVERS)
.createRootDir(true).build();
UTIL.startMiniCluster(option);
rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
fs = rootDir.getFileSystem(UTIL.getConfiguration());
}

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.BeforeClass;
import org.junit.ClassRule;
@ -48,7 +49,9 @@ public class TestUpdateConfiguration {
@BeforeClass
public static void setup() throws Exception {
TEST_UTIL.startMiniCluster(2, 1);
// Set master number and use default values for other options.
StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(2).build();
TEST_UTIL.startMiniCluster(option);
}
@Test

View File

@ -101,7 +101,7 @@ public class TestBlockReorderBlockLocation {
public void testBlockLocation() throws Exception {
// We need to start HBase to get HConstants.HBASE_DIR set in conf
htu.startMiniZKCluster();
MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
MiniHBaseCluster hbm = htu.startMiniHBaseCluster();
conf = hbm.getConfiguration();

View File

@ -116,7 +116,7 @@ public class TestBlockReorderMultiBlocks {
byte[] sb = Bytes.toBytes("sb");
htu.startMiniZKCluster();
MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
MiniHBaseCluster hbm = htu.startMiniHBaseCluster();
hbm.waitForActiveAndReadyMaster();
HRegionServer targetRs = LoadBalancer.isTablesOnMaster(hbm.getConf())? hbm.getMaster():
hbm.getRegionServer(0);

View File

@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SplitLogCounters;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Put;
@ -146,7 +147,9 @@ public abstract class AbstractTestDLS {
conf.setInt("hbase.regionserver.wal.max.splitters", 3);
conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
conf.set("hbase.wal.provider", getWalProvider());
TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, numRS);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(NUM_MASTERS).numRegionServers(numRS).build();
TEST_UTIL.startMiniHBaseCluster(option);
cluster = TEST_UTIL.getHBaseCluster();
LOG.info("Waiting for active/ready master");
cluster.waitForActiveAndReadyMaster();

View File

@ -45,7 +45,7 @@ public class TestGetInfoPort {
@Before
public void setUp() throws Exception {
testUtil.getConfiguration().setInt(HConstants.MASTER_INFO_PORT, 0);
testUtil.startMiniCluster(1, 1);
testUtil.startMiniCluster();
}
@After

View File

@ -66,7 +66,7 @@ public class TestGetLastFlushedSequenceId {
@Before
public void setUp() throws Exception {
testUtil.getConfiguration().setInt("hbase.regionserver.msginterval", 1000);
testUtil.startMiniCluster(1, 1);
testUtil.startMiniCluster();
}
@After

View File

@ -21,6 +21,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@ -66,7 +67,10 @@ public class TestGetReplicationLoad {
public static void startCluster() throws Exception {
LOG.info("Starting cluster");
TEST_UTIL = new HBaseTestingUtility();
TEST_UTIL.startMiniCluster(1, 1, 1, null, TestMasterMetrics.MyMaster.class, null);
// Set master class and use default values for other options.
StartMiniClusterOption option = StartMiniClusterOption.builder()
.masterClass(TestMasterMetrics.MyMaster.class).build();
TEST_UTIL.startMiniCluster(option);
cluster = TEST_UTIL.getHBaseCluster();
LOG.info("Waiting for active/ready master");
cluster.waitForActiveAndReadyMaster();

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.FlakeyTests;
@ -66,7 +67,9 @@ public class TestMasterFailover {
// Start the cluster
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
try {
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
TEST_UTIL.startMiniCluster(option);
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
// get all the master threads
@ -166,12 +169,9 @@ public class TestMasterFailover {
*/
@Test
public void testMetaInTransitionWhenMasterFailover() throws Exception {
final int NUM_MASTERS = 1;
final int NUM_RS = 1;
// Start the cluster
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
TEST_UTIL.startMiniCluster();
try {
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
LOG.info("Cluster started");

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
@ -49,13 +50,12 @@ public class TestMasterFailoverBalancerPersistence {
*/
@Test
public void testMasterFailoverBalancerPersistence() throws Exception {
final int NUM_MASTERS = 3;
final int NUM_RS = 1;
// Start the cluster
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(3).build();
TEST_UTIL.startMiniCluster(option);
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
assertTrue(cluster.waitForActiveAndReadyMaster());

View File

@ -22,7 +22,7 @@ import static org.junit.Assert.assertEquals;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.FSUtils;
@ -46,7 +46,8 @@ public class TestMasterFileSystemWithWALDir {
@BeforeClass
public static void setupTest() throws Exception {
UTIL.startMiniCluster(true);
// Set createWALDir to true and use default values for other options.
UTIL.startMiniCluster(StartMiniClusterOption.builder().createWALDir(true).build());
}
@AfterClass

View File

@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@ -71,7 +72,10 @@ public class TestMasterMetrics {
public static void startCluster() throws Exception {
LOG.info("Starting cluster");
TEST_UTIL = new HBaseTestingUtility();
TEST_UTIL.startMiniCluster(1, 1, 1, null, MyMaster.class, null);
// Set master class and use default values for other options.
StartMiniClusterOption option = StartMiniClusterOption.builder()
.masterClass(MyMaster.class).build();
TEST_UTIL.startMiniCluster(option);
cluster = TEST_UTIL.getHBaseCluster();
LOG.info("Waiting for active/ready master");
cluster.waitForActiveAndReadyMaster();

View File

@ -59,7 +59,7 @@ public class TestMasterMetricsWrapper {
@BeforeClass
public static void setup() throws Exception {
TEST_UTIL.startMiniCluster(1, NUM_RS);
TEST_UTIL.startMiniCluster(NUM_RS);
}
@AfterClass

View File

@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
@ -196,7 +197,9 @@ public class TestMasterOperationsForRegionReplicas {
rsports.add(rst.getRegionServer().getRpcServer().getListenerAddress().getPort());
}
TEST_UTIL.shutdownMiniHBaseCluster();
TEST_UTIL.startMiniHBaseCluster(1, numSlaves, rsports);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numRegionServers(numSlaves).rsPorts(rsports).build();
TEST_UTIL.startMiniHBaseCluster(option);
TEST_UTIL.waitTableEnabled(tableName);
validateFromSnapshotFromMeta(TEST_UTIL, tableName, numRegions, numReplica,
ADMIN.getConnection());
@ -204,7 +207,7 @@ public class TestMasterOperationsForRegionReplicas {
// Now shut the whole cluster down, and verify regions are assigned even if there is only
// one server running
TEST_UTIL.shutdownMiniHBaseCluster();
TEST_UTIL.startMiniHBaseCluster(1, 1);
TEST_UTIL.startMiniHBaseCluster();
TEST_UTIL.waitTableEnabled(tableName);
validateSingleRegionServerAssignment(ADMIN.getConnection(), numRegions, numReplica);
for (int i = 1; i < numSlaves; i++) { //restore the cluster

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionLocator;
@ -61,14 +62,15 @@ public class TestMasterRestartAfterDisablingTable {
public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch()
throws Exception {
final int NUM_MASTERS = 2;
final int NUM_RS = 1;
final int NUM_REGIONS_TO_CREATE = 4;
// Start the cluster
log("Starting cluster");
Configuration conf = HBaseConfiguration.create();
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(NUM_MASTERS).build();
TEST_UTIL.startMiniCluster(option);
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
log("Waiting for active/ready master");
cluster.waitForActiveAndReadyMaster();

View File

@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
@ -66,7 +67,9 @@ public class TestMasterShutdown {
// Start the cluster
HBaseTestingUtility htu = new HBaseTestingUtility(conf);
htu.startMiniCluster(NUM_MASTERS, NUM_RS);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
htu.startMiniCluster(option);
MiniHBaseCluster cluster = htu.getHBaseCluster();
// get all the master threads

View File

@ -23,6 +23,7 @@ import static org.junit.Assert.fail;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
@ -50,7 +51,9 @@ public class TestMetaAssignmentWithStopMaster {
@BeforeClass
public static void setUp() throws Exception {
UTIL.startMiniCluster(2,3);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(2).numRegionServers(3).numDataNodes(3).build();
UTIL.startMiniCluster(option);
}
@Test

View File

@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@ -60,7 +61,9 @@ public class TestMetaShutdownHandler {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster(1, 3, null, null, MyRegionServer.class);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numRegionServers(3).rsClass(MyRegionServer.class).numDataNodes(3).build();
TEST_UTIL.startMiniCluster(option);
}
@AfterClass

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
@ -78,7 +79,9 @@ public class TestRollingRestart {
log("Starting cluster");
Configuration conf = HBaseConfiguration.create();
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
TEST_UTIL.startMiniCluster(option);
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
log("Waiting for active/ready master");
cluster.waitForActiveAndReadyMaster();

View File

@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
@ -72,7 +73,9 @@ public class TestShutdownBackupMaster {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
UTIL.getConfiguration().setClass(HConstants.MASTER_IMPL, MockHMaster.class, HMaster.class);
UTIL.startMiniCluster(2, 2);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(2).numRegionServers(2).numDataNodes(2).build();
UTIL.startMiniCluster(option);
UTIL.waitUntilAllSystemRegionsAssigned();
}

View File

@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Get;
@ -69,7 +70,8 @@ public class TestRegionMoveAndAbandon {
public void setup() throws Exception {
UTIL = new HBaseTestingUtility();
zkCluster = UTIL.startMiniZKCluster();
cluster = UTIL.startMiniHBaseCluster(1, 2);
StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(2).build();
cluster = UTIL.startMiniHBaseCluster(option);
rs1 = cluster.getRegionServer(0);
rs2 = cluster.getRegionServer(1);
assertEquals(2, cluster.getRegionServerThreads().size());

View File

@ -61,7 +61,7 @@ public class TestRegionLocationFinder {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
cluster = TEST_UTIL.startMiniCluster(1, ServerNum);
cluster = TEST_UTIL.startMiniCluster(ServerNum);
table = TEST_UTIL.createTable(tableName, FAMILY, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
TEST_UTIL.waitTableAvailable(tableName, 1000);
TEST_UTIL.loadTable(table, FAMILY);

View File

@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
@ -157,7 +158,9 @@ public class TestRegionsOnMasterOptions {
}
private void checkBalance(int masterCount, int rsCount) throws Exception {
MiniHBaseCluster cluster = TEST_UTIL.startMiniCluster(MASTERS, SLAVES);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(MASTERS).numRegionServers(SLAVES).numDataNodes(SLAVES).build();
MiniHBaseCluster cluster = TEST_UTIL.startMiniCluster(option);
TableName tn = TableName.valueOf(this.name.getMethodName());
try {
Table t = TEST_UTIL.createMultiRegionTable(tn, HConstants.CATALOG_FAMILY, REGIONS);

View File

@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
@ -72,7 +73,9 @@ public class TestMasterFailoverWithProcedures {
@Before
public void setup() throws Exception {
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(2, 1);
// Set master number and use default values for other options.
StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(2).build();
UTIL.startMiniCluster(option);
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, false);

View File

@ -26,6 +26,7 @@ import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
@ -76,7 +77,9 @@ public class TestMasterProcedureWalLease {
@Before
public void setup() throws Exception {
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(2, 3);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(2).numRegionServers(3).numDataNodes(3).build();
UTIL.startMiniCluster(option);
}
@After

View File

@ -118,7 +118,7 @@ public class TestNamespaceAuditor {
conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
conf.setClass("hbase.coprocessor.regionserver.classes", CPRegionServerObserver.class,
RegionServerObserver.class);
UTIL.startMiniCluster(1, 1);
UTIL.startMiniCluster();
waitForQuotaInitialize(UTIL);
ADMIN = UTIL.getAdmin();
}

View File

@ -59,7 +59,6 @@ public class TestClearRegionBlockCache {
private static final TableName TABLE_NAME = TableName.valueOf("testClearRegionBlockCache");
private static final byte[] FAMILY = Bytes.toBytes("family");
private static final byte[][] SPLIT_KEY = new byte[][] { Bytes.toBytes("5") };
private static final int NUM_MASTERS = 1;
private static final int NUM_RS = 2;
private final HBaseTestingUtility HTU = new HBaseTestingUtility();
@ -83,7 +82,7 @@ public class TestClearRegionBlockCache {
CONF.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 30);
}
cluster = HTU.startMiniCluster(NUM_MASTERS, NUM_RS);
cluster = HTU.startMiniCluster(NUM_RS);
rs1 = cluster.getRegionServer(0);
rs2 = cluster.getRegionServer(1);

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@ -82,7 +83,9 @@ public class TestClusterId {
//Make sure RS is in blocking state
Thread.sleep(10000);
TEST_UTIL.startMiniHBaseCluster(1, 0);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(1).numRegionServers(0).build();
TEST_UTIL.startMiniHBaseCluster(option);
rst.waitForServerOnline();
@ -108,7 +111,7 @@ public class TestClusterId {
s.close();
}
}
TEST_UTIL.startMiniHBaseCluster(1, 1);
TEST_UTIL.startMiniHBaseCluster();
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
int expected = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration())? 2: 1;
assertEquals(expected, master.getServerManager().getOnlineServersList().size());

View File

@ -198,7 +198,7 @@ public class TestEncryptionKeyRotation {
conf.set(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY, "hbase");
// Start the cluster back up
TEST_UTIL.startMiniHBaseCluster(1, 1);
TEST_UTIL.startMiniHBaseCluster();
// Verify the table can still be loaded
TEST_UTIL.waitTableAvailable(htd.getTableName(), 5000);
// Double check that the store file keys can be unwrapped

View File

@ -95,6 +95,7 @@ import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.Waiter;
@ -4288,7 +4289,9 @@ public class TestHRegion {
int regionServersCount = 3;
try {
cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numRegionServers(regionServersCount).dataNodeHosts(dataNodeHosts).build();
cluster = htu.startMiniCluster(option);
byte[][] families = { fam1, fam2 };
Table ht = htu.createTable(tableName, families);

View File

@ -70,10 +70,9 @@ public class TestHRegionOnCluster {
@Test
public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
final int NUM_MASTERS = 1;
final int NUM_RS = 3;
Admin hbaseAdmin = null;
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
TEST_UTIL.startMiniCluster(NUM_RS);
try {
final TableName tableName = TableName.valueOf(name.getMethodName());

View File

@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClientServiceCallable;
@ -375,7 +376,8 @@ public class TestHRegionServerBulkLoad {
int millisToRun = 30000;
int numScanners = 50;
UTIL.startMiniCluster(1, false, true);
// Set createWALDir to true and use default values for other options.
UTIL.startMiniCluster(StartMiniClusterOption.builder().createWALDir(true).build());
try {
WAL log = UTIL.getHBaseCluster().getRegionServer(0).getWAL(null);
FindBulkHBaseListener listener = new FindBulkHBaseListener();

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@ -98,7 +99,9 @@ public class TestJoinedScanners {
String[] dataNodeHosts = new String[] {"host1", "host2", "host3"};
int regionServersCount = 3;
TEST_UTIL.startMiniCluster(1, regionServersCount, dataNodeHosts);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numRegionServers(regionServersCount).dataNodeHosts(dataNodeHosts).build();
TEST_UTIL.startMiniCluster(option);
}
@AfterClass

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.Admin;
@ -117,7 +118,9 @@ public class TestRegionMergeTransactionOnCluster {
@BeforeClass
public static void beforeAllTests() throws Exception {
// Start a cluster
TEST_UTIL.startMiniCluster(1, NB_SERVERS, null, MyMaster.class, null);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.masterClass(MyMaster.class).numRegionServers(NB_SERVERS).numDataNodes(NB_SERVERS).build();
TEST_UTIL.startMiniCluster(option);
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
MASTER = cluster.getMaster();
MASTER.balanceSwitch(false);

View File

@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Durability;
@ -101,7 +102,8 @@ public class TestRegionServerAbort {
testUtil.startMiniZKCluster();
dfsCluster = testUtil.startMiniDFSCluster(2);
cluster = testUtil.startMiniHBaseCluster(1, 2);
StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(2).build();
cluster = testUtil.startMiniHBaseCluster(option);
}
@After

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@ -103,7 +104,9 @@ public class TestRegionServerHostname {
TEST_UTIL.getConfiguration().set(HRegionServer.MASTER_HOSTNAME_KEY, hostName);
TEST_UTIL.getConfiguration().set(HRegionServer.RS_HOSTNAME_KEY, hostName);
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
TEST_UTIL.startMiniCluster(option);
try {
ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode);
@ -143,7 +146,9 @@ public class TestRegionServerHostname {
TEST_UTIL.getConfiguration().set(HRegionServer.RS_HOSTNAME_KEY, hostName);
TEST_UTIL.getConfiguration().setBoolean(HRegionServer.RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, true);
try {
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
TEST_UTIL.startMiniCluster(option);
} catch (Exception e) {
Throwable t1 = e.getCause();
Throwable t2 = t1.getCause();
@ -163,7 +168,9 @@ public class TestRegionServerHostname {
public void testRegionServerHostnameReportedToMaster() throws Exception {
TEST_UTIL.getConfiguration().setBoolean(HRegionServer.RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY,
true);
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
TEST_UTIL.startMiniCluster(option);
boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
int expectedRS = NUM_RS + (tablesOnMaster? 1: 0);
try (ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher()) {

View File

@ -114,7 +114,7 @@ public class TestRegionServerMetrics {
conf.setInt("hbase.hstore.compaction.max", 100);
conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
TEST_UTIL.startMiniCluster(1, 1);
TEST_UTIL.startMiniCluster();
cluster = TEST_UTIL.getHBaseCluster();
cluster.waitForActiveAndReadyMaster();
admin = TEST_UTIL.getAdmin();

View File

@ -73,7 +73,7 @@ public class TestRegionServerOnlineConfigChange {
@BeforeClass
public static void setUp() throws Exception {
conf = hbaseTestingUtility.getConfiguration();
hbaseTestingUtility.startMiniCluster(1,1);
hbaseTestingUtility.startMiniCluster();
t1 = hbaseTestingUtility.createTable(TABLE1, COLUMN_FAMILY1);
try (RegionLocator locator = hbaseTestingUtility.getConnection().getRegionLocator(TABLE1)) {
HRegionInfo firstHRI = locator.getAllRegionLocations().get(0).getRegionInfo();

View File

@ -68,7 +68,7 @@ public class TestRemoveRegionMetrics {
conf.setInt("zookeeper.recovery.retry", 0);
conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
TEST_UTIL.startMiniCluster(1, 2);
TEST_UTIL.startMiniCluster(2);
cluster = TEST_UTIL.getHBaseCluster();
cluster.waitForActiveAndReadyMaster();

View File

@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
@ -138,7 +139,9 @@ public class TestSplitTransactionOnCluster {
@BeforeClass public static void before() throws Exception {
TESTING_UTIL.getConfiguration().setInt(HConstants.HBASE_BALANCER_PERIOD, 60000);
TESTING_UTIL.startMiniCluster(1, NB_SERVERS, null, MyMaster.class, null);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.masterClass(MyMaster.class).numRegionServers(NB_SERVERS).numDataNodes(NB_SERVERS).build();
TESTING_UTIL.startMiniCluster(option);
}
@AfterClass public static void after() throws Exception {

View File

@ -92,7 +92,7 @@ public class TestTags {
conf.setInt("hfile.format.version", 3);
conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
TestCoprocessorForTags.class.getName());
TEST_UTIL.startMiniCluster(1, 2);
TEST_UTIL.startMiniCluster(2);
}
@AfterClass

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@ -119,7 +120,8 @@ public abstract class AbstractTestLogRolling {
@Before
public void setUp() throws Exception {
TEST_UTIL.startMiniCluster(1, 1, 2);
// Use 2 DataNodes and default values for other StartMiniCluster options.
TEST_UTIL.startMiniCluster(StartMiniClusterOption.builder().numDataNodes(2).build());
cluster = TEST_UTIL.getHBaseCluster();
dfsCluster = TEST_UTIL.getDFSCluster();

View File

@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HBaseZKTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.client.Admin;
@ -102,8 +103,10 @@ public class SyncReplicationTestBase {
ZK_UTIL.startMiniZKCluster();
initTestingUtility(UTIL1, "/cluster1");
initTestingUtility(UTIL2, "/cluster2");
UTIL1.startMiniCluster(2,3);
UTIL2.startMiniCluster(2,3);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(2).numRegionServers(3).numDataNodes(3).build();
UTIL1.startMiniCluster(option);
UTIL2.startMiniCluster(option);
TableDescriptor td =
TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(ColumnFamilyDescriptorBuilder
.newBuilder(CF).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build();

View File

@ -21,6 +21,7 @@ import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.fail;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@ -65,7 +66,8 @@ public class TestReplicationDisableInactivePeer extends TestReplicationBase {
// disable and start the peer
admin.disablePeer("2");
utility2.startMiniHBaseCluster(1, 2);
StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(2).build();
utility2.startMiniHBaseCluster(option);
Get get = new Get(rowkey);
for (int i = 0; i < NB_RETRIES; i++) {
Result res = htable2.get(get);

View File

@ -149,7 +149,7 @@ public class TestReplicationDroppedTables extends TestReplicationBase {
// make sure we have a single region server only, so that all
// edits for all tables go there
utility1.shutdownMiniHBaseCluster();
utility1.startMiniHBaseCluster(1, 1);
utility1.startMiniHBaseCluster();
TableName tablename = TableName.valueOf(tName);
byte[] familyName = Bytes.toBytes("fam");
@ -224,7 +224,7 @@ public class TestReplicationDroppedTables extends TestReplicationBase {
// make sure we have a single region server only, so that all
// edits for all tables go there
utility1.shutdownMiniHBaseCluster();
utility1.startMiniHBaseCluster(1, 1);
utility1.startMiniHBaseCluster();
TableName tablename = TableName.valueOf("testdroppedtimed");
byte[] familyName = Bytes.toBytes("fam");

View File

@ -102,8 +102,8 @@ public class TestGlobalReplicationThrottler {
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(utility2.getClusterKey());
utility1.startMiniCluster(1, 1);
utility2.startMiniCluster(1, 1);
utility1.startMiniCluster();
utility2.startMiniCluster();
admin1.addPeer("peer1", rpc, null);
admin1.addPeer("peer2", rpc, null);

View File

@ -25,6 +25,7 @@ import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Put;
@ -63,7 +64,9 @@ public class TestHTraceHooks {
@BeforeClass
public static void before() throws Exception {
TEST_UTIL.startMiniCluster(2, 3);
StartMiniClusterOption option = StartMiniClusterOption.builder()
.numMasters(2).numRegionServers(3).numDataNodes(3).build();
TEST_UTIL.startMiniCluster(option);
rcvr = new POJOSpanReceiver(new HBaseHTraceConfiguration(TEST_UTIL.getConfiguration()));
TraceUtil.addReceiver(rcvr);
TraceUtil.addSampler(new Sampler() {

View File

@ -109,7 +109,7 @@ public class TestMiniClusterLoadSequential {
@Before
public void setUp() throws Exception {
LOG.debug("Test setup: isMultiPut=" + isMultiPut);
TEST_UTIL.startMiniCluster(1, NUM_RS);
TEST_UTIL.startMiniCluster(NUM_RS);
}
@After

View File

@ -56,7 +56,6 @@ public class TestWALFiltering {
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestWALFiltering.class);
private static final int NUM_MASTERS = 1;
private static final int NUM_RS = 4;
private static final TableName TABLE_NAME =
@ -69,7 +68,7 @@ public class TestWALFiltering {
@Before
public void setUp() throws Exception {
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
TEST_UTIL.startMiniCluster(NUM_RS);
fillTable();
}

View File

@ -80,7 +80,7 @@ public class TestWALOpenAfterDNRollingStart {
@Before
public void setUp() throws IOException, InterruptedException {
TEST_UTIL.getConfiguration().set("hbase.wal.provider", walProvider);
TEST_UTIL.startMiniHBaseCluster(1, 1);
TEST_UTIL.startMiniHBaseCluster();
}
@After

View File

@ -76,7 +76,7 @@ public class TestShellRSGroups {
CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
RSGroupAdminEndpoint.class.getName());
TEST_UTIL.startMiniCluster(1,4);
TEST_UTIL.startMiniCluster(4);
// Configure jruby runtime
List<String> loadPaths = new ArrayList<>(2);

View File

@ -99,7 +99,7 @@ public class TestJavaHBaseContext implements Serializable {
LOG.info("starting minicluster");
htu.startMiniZKCluster();
htu.startMiniHBaseCluster(1, 1);
htu.startMiniHBaseCluster();
LOG.info(" - minicluster started");