HBASE-25022 Remove 'hbase.testing.nocluster' config (#2394)

Signed-off-by: Guanghao Zhang <zghao@apache.org>
Duo Zhang authored on 2020-09-14 20:15:43 +08:00; committed by GitHub
parent 2042523f43
commit 724a0e5500
5 changed files with 67 additions and 84 deletions
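Note: with the flag gone, tests that previously set 'hbase.testing.nocluster' to skip ZooKeeper now run against a real mini cluster instead, which is exactly what the TestPriorityRpc change below does. A minimal sketch of that pattern, assuming only the existing HBaseTestingUtility test API; the class and test method names here are hypothetical:

import static org.junit.Assert.assertNotNull;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class ExampleMiniClusterTest {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private static HRegionServer RS;

  @BeforeClass
  public static void setUp() throws Exception {
    // Starts ZooKeeper, a master and one region server; no test-only shortcut needed.
    UTIL.startMiniCluster(1);
    RS = UTIL.getHBaseCluster().getRegionServer(0);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void testAgainstARealRegionServer() {
    assertNotNull(RS.getServerName());
  }
}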

hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java

@ -588,14 +588,9 @@ public class HMaster extends HRegionServer implements MasterServices {
         }
       }
-      // Some unit tests don't need a cluster, so no zookeeper at all
-      if (!conf.getBoolean("hbase.testing.nocluster", false)) {
-        this.metaRegionLocationCache = new MetaRegionLocationCache(this.zooKeeper);
-        this.activeMasterManager = createActiveMasterManager(zooKeeper, serverName, this);
-      } else {
-        this.metaRegionLocationCache = null;
-        this.activeMasterManager = null;
-      }
+      this.metaRegionLocationCache = new MetaRegionLocationCache(this.zooKeeper);
+      this.activeMasterManager = createActiveMasterManager(zooKeeper, serverName, this);
       cachedClusterId = new CachedClusterId(this, conf);
     } catch (Throwable t) {
       // Make sure we log the exception. HMaster is often started via reflection and the
@ -624,22 +619,20 @@ public class HMaster extends HRegionServer implements MasterServices {
   @Override
   public void run() {
     try {
-      if (!conf.getBoolean("hbase.testing.nocluster", false)) {
-        Threads.setDaemonThreadRunning(new Thread(() -> {
-          try {
-            int infoPort = putUpJettyServer();
-            startActiveMasterManager(infoPort);
-          } catch (Throwable t) {
-            // Make sure we log the exception.
-            String error = "Failed to become Active Master";
-            LOG.error(error, t);
-            // Abort should have been called already.
-            if (!isAborted()) {
-              abort(error, t);
-            }
-          }
-        }), getName() + ":becomeActiveMaster");
-      }
+      Threads.setDaemonThreadRunning(new Thread(() -> {
+        try {
+          int infoPort = putUpJettyServer();
+          startActiveMasterManager(infoPort);
+        } catch (Throwable t) {
+          // Make sure we log the exception.
+          String error = "Failed to become Active Master";
+          LOG.error(error, t);
+          // Abort should have been called already.
+          if (!isAborted()) {
+            abort(error, t);
+          }
+        }
+      }), getName() + ":becomeActiveMaster");
       // Fall in here even if we have been aborted. Need to run the shutdown services and
       // the super run call will do this for us.
       super.run();
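For context on the call kept above: Threads.setDaemonThreadRunning is an existing utility in org.apache.hadoop.hbase.util.Threads that names a thread, marks it as a daemon and starts it. A rough, hypothetical stand-in in plain Java (not the HBase implementation itself):

public final class DaemonThreadDemo {
  // Hypothetical equivalent: name the thread, make it a daemon so it cannot keep
  // the JVM alive on shutdown, then start it and hand it back.
  static Thread setDaemonThreadRunning(Thread t, String name) {
    t.setName(name);
    t.setDaemon(true);
    t.start();
    return t;
  }

  public static void main(String[] args) {
    Thread t = setDaemonThreadRunning(new Thread(() -> {}), "demo:becomeActiveMaster");
    System.out.println(t.getName() + " daemon=" + t.isDaemon());
  }
}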

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java

@ -638,28 +638,22 @@ public class HRegionServer extends Thread implements
     setupWindows(getConfiguration(), getConfigurationManager());
-    // Some unit tests don't need a cluster, so no zookeeper at all
-    if (!conf.getBoolean("hbase.testing.nocluster", false)) {
-      // Open connection to zookeeper and set primary watcher
-      zooKeeper = new ZKWatcher(conf, getProcessName() + ":" +
-        rpcServices.isa.getPort(), this, canCreateBaseZNode());
-      // If no master in cluster, skip trying to track one or look for a cluster status.
-      if (!this.masterless) {
-        if (conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK,
-          DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
-          this.csm = new ZkCoordinatedStateManager(this);
-        }
-        masterAddressTracker = new MasterAddressTracker(getZooKeeper(), this);
-        masterAddressTracker.start();
-        clusterStatusTracker = new ClusterStatusTracker(zooKeeper, this);
-        clusterStatusTracker.start();
-      } else {
-        masterAddressTracker = null;
-        clusterStatusTracker = null;
-      }
+    // Open connection to zookeeper and set primary watcher
+    zooKeeper = new ZKWatcher(conf, getProcessName() + ":" + rpcServices.isa.getPort(), this,
+      canCreateBaseZNode());
+    // If no master in cluster, skip trying to track one or look for a cluster status.
+    if (!this.masterless) {
+      if (conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK,
+        DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
+        this.csm = new ZkCoordinatedStateManager(this);
+      }
+      masterAddressTracker = new MasterAddressTracker(getZooKeeper(), this);
+      masterAddressTracker.start();
+      clusterStatusTracker = new ClusterStatusTracker(zooKeeper, this);
+      clusterStatusTracker.start();
     } else {
-      zooKeeper = null;
       masterAddressTracker = null;
       clusterStatusTracker = null;
     }

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java

@ -106,6 +106,7 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterRpcServices;
 import org.apache.hadoop.hbase.namequeues.NamedQueuePayload;
+import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
 import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest;
 import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse;
 import org.apache.hadoop.hbase.net.Address;
@ -128,7 +129,6 @@ import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
 import org.apache.hadoop.hbase.regionserver.handler.OpenPriorityRegionHandler;
 import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
 import org.apache.hadoop.hbase.regionserver.handler.UnassignRegionHandler;
-import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
 import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.replication.regionserver.RejectReplicationRequestStateChecker;
 import org.apache.hadoop.hbase.replication.regionserver.RejectRequestsFromClientStateChecker;
@ -1425,14 +1425,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     } else {
       accessChecker = new NoopAccessChecker(getConfiguration());
     }
-    if (!getConfiguration().getBoolean("hbase.testing.nocluster", false) && zkWatcher != null) {
-      zkPermissionWatcher =
-        new ZKPermissionWatcher(zkWatcher, accessChecker.getAuthManager(), getConfiguration());
-      try {
-        zkPermissionWatcher.start();
-      } catch (KeeperException e) {
-        LOG.error("ZooKeeper permission watcher initialization failed", e);
-      }
-    }
+    zkPermissionWatcher =
+      new ZKPermissionWatcher(zkWatcher, accessChecker.getAuthManager(), getConfiguration());
+    try {
+      zkPermissionWatcher.start();
+    } catch (KeeperException e) {
+      LOG.error("ZooKeeper permission watcher initialization failed", e);
+    }
     this.scannerIdGenerator = new ScannerIdGenerator(this.regionServer.serverName);
     rpcServer.start();

hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java

@ -70,8 +70,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 @Category({CoprocessorTests.class, MediumTests.class})
 public class TestCoprocessorInterface {
@ -81,7 +79,6 @@ public class TestCoprocessorInterface {
       HBaseClassTestRule.forClass(TestCoprocessorInterface.class);
 
   @Rule public TestName name = new TestName();
-  private static final Logger LOG = LoggerFactory.getLogger(TestCoprocessorInterface.class);
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   static final Path DIR = TEST_UTIL.getDataTestDir();
@ -396,7 +393,7 @@ public class TestCoprocessorInterface {
     r.setCoprocessorHost(host);
     for (Class<?> implClass : implClasses) {
-      host.load((Class<? extends RegionCoprocessor>) implClass, Coprocessor.PRIORITY_USER, conf);
+      host.load(implClass.asSubclass(RegionCoprocessor.class), Coprocessor.PRIORITY_USER, conf);
     }
     // we need to manually call pre- and postOpen here since the
     // above load() is not the real case for CP loading. A CP is
@ -431,7 +428,7 @@ public class TestCoprocessorInterface {
     r.setCoprocessorHost(host);
     for (Class<?> implClass : implClasses) {
-      host.load((Class<? extends RegionCoprocessor>) implClass, Coprocessor.PRIORITY_USER, conf);
+      host.load(implClass.asSubclass(RegionCoprocessor.class), Coprocessor.PRIORITY_USER, conf);
       Coprocessor c = host.findCoprocessor(implClass.getName());
       assertNotNull(c);
     }
@ -452,8 +449,6 @@ public class TestCoprocessorInterface {
     // below. After adding all data, the first region is 1.3M
     TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE,
         1024 * 128);
-    TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster",
-        true);
     TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);
     return TEST_UTIL.getConfiguration();
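A side note on the host.load() change in this test: implClass.asSubclass(RegionCoprocessor.class) replaces the unchecked (Class<? extends RegionCoprocessor>) cast with a conversion the JDK checks at runtime. A small standalone illustration of Class.asSubclass; the demo classes are hypothetical:

public class AsSubclassDemo {
  static class MyTask implements Runnable {
    @Override
    public void run() {
      System.out.println("running");
    }
  }

  public static void main(String[] args) {
    Class<?> implClass = MyTask.class;
    // Checked at runtime: asSubclass throws ClassCastException if implClass does not
    // implement Runnable, rather than relying on an unchecked compile-time cast.
    Class<? extends Runnable> typed = implClass.asSubclass(Runnable.class);
    System.out.println(typed.getName());
  }
}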

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java

@ -20,9 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
 import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
@ -33,7 +31,8 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Before;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@ -59,23 +58,26 @@ public class TestPriorityRpc {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestPriorityRpc.class);
 
-  private Configuration conf;
-  private HRegionServer regionServer = null;
-  private PriorityFunction priority = null;
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static HRegionServer RS = null;
+  private static PriorityFunction PRIORITY = null;
 
-  @Before
-  public void setup() {
-    conf = HBaseConfiguration.create();
-    conf.setBoolean("hbase.testing.nocluster", true); // No need to do ZK
-    final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
-    TEST_UTIL.getDataTestDir(this.getClass().getName());
-    regionServer = HRegionServer.constructRegionServer(HRegionServer.class, conf);
-    priority = regionServer.rpcServices.getPriority();
+  @BeforeClass
+  public static void setUp() throws Exception {
+    UTIL.startMiniCluster(1);
+    RS = UTIL.getHBaseCluster().getRegionServer(0);
+    PRIORITY = RS.rpcServices.getPriority();
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    UTIL.shutdownMiniCluster();
   }
 
   @Test
   public void testQosFunctionForMeta() throws IOException {
-    priority = regionServer.rpcServices.getPriority();
+    PRIORITY = RS.rpcServices.getPriority();
     RequestHeader.Builder headerBuilder = RequestHeader.newBuilder();
     //create a rpc request that has references to hbase:meta region and also
     //uses one of the known argument classes (known argument classes are
@ -105,9 +107,9 @@ public class TestPriorityRpc {
     Mockito.when(mockRegionInfo.getTable())
         .thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable());
     // Presume type.
-    ((AnnotationReadingPriorityFunction)priority).setRegionServer(mockRS);
+    ((AnnotationReadingPriorityFunction)PRIORITY).setRegionServer(mockRS);
     assertEquals(
-      HConstants.SYSTEMTABLE_QOS, priority.getPriority(header, getRequest, createSomeUser()));
+      HConstants.SYSTEMTABLE_QOS, PRIORITY.getPriority(header, getRequest, createSomeUser()));
   }
 
   @Test
@ -119,7 +121,7 @@ public class TestPriorityRpc {
     RequestHeader.Builder headerBuilder = RequestHeader.newBuilder();
     headerBuilder.setMethodName("foo");
     RequestHeader header = headerBuilder.build();
-    PriorityFunction qosFunc = regionServer.rpcServices.getPriority();
+    PriorityFunction qosFunc = RS.rpcServices.getPriority();
     assertEquals(HConstants.NORMAL_QOS, qosFunc.getPriority(header, null, createSomeUser()));
   }
@ -143,8 +145,8 @@ public class TestPriorityRpc {
     Mockito.when(mockRegionInfo.getTable())
         .thenReturn(TableName.valueOf("testQosFunctionForScanMethod"));
     // Presume type.
-    ((AnnotationReadingPriorityFunction)priority).setRegionServer(mockRS);
-    final int qos = priority.getPriority(header, scanRequest, createSomeUser());
+    ((AnnotationReadingPriorityFunction)PRIORITY).setRegionServer(mockRS);
+    final int qos = PRIORITY.getPriority(header, scanRequest, createSomeUser());
     assertEquals(Integer.toString(qos), qos, HConstants.NORMAL_QOS);
 
     //build a scan request with scannerID
@ -161,11 +163,11 @@ public class TestPriorityRpc {
         .thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable());
     // Presume type.
-    ((AnnotationReadingPriorityFunction)priority).setRegionServer(mockRS);
+    ((AnnotationReadingPriorityFunction)PRIORITY).setRegionServer(mockRS);
     assertEquals(
       HConstants.SYSTEMTABLE_QOS,
-      priority.getPriority(header, scanRequest, createSomeUser()));
+      PRIORITY.getPriority(header, scanRequest, createSomeUser()));
 
     //the same as above but with non-meta region
     // make isSystemTable return false
@ -173,10 +175,11 @@ public class TestPriorityRpc {
         .thenReturn(TableName.valueOf("testQosFunctionForScanMethod"));
     assertEquals(
       HConstants.NORMAL_QOS,
-      priority.getPriority(header, scanRequest, createSomeUser()));
+      PRIORITY.getPriority(header, scanRequest, createSomeUser()));
   }
 
-  private User createSomeUser() {
-    return User.createUserForTesting(conf, "someuser", new String[] { "somegroup" });
+  private static User createSomeUser() {
+    return User.createUserForTesting(UTIL.getConfiguration(), "someuser",
+      new String[] { "somegroup" });
   }
 }