HBASE-11025 Infrastructure for pluggable consensus service (Mikhail Antonov)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1589537 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2014-04-23 22:58:23 +00:00
parent 639ca70f9c
commit d5db1fb545
22 changed files with 138 additions and 48 deletions

View File

@ -971,6 +971,9 @@ public final class HConstants {
/** Configuration key for setting replication codec class name */
public static final String REPLICATION_CODEC_CONF_KEY = "hbase.replication.rpc.codec";
/** Config for pluggable consensus provider */
public static final String HBASE_CONSENSUS_PROVIDER_CLASS = "hbase.consensus.provider.class";
private HConstants() {
// Can't be instantiated with this ctor.
}
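For context only (this helper is not part of the commit), a provider factory would typically resolve this key via reflection against the Configuration. A rough sketch, assuming the real ConsensusProviderFactory behaves along these lines and defaults to the ZkConsensusProvider named in hbase-default.xml below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.consensus.ZkConsensusProvider;
import org.apache.hadoop.util.ReflectionUtils;

public final class ExampleProviderLookup {
  // Illustrative stand-in, not the ConsensusProviderFactory shipped by this patch.
  public static ConsensusProvider lookup(Configuration conf) {
    // Fall back to the ZooKeeper-based provider when the key is unset.
    Class<? extends ConsensusProvider> clazz = conf.getClass(
        HConstants.HBASE_CONSENSUS_PROVIDER_CLASS,
        ZkConsensusProvider.class, ConsensusProvider.class);
    return ReflectionUtils.newInstance(clazz, conf);
  }
}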

View File

@ -1166,4 +1166,9 @@ possible configurations would overwhelm and obscure the important.
procedure. After implementing your own MasterProcedureManager, just put it in HBase's classpath
and add the fully qualified class name here.</description>
</property>
<property>
<name>hbase.consensus.provider.class</name>
<value>org.apache.hadoop.hbase.consensus.ZkConsensusProvider</value>
<description>Fully qualified name of the class implementing the consensus provider.</description>
</property>
</configuration>
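A deployment that wants a different implementation overrides this property in hbase-site.xml or on the Configuration before start-up. A minimal sketch, where org.example.MyConsensusProvider is a hypothetical class and not part of this change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.LocalHBaseCluster;

public class ConsensusOverrideExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Swap the default ZooKeeper-based provider for a custom one.
    conf.set(HConstants.HBASE_CONSENSUS_PROVIDER_CLASS, "org.example.MyConsensusProvider");
    LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
    cluster.startup();
  }
}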

View File

@ -30,6 +30,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.consensus.ConsensusProviderFactory;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
@ -73,6 +75,7 @@ public class LocalHBaseCluster {
private final Class<? extends HMaster> masterClass;
private final Class<? extends HRegionServer> regionServerClass;
ConsensusProvider consensusProvider;
/**
* Constructor.
* @param conf
@ -139,6 +142,8 @@ public class LocalHBaseCluster {
final Class<? extends HRegionServer> regionServerClass)
throws IOException {
this.conf = conf;
consensusProvider = ConsensusProviderFactory.getConsensusProvider(conf);
// Always have masters and regionservers come up on port '0' so we don't
// clash over default ports.
conf.set(HConstants.MASTER_PORT, "0");
@ -173,7 +178,7 @@ public class LocalHBaseCluster {
// its HConnection instance rather than share (see HBASE_INSTANCES down in
// the guts of HConnectionManager).
JVMClusterUtil.RegionServerThread rst =
JVMClusterUtil.createRegionServerThread(config,
JVMClusterUtil.createRegionServerThread(config, consensusProvider,
this.regionServerClass, index);
this.regionThreads.add(rst);
return rst;
@ -199,7 +204,7 @@ public class LocalHBaseCluster {
// Create each master with its own Configuration instance so each has
// its HConnection instance rather than share (see HBASE_INSTANCES down in
// the guts of HConnectionManager).
JVMClusterUtil.MasterThread mt = JVMClusterUtil.createMasterThread(c,
JVMClusterUtil.MasterThread mt = JVMClusterUtil.createMasterThread(c, consensusProvider,
(Class<? extends HMaster>) conf.getClass(HConstants.MASTER_IMPL, this.masterClass), index);
this.masterThreads.add(mt);
return mt;

View File

@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.executor.ExecutorType;
@ -254,9 +255,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
* @throws KeeperException
* @throws IOException
*/
public HMaster(final Configuration conf)
public HMaster(final Configuration conf, ConsensusProvider consensusProvider)
throws IOException, KeeperException, InterruptedException {
super(conf);
super(conf, consensusProvider);
this.rsFatals = new MemoryBoundedLogMessageBuffer(
conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));

View File

@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.ServerCommandLine;
@ -255,9 +256,9 @@ public class HMasterCommandLine extends ServerCommandLine {
public static class LocalHMaster extends HMaster {
private MiniZooKeeperCluster zkcluster = null;
public LocalHMaster(Configuration conf)
public LocalHMaster(Configuration conf, ConsensusProvider consensusProvider)
throws IOException, KeeperException, InterruptedException {
super(conf);
super(conf, consensusProvider);
}
@Override

View File

@ -75,6 +75,8 @@ import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.consensus.ConsensusProviderFactory;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.exceptions.RegionMovedException;
import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
@ -390,14 +392,26 @@ public class HRegionServer extends HasThread implements
protected final RSRpcServices rpcServices;
protected ConsensusProvider consensusProvider;
/**
* Starts a HRegionServer at the default location
*
* Starts a HRegionServer at the default location.
* @param conf
* @throws IOException
* @throws InterruptedException
*/
public HRegionServer(Configuration conf)
public HRegionServer(Configuration conf) throws IOException, InterruptedException {
this(conf, ConsensusProviderFactory.getConsensusProvider(conf));
}
/**
* Starts a HRegionServer at the default location
* @param conf
* @param consensusProvider implementation of ConsensusProvider to be used
* @throws IOException
* @throws InterruptedException
*/
public HRegionServer(Configuration conf, ConsensusProvider consensusProvider)
throws IOException, InterruptedException {
this.fsOk = true;
this.conf = conf;
@ -469,6 +483,10 @@ public class HRegionServer extends HasThread implements
zooKeeper = new ZooKeeperWatcher(conf, getProcessName() + ":" +
rpcServices.isa.getPort(), this, canCreateBaseZNode());
this.consensusProvider = consensusProvider;
this.consensusProvider.initialize(this);
this.consensusProvider.start();
tableLockManager = TableLockManager.createTableLockManager(
conf, zooKeeper, serverName);
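The ConsensusProvider interface itself is not included in this diff; the calls above only show that it is initialized with the hosting server and then started. A hypothetical no-op implementation, assuming initialize() accepts the Server interface (the real interface from HBASE-11025 may declare more methods, e.g. a stop hook):

import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;

public class NoopConsensusProvider implements ConsensusProvider {
  private Server server;

  @Override
  public void initialize(Server server) {
    this.server = server;  // remember the hosting master or region server
  }

  @Override
  public void start() {
    // nothing to coordinate in this illustrative no-op variant
  }
}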
@ -2128,6 +2146,10 @@ public class HRegionServer extends HasThread implements
return zooKeeper;
}
public ConsensusProvider getConsensusProvider() {
return consensusProvider;
}
@Override
public ServerName getServerName() {
return serverName;
@ -2222,11 +2244,11 @@ public class HRegionServer extends HasThread implements
*/
public static HRegionServer constructRegionServer(
Class<? extends HRegionServer> regionServerClass,
final Configuration conf2) {
final Configuration conf2, ConsensusProvider cp) {
try {
Constructor<? extends HRegionServer> c = regionServerClass
.getConstructor(Configuration.class);
return c.newInstance(conf2);
.getConstructor(Configuration.class, ConsensusProvider.class);
return c.newInstance(conf2, cp);
} catch (Exception e) {
throw new RuntimeException("Failed construction of " + "Regionserver: "
+ regionServerClass.toString(), e);
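Since the constructor is looked up reflectively with exactly (Configuration, ConsensusProvider), any HRegionServer subclass handed to constructRegionServer must declare that signature, as the test subclasses later in this commit do. A minimal hypothetical subclass:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.regionserver.HRegionServer;

public class MyRegionServer extends HRegionServer {
  // Must expose the (Configuration, ConsensusProvider) constructor for the reflective lookup.
  public MyRegionServer(Configuration conf, ConsensusProvider cp)
      throws IOException, InterruptedException {
    super(conf, cp);
  }
}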

View File

@ -25,6 +25,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.consensus.ConsensusProviderFactory;
import org.apache.hadoop.hbase.util.ServerCommandLine;
/**
@ -50,6 +52,7 @@ public class HRegionServerCommandLine extends ServerCommandLine {
private int start() throws Exception {
Configuration conf = getConf();
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(conf);
try {
// If 'local', don't start a region server here. Defer to
// LocalHBaseCluster. It manages 'local' clusters.
@ -58,7 +61,7 @@ public class HRegionServerCommandLine extends ServerCommandLine {
+ HConstants.CLUSTER_DISTRIBUTED + " is false");
} else {
logProcessInfo(getConf());
HRegionServer hrs = HRegionServer.constructRegionServer(regionServerClass, conf);
HRegionServer hrs = HRegionServer.constructRegionServer(regionServerClass, conf, cp);
hrs.start();
hrs.join();
if (hrs.isAborted()) {

View File

@ -29,6 +29,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.util.ReflectionUtils;
@ -73,20 +74,23 @@ public class JVMClusterUtil {
* Creates a {@link RegionServerThread}.
* Call 'start' on the returned thread to make it run.
* @param c Configuration to use.
* @param cp consensus provider to use
* @param hrsc Class to create.
* @param index Used for distinguishing the object returned.
* @throws IOException
* @return Region server added.
*/
public static JVMClusterUtil.RegionServerThread createRegionServerThread(
final Configuration c, final Class<? extends HRegionServer> hrsc,
final Configuration c, ConsensusProvider cp, final Class<? extends HRegionServer> hrsc,
final int index)
throws IOException {
HRegionServer server;
try {
Constructor<? extends HRegionServer> ctor = hrsc.getConstructor(Configuration.class);
Constructor<? extends HRegionServer> ctor = hrsc.getConstructor(Configuration.class,
ConsensusProvider.class);
ctor.setAccessible(true);
server = ctor.newInstance(c);
server = ctor.newInstance(c, cp);
} catch (InvocationTargetException ite) {
Throwable target = ite.getTargetException();
throw new RuntimeException("Failed construction of RegionServer: " +
@ -122,18 +126,20 @@ public class JVMClusterUtil {
* Creates a {@link MasterThread}.
* Call 'start' on the returned thread to make it run.
* @param c Configuration to use.
* @param cp consensus provider to use
* @param hmc Class to create.
* @param index Used for distinguishing the object returned.
* @throws IOException
* @return Master added.
*/
public static JVMClusterUtil.MasterThread createMasterThread(
final Configuration c, final Class<? extends HMaster> hmc,
final Configuration c, ConsensusProvider cp, final Class<? extends HMaster> hmc,
final int index)
throws IOException {
HMaster server;
try {
server = hmc.getConstructor(Configuration.class).newInstance(c);
server = hmc.getConstructor(Configuration.class, ConsensusProvider.class).
newInstance(c, cp);
} catch (InvocationTargetException ite) {
Throwable target = ite.getTargetException();
throw new RuntimeException("Failed construction of Master: " +

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
@ -110,9 +111,9 @@ public class MiniHBaseCluster extends HBaseCluster {
private User user = null;
public static boolean TEST_SKIP_CLOSE = false;
public MiniHBaseClusterRegionServer(Configuration conf)
public MiniHBaseClusterRegionServer(Configuration conf, ConsensusProvider cp)
throws IOException, InterruptedException {
super(conf);
super(conf, cp);
this.user = User.getCurrent();
}

View File

@ -22,6 +22,7 @@ import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.zookeeper.KeeperException;
@ -64,9 +65,10 @@ public class TestLocalHBaseCluster {
* running in local mode.
*/
public static class MyHMaster extends HMaster {
public MyHMaster(Configuration conf) throws IOException, KeeperException,
public MyHMaster(Configuration conf, ConsensusProvider cp)
throws IOException, KeeperException,
InterruptedException {
super(conf);
super(conf, cp);
}
public int echo(int val) {
@ -79,9 +81,9 @@ public class TestLocalHBaseCluster {
*/
public static class MyHRegionServer extends MiniHBaseCluster.MiniHBaseClusterRegionServer {
public MyHRegionServer(Configuration conf) throws IOException,
public MyHRegionServer(Configuration conf, ConsensusProvider cp) throws IOException,
InterruptedException {
super(conf);
super(conf, cp);
}
public int echo(int val) {

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
@ -129,9 +130,9 @@ public class TestClientScannerRPCTimeout {
}
private static class RegionServerWithScanTimeout extends MiniHBaseClusterRegionServer {
public RegionServerWithScanTimeout(Configuration conf)
public RegionServerWithScanTimeout(Configuration conf, ConsensusProvider cp)
throws IOException, InterruptedException {
super(conf);
super(conf, cp);
}
protected RSRpcServices createRpcServices() throws IOException {

View File

@ -50,6 +50,8 @@ import org.apache.hadoop.hbase.catalog.MetaMockingUtil;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.consensus.ConsensusProviderFactory;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
@ -886,7 +888,9 @@ public class TestAssignmentManager {
Mockito.when(this.serverManager.createDestinationServersList()).thenReturn(destServers);
// To avoid cast exception in DisableTableHandler process.
HTU.getConfiguration().setInt(HConstants.MASTER_PORT, 0);
Server server = new HMaster(HTU.getConfiguration());
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(HTU.getConfiguration());
Server server = new HMaster(HTU.getConfiguration(), cp);
AssignmentManagerWithExtrasForTesting am = setUpMockedAssignmentManager(server,
this.serverManager);
AtomicBoolean gate = new AtomicBoolean(false);
@ -928,7 +932,8 @@ public class TestAssignmentManager {
Mockito.when(this.serverManager.createDestinationServersList()).thenReturn(destServers);
Mockito.when(this.serverManager.isServerOnline(SERVERNAME_A)).thenReturn(true);
HTU.getConfiguration().setInt(HConstants.MASTER_PORT, 0);
Server server = new HMaster(HTU.getConfiguration());
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(HTU.getConfiguration());
Server server = new HMaster(HTU.getConfiguration(), cp);
Whitebox.setInternalState(server, "serverManager", this.serverManager);
AssignmentManagerWithExtrasForTesting am = setUpMockedAssignmentManager(server,
this.serverManager);
@ -965,7 +970,8 @@ public class TestAssignmentManager {
Mockito.when(this.serverManager.createDestinationServersList()).thenReturn(destServers);
Mockito.when(this.serverManager.isServerOnline(SERVERNAME_A)).thenReturn(true);
HTU.getConfiguration().setInt(HConstants.MASTER_PORT, 0);
Server server = new HMaster(HTU.getConfiguration());
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(HTU.getConfiguration());
Server server = new HMaster(HTU.getConfiguration(), cp);
Whitebox.setInternalState(server, "serverManager", this.serverManager);
AssignmentManagerWithExtrasForTesting am = setUpMockedAssignmentManager(server,
this.serverManager);

View File

@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@ -837,9 +838,10 @@ public class TestAssignmentManagerOnCluster {
public static class MyMaster extends HMaster {
AtomicBoolean enabled = new AtomicBoolean(true);
public MyMaster(Configuration conf) throws IOException, KeeperException,
public MyMaster(Configuration conf, ConsensusProvider cp)
throws IOException, KeeperException,
InterruptedException {
super(conf);
super(conf, cp);
}
@Override

View File

@ -26,6 +26,8 @@ import java.net.SocketTimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.consensus.ConsensusProviderFactory;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
@ -46,7 +48,8 @@ public class TestHMasterRPCException {
TEST_UTIL.startMiniZKCluster();
Configuration conf = TEST_UTIL.getConfiguration();
conf.set(HConstants.MASTER_PORT, "0");
HMaster hm = new HMaster(conf);
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(conf);
HMaster hm = new HMaster(conf, cp);
ServerName sm = hm.getServerName();
RpcClient rpcClient = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT);
try {

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
@ -49,9 +50,9 @@ public class TestMasterMetrics {
private static HBaseTestingUtility TEST_UTIL;
public static class MyMaster extends HMaster {
public MyMaster(Configuration conf) throws IOException,
public MyMaster(Configuration conf, ConsensusProvider cp) throws IOException,
KeeperException, InterruptedException {
super(conf);
super(conf, cp);
}
@Override

View File

@ -46,6 +46,8 @@ import org.apache.hadoop.hbase.catalog.MetaMockingUtil;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.consensus.ConsensusProviderFactory;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
@ -123,7 +125,9 @@ public class TestMasterNoCluster {
@Test (timeout=30000)
public void testStopDuringStart()
throws IOException, KeeperException, InterruptedException {
HMaster master = new HMaster(TESTUTIL.getConfiguration());
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(
TESTUTIL.getConfiguration());
HMaster master = new HMaster(TESTUTIL.getConfiguration(), cp);
master.start();
// Immediately have it stop. We used to hang in assigning meta.
master.stopMaster();
@ -173,7 +177,9 @@ public class TestMasterNoCluster {
// and get notification on transitions. We need to fake out any rpcs the
// master does opening/closing regions. Also need to fake out the address
// of the 'remote' mocked up regionservers.
HMaster master = new HMaster(conf) {
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(
TESTUTIL.getConfiguration());
HMaster master = new HMaster(conf, cp) {
InetAddress getRemoteInetAddress(final int port, final long serverStartCode)
throws UnknownHostException {
// Return different address dependent on port passed.
@ -254,7 +260,9 @@ public class TestMasterNoCluster {
final ServerName deadServer = ServerName.valueOf("test.sample", 1, 100);
final MockRegionServer rs0 = new MockRegionServer(conf, newServer);
HMaster master = new HMaster(conf) {
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(
TESTUTIL.getConfiguration());
HMaster master = new HMaster(conf, cp) {
@Override
void assignMeta(MonitoredTask status, Set<ServerName> previouslyFailedMeatRSs) {
}

View File

@ -25,6 +25,7 @@ import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
@ -41,8 +42,9 @@ import com.google.protobuf.ServiceException;
public class OOMERegionServer extends HRegionServer {
private List<Put> retainer = new ArrayList<Put>();
public OOMERegionServer(HBaseConfiguration conf) throws IOException, InterruptedException {
super(conf);
public OOMERegionServer(HBaseConfiguration conf, ConsensusProvider cp)
throws IOException, InterruptedException {
super(conf, cp);
}
public void put(byte [] regionName, Put put)

View File

@ -30,6 +30,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.consensus.ConsensusProviderFactory;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
@ -70,9 +72,10 @@ public class TestClusterId {
TEST_UTIL.startMiniDFSCluster(1);
Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(conf);
//start region server, needs to be separate
//so we get an unset clusterId
rst = JVMClusterUtil.createRegionServerThread(conf,
rst = JVMClusterUtil.createRegionServerThread(conf, cp,
HRegionServer.class, 0);
rst.start();
//Make sure RS is in blocking state

View File

@ -29,6 +29,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.consensus.ConsensusProviderFactory;
import org.apache.hadoop.hbase.ipc.PriorityFunction;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
@ -56,7 +58,8 @@ public class TestPriorityRpc {
public void setup() {
Configuration conf = HBaseConfiguration.create();
conf.setBoolean("hbase.testing.nocluster", true); // No need to do ZK
regionServer = HRegionServer.constructRegionServer(HRegionServer.class, conf);
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(conf);
regionServer = HRegionServer.constructRegionServer(HRegionServer.class, conf, cp);
priority = regionServer.rpcServices.getPriority();
}

View File

@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
@ -103,8 +104,9 @@ public class TestRSKilledWhenInitializing {
public static class MockedRegionServer extends MiniHBaseCluster.MiniHBaseClusterRegionServer {
public MockedRegionServer(Configuration conf) throws IOException, InterruptedException {
super(conf);
public MockedRegionServer(Configuration conf, ConsensusProvider cp)
throws IOException, InterruptedException {
super(conf, cp);
}
@Override

View File

@ -42,6 +42,8 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.consensus.ConsensusProviderFactory;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.util.Bytes;
@ -218,7 +220,9 @@ public class TestRegionMergeTransaction {
// Run the execute. Look at what it returns.
TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0);
Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration());
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(
TEST_UTIL.getConfiguration());
Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
HRegion mergedRegion = mt.execute(mockServer, null);
// Do some assertions about execution.
assertTrue(this.fs.exists(mt.getMergesDir()));
@ -265,7 +269,9 @@ public class TestRegionMergeTransaction {
// Run the execute. Look at what it returns.
boolean expectedException = false;
TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0);
Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration());
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(
TEST_UTIL.getConfiguration());
Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
try {
mt.execute(mockServer, null);
} catch (MockedFailedMergedRegionCreation e) {
@ -324,7 +330,9 @@ public class TestRegionMergeTransaction {
// Run the execute. Look at what it returns.
boolean expectedException = false;
TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0);
Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration());
ConsensusProvider cp = ConsensusProviderFactory.getConsensusProvider(
TEST_UTIL.getConfiguration());
Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
try {
mt.execute(mockServer, null);
} catch (MockedFailedMergedRegionOpen e) {

View File

@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.consensus.ConsensusProvider;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@ -1297,9 +1298,10 @@ public class TestSplitTransactionOnCluster {
public static class MockMasterWithoutCatalogJanitor extends HMaster {
public MockMasterWithoutCatalogJanitor(Configuration conf) throws IOException, KeeperException,
public MockMasterWithoutCatalogJanitor(Configuration conf, ConsensusProvider cp)
throws IOException, KeeperException,
InterruptedException {
super(conf);
super(conf, cp);
}
@Override