HBASE-23081 Add an option to enable/disable rs group feature (#691)

Signed-off-by: Peter Somogyi <psomogyi@apache.org>

parent b10b39ad03
commit 0f4a87c24d
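The switch this change introduces is the boolean configuration key hbase.balancer.rsgroup.enabled, surfaced in code as RSGroupInfoManager.RS_GROUP_ENABLED in the hunks below; it defaults to false. As a minimal illustrative sketch (not part of this commit; the class and method names are made up for the example), a test or embedded setup could turn the feature on programmatically like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class RsGroupEnableSketch {
  // Build a Configuration with the rs group feature switched on.
  public static Configuration rsGroupEnabledConf() {
    Configuration conf = HBaseConfiguration.create();
    // Key added by this commit; the feature stays off when the key is absent.
    conf.setBoolean("hbase.balancer.rsgroup.enabled", true);
    return conf;
  }
}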
@@ -186,6 +186,7 @@ import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
 import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
 import org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus;
+import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint;
 import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
 import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
@@ -800,6 +801,17 @@ public class HMaster extends HRegionServer implements MasterServices {
     this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this);
     this.splitOrMergeTracker.start();
 
+    // This is for backwards compatibility. We do not need the CP for rs group now, but if the
+    // user wants to load it, we need to enable rs group.
+    String[] cpClasses = conf.getStrings(MasterCoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
+    if (cpClasses != null) {
+      for (String cpClass : cpClasses) {
+        if (RSGroupAdminEndpoint.class.getName().equals(cpClass)) {
+          conf.setBoolean(RSGroupInfoManager.RS_GROUP_ENABLED, true);
+          break;
+        }
+      }
+    }
     this.rsGroupInfoManager = RSGroupInfoManager.create(this);
 
     this.replicationPeerManager = ReplicationPeerManager.create(zooKeeper, conf);
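The comment in the hunk above describes the compatibility path: a site that still lists the old RSGroupAdminEndpoint master coprocessor is treated as if it had set the new flag. The standalone helper below is an illustrative sketch of that check, not code from this commit; the class and method names are hypothetical, and hbase.coprocessor.master.classes is assumed to be the value behind MasterCoprocessorHost.MASTER_COPROCESSOR_CONF_KEY.

import org.apache.hadoop.conf.Configuration;

final class LegacyRsGroupConfigSketch {
  // True if rs group should be considered enabled for this configuration, mirroring the
  // master startup check: either the new flag is set, or the legacy coprocessor is loaded.
  static boolean rsGroupImplicitlyEnabled(Configuration conf) {
    String[] cpClasses = conf.getStrings("hbase.coprocessor.master.classes");
    if (cpClasses != null) {
      for (String cpClass : cpClasses) {
        if ("org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint".equals(cpClass)) {
          return true;
        }
      }
    }
    return conf.getBoolean("hbase.balancer.rsgroup.enabled", false);
  }
}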
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rsgroup;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.net.Address;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A dummy RSGroupInfoManager which only contains a default rs group.
+ */
+@InterfaceAudience.Private
+class DisabledRSGroupInfoManager implements RSGroupInfoManager {
+
+  private final ServerManager serverManager;
+
+  public DisabledRSGroupInfoManager(ServerManager serverManager) {
+    this.serverManager = serverManager;
+  }
+
+  @Override
+  public void start() {
+  }
+
+  @Override
+  public void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException {
+    throw new DoNotRetryIOException("RSGroup is disabled");
+  }
+
+  @Override
+  public void removeRSGroup(String groupName) throws IOException {
+    throw new DoNotRetryIOException("RSGroup is disabled");
+  }
+
+  @Override
+  public Set<Address> moveServers(Set<Address> servers, String srcGroup, String dstGroup)
+      throws IOException {
+    throw new DoNotRetryIOException("RSGroup is disabled");
+  }
+
+  private SortedSet<Address> getOnlineServers() {
+    SortedSet<Address> onlineServers = new TreeSet<Address>();
+    serverManager.getOnlineServers().keySet().stream().map(ServerName::getAddress)
+      .forEach(onlineServers::add);
+    return onlineServers;
+  }
+
+  @Override
+  public RSGroupInfo getRSGroupOfServer(Address serverHostPort) throws IOException {
+    SortedSet<Address> onlineServers = getOnlineServers();
+    if (onlineServers.contains(serverHostPort)) {
+      return new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, onlineServers);
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public RSGroupInfo getRSGroup(String groupName) throws IOException {
+    if (RSGroupInfo.DEFAULT_GROUP.equals(groupName)) {
+      return new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getOnlineServers());
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public List<RSGroupInfo> listRSGroups() throws IOException {
+    return Arrays.asList(new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getOnlineServers()));
+  }
+
+  @Override
+  public boolean isOnline() {
+    return true;
+  }
+
+  @Override
+  public void removeServers(Set<Address> servers) throws IOException {
+  }
+
+  @Override
+  public RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException {
+    return null;
+  }
+}
@@ -32,6 +32,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public interface RSGroupInfoManager {
 
+  static final String RS_GROUP_ENABLED = "hbase.balancer.rsgroup.enabled";
+
   void start();
 
   /**
@@ -90,6 +92,10 @@ public interface RSGroupInfoManager {
   RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException;
 
   static RSGroupInfoManager create(MasterServices master) throws IOException {
-    return RSGroupInfoManagerImpl.getInstance(master);
+    if (master.getConfiguration().getBoolean(RS_GROUP_ENABLED, false)) {
+      return RSGroupInfoManagerImpl.getInstance(master);
+    } else {
+      return new DisabledRSGroupInfoManager(master.getServerManager());
+    }
   }
 }
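The factory change above is what the new DisabledRSGroupInfoManager (added earlier in this diff) plugs into: with the flag off, callers still get a working manager, but it only ever exposes the default group backed by the currently online servers. A hedged caller-side sketch follows, assuming a MasterServices instance is available; the helper class itself is hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;

final class RsGroupLookupSketch {
  // Resolve the default group through whichever manager the configuration selects.
  static RSGroupInfo defaultGroup(MasterServices master) throws IOException {
    RSGroupInfoManager manager = RSGroupInfoManager.create(master);
    // With hbase.balancer.rsgroup.enabled=false this is the DisabledRSGroupInfoManager:
    // getRSGroup for the default group returns all online servers, any other name yields
    // null, and mutating calls such as addRSGroup throw DoNotRetryIOException.
    return manager.getRSGroup(RSGroupInfo.DEFAULT_GROUP);
  }
}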
@@ -117,8 +117,7 @@ public class TestNamespace {
     assertEquals(2, admin.listNamespaceDescriptors().length);
 
     //verify existence of system tables
-    Set<TableName> systemTables = Sets.newHashSet(
-      TableName.META_TABLE_NAME, TableName.valueOf("hbase:rsgroup"));
+    Set<TableName> systemTables = Sets.newHashSet(TableName.META_TABLE_NAME);
     List<TableDescriptor> descs = admin.listTableDescriptorsByNamespace(
       Bytes.toBytes(NamespaceDescriptor.SYSTEM_NAMESPACE.getName()));
     assertEquals(systemTables.size(), descs.size());
@@ -504,14 +504,14 @@ public abstract class AbstractTestDLS {
       for (String oregion : regions)
         LOG.debug("Region still online: " + oregion);
     }
-    assertEquals(2 + existingRegions, regions.size());
+    assertEquals(1 + existingRegions, regions.size());
     LOG.debug("Enabling table\n");
     TEST_UTIL.getAdmin().enableTable(tableName);
     LOG.debug("Waiting for no more RIT\n");
     blockUntilNoRIT();
     LOG.debug("Verifying there are " + numRegions + " assigned on cluster\n");
     regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
-    assertEquals(numRegions + 2 + existingRegions, regions.size());
+    assertEquals(numRegions + 1 + existingRegions, regions.size());
     return table;
   }
 
@@ -43,7 +43,7 @@ public class TestClusterRestart extends AbstractTestRestartCluster {
 
   private static final Logger LOG = LoggerFactory.getLogger(TestClusterRestart.class);
 
-  private static final int NUM_REGIONS = 4;
+  private static final int NUM_REGIONS = 3;
 
   @Override
   protected boolean splitWALCoordinatedByZk() {
@@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.StartMiniClusterOption;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -91,7 +90,6 @@ public class TestMasterMetrics {
     cluster = TEST_UTIL.getHBaseCluster();
     LOG.info("Waiting for active/ready master");
     cluster.waitForActiveAndReadyMaster();
-    TEST_UTIL.waitTableAvailable(TableName.valueOf("hbase:rsgroup"));
     master = cluster.getMaster();
   }
 
@@ -133,7 +131,7 @@ public class TestMasterMetrics {
     MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource();
     boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
     metricsHelper.assertGauge("numRegionServers", 1 + (tablesOnMaster ? 1 : 0), masterSource);
-    metricsHelper.assertGauge("averageLoad", 2, masterSource);
+    metricsHelper.assertGauge("averageLoad", 1, masterSource);
     metricsHelper.assertGauge("numDeadRegionServers", 0, masterSource);
 
     metricsHelper.assertGauge("masterStartTime", master.getMasterStartTime(), masterSource);
@@ -92,7 +92,7 @@ public class TestMasterRestartAfterDisablingTable {
 
     NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
     assertEquals("The number of regions for the table tableRestart should be 0 and only" +
-        "the catalog table should be present.", 2, regions.size());
+        "the catalog table should be present.", 1, regions.size());
 
     List<MasterThread> masterThreads = cluster.getMasterThreads();
     MasterThread activeMaster = null;
@@ -120,7 +120,7 @@ public class TestMasterRestartAfterDisablingTable {
     log("Verifying there are " + numRegions + " assigned on cluster\n");
     regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
     assertEquals("The assigned regions were not onlined after master" +
-        " switch except for the catalog table.", 6, regions.size());
+        " switch except for the catalog table.", 5, regions.size());
     assertTrue("The table should be in enabled state", cluster.getMaster().getTableStateManager()
         .isTableState(TableName.valueOf(name.getMethodName()), TableState.State.ENABLED));
     ht.close();
@@ -121,7 +121,7 @@ public class TestRollingRestart {
         log("Region still online: " + oregion);
       }
     }
-    assertEquals(2, regions.size());
+    assertEquals(1, regions.size());
     log("Enabling table\n");
     TEST_UTIL.getAdmin().enableTable(tableName);
     log("Waiting for no more RIT\n");
@@ -86,7 +86,6 @@ public class TestRegionOpen {
     final TableName tableName = TableName.valueOf(TestRegionOpen.class.getSimpleName());
     ThreadPoolExecutor exec = getRS().getExecutorService()
         .getExecutorThreadPool(ExecutorType.RS_OPEN_PRIORITY_REGION);
-    HTU.waitTableAvailable(TableName.valueOf("hbase:rsgroup"));
     long completed = exec.getCompletedTaskCount();
 
     HTableDescriptor htd = new HTableDescriptor(tableName);
@@ -156,7 +156,7 @@ public class TestRegionReplicasWithRestartScenarios {
     assertFalse(res);
     int totalRegions = HTU.getMiniHBaseCluster().getLiveRegionServerThreads().stream().
         mapToInt(l -> l.getRegionServer().getOnlineRegions().size()).sum();
-    assertEquals(62, totalRegions);
+    assertEquals(61, totalRegions);
   }
 
   private boolean checkDuplicates(Collection<HRegion> onlineRegions3) throws Exception {
@@ -231,7 +231,7 @@ public class TestRegionServerMetrics {
 
   @Test
   public void testRegionCount() throws Exception {
-    metricsHelper.assertGauge("regionCount", TABLES_ON_MASTER ? 1 : 3, serverSource);
+    metricsHelper.assertGauge("regionCount", TABLES_ON_MASTER ? 1 : 2, serverSource);
   }
 
   @Test
@@ -341,7 +341,7 @@ public class TestRegionServerMetrics {
     TEST_UTIL.getAdmin().flush(tableName);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    assertGauge("storeCount", TABLES_ON_MASTER ? 1 : 6);
+    assertGauge("storeCount", TABLES_ON_MASTER ? 1 : 5);
     assertGauge("storeFileCount", 1);
   }
 
@@ -102,7 +102,7 @@ public class TestTablePermissions {
 
     // Wait for the ACL table to become available
     UTIL.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME);
-    UTIL.waitTableAvailable(TableName.valueOf("hbase:rsgroup"));
+    UTIL.waitTableAvailable(TableName.valueOf("hbase:acl"));
 
     ZKW = new ZKWatcher(UTIL.getConfiguration(),
       "TestTablePermissions", ABORTABLE);
@@ -223,7 +223,7 @@ public class TestTablePermissions {
     // check full load
     Map<byte[], ListMultimap<String, UserPermission>> allPerms = PermissionStorage.loadAll(conf);
     assertEquals("Full permission map should have entries for both test tables",
-        3, allPerms.size());
+        2, allPerms.size());
 
     userPerms = allPerms.get(TEST_TABLE.getName()).get("hubert");
     assertNotNull(userPerms);
@@ -24,7 +24,6 @@ import java.util.stream.Stream;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
@@ -53,7 +52,6 @@ public class TestHBaseFsckReplication {
   public static void setUp() throws Exception {
     UTIL.getConfiguration().setBoolean("hbase.write.hbck1.lock.file", false);
     UTIL.startMiniCluster(1);
-    UTIL.waitTableAvailable(TableName.valueOf("hbase:rsgroup"));
   }
 
   @AfterClass