HBASE-21736 Remove the server from online servers before scheduling SCP for it in hbck
Signed-off-by: Peter Somogyi <psomogyi@apache.org>
parent d6623cf8f0
commit 839dad2c9e
MasterRpcServices.java

@@ -2443,6 +2443,7 @@ public class MasterRpcServices extends RSRpcServices
       for (HBaseProtos.ServerName serverName : serverNames) {
         ServerName server = ProtobufUtil.toServerName(serverName);
         if (shouldSubmitSCP(server)) {
+          master.getServerManager().moveFromOnlineToDeadServers(server);
           ProcedureExecutor<MasterProcedureEnv> procExec = this.master.getMasterProcedureExecutor();
           pids.add(procExec.submitProcedure(new ServerCrashProcedure(procExec.getEnvironment(),
             server, true, containMetaWals(server))));
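The functional change is the single added line: when hbck asks the master to schedule a ServerCrashProcedure, the server is first moved from the online-servers list to the dead-servers list, as the commit title describes. Scheduling is driven from the client through the Hbck fixup API; below is a minimal sketch of such a call (the class and method names are illustrative, not part of the patch). As the updated test further down shows, the returned pid is positive when an SCP was queued and negative (Procedure.NO_PROC_ID, i.e. -1) when the master rejects the request, for example when asked a second time for the same server.

import java.io.IOException;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Hbck;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;

public class ScheduleScpSketch {
  /**
   * Asks the master to queue a ServerCrashProcedure for the given server via the
   * hbck fixup API. Returns one pid per requested server; a negative pid means the
   * master did not queue a new procedure for that server.
   */
  static List<Long> scheduleScp(Hbck hbck, ServerName crashedServer) throws IOException {
    return hbck.scheduleServerCrashProcedure(
      Collections.singletonList(ProtobufUtil.toServerName(crashedServer)));
  }
}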
TestHbck.java

@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -15,12 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 package org.apache.hadoop.hbase.client;
 
-import static junit.framework.TestCase.assertTrue;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 import java.util.stream.Collectors;
@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -59,16 +59,14 @@ import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 
 /**
- * Class to test HBaseHbck.
- * Spins up the minicluster once at test start and then takes it down afterward.
- * Add any testing of HBaseHbck functionality here.
+ * Class to test HBaseHbck. Spins up the minicluster once at test start and then takes it down
+ * afterward. Add any testing of HBaseHbck functionality here.
  */
 @RunWith(Parameterized.class)
-@Category({LargeTests.class, ClientTests.class})
+@Category({ LargeTests.class, ClientTests.class })
 public class TestHbck {
   @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestHbck.class);
+  public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHbck.class);
 
   private static final Logger LOG = LoggerFactory.getLogger(TestHbck.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -112,15 +110,20 @@ public class TestHbck {
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  @Before
+  public void setUp() throws IOException {
+    TEST_UTIL.ensureSomeRegionServersAvailable(3);
+  }
+
   public static class SuspendProcedure extends
       ProcedureTestingUtility.NoopProcedure<MasterProcedureEnv> implements TableProcedureInterface {
     public SuspendProcedure() {
       super();
     }
 
+    @SuppressWarnings({ "rawtypes", "unchecked" })
     @Override
-    protected Procedure[] execute(final MasterProcedureEnv env)
-        throws ProcedureSuspendedException {
+    protected Procedure[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException {
       // Always suspend the procedure
       throw new ProcedureSuspendedException();
     }
@@ -143,8 +146,8 @@ public class TestHbck {
     long procId = procExec.submitProcedure(proc);
     Thread.sleep(500);
 
-    //bypass the procedure
-    List<Long> pids = Arrays.<Long>asList(procId);
+    // bypass the procedure
+    List<Long> pids = Arrays.<Long> asList(procId);
     List<Boolean> results = getHbck().bypassProcedure(pids, 30000, false, false);
     assertTrue("Failed to by pass procedure!", results.get(0));
     TEST_UTIL.waitFor(5000, () -> proc.isSuccess() && proc.isBypass());
@@ -169,33 +172,33 @@
     Hbck hbck = getHbck();
     try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
       List<RegionInfo> regions = admin.getRegions(TABLE_NAME);
-      for (RegionInfo ri: regions) {
-        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
-            getRegionStates().getRegionState(ri.getEncodedName());
+      for (RegionInfo ri : regions) {
+        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
+          .getRegionStates().getRegionState(ri.getEncodedName());
         LOG.info("RS: {}", rs.toString());
       }
-      List<Long> pids = hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).
-          collect(Collectors.toList()));
+      List<Long> pids =
+        hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
       waitOnPids(pids);
-      for (RegionInfo ri: regions) {
-        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
-            getRegionStates().getRegionState(ri.getEncodedName());
+      for (RegionInfo ri : regions) {
+        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
+          .getRegionStates().getRegionState(ri.getEncodedName());
         LOG.info("RS: {}", rs.toString());
         assertTrue(rs.toString(), rs.isClosed());
       }
-      pids = hbck.assigns(regions.stream().map(r -> r.getEncodedName()).
-          collect(Collectors.toList()));
+      pids =
+        hbck.assigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
       waitOnPids(pids);
-      for (RegionInfo ri: regions) {
-        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
-            getRegionStates().getRegionState(ri.getEncodedName());
+      for (RegionInfo ri : regions) {
+        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
+          .getRegionStates().getRegionState(ri.getEncodedName());
         LOG.info("RS: {}", rs.toString());
         assertTrue(rs.toString(), rs.isOpened());
       }
       // What happens if crappy region list passed?
-      pids = hbck.assigns(Arrays.stream(new String [] {"a", "some rubbish name"}).
-          collect(Collectors.toList()));
-      for (long pid: pids) {
+      pids = hbck.assigns(
+        Arrays.stream(new String[] { "a", "some rubbish name" }).collect(Collectors.toList()));
+      for (long pid : pids) {
         assertEquals(org.apache.hadoop.hbase.procedure2.Procedure.NO_PROC_ID, pid);
       }
     }
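The reformatted testAssigns above also documents the calling convention of the region fixup methods: Hbck.unassigns and Hbck.assigns take encoded region names and return the pids of the procedures they submit, with NO_PROC_ID returned for names the master cannot resolve (the "crappy region list" check). A short usage sketch under those assumptions; the helper class and method name are illustrative, not from the patch:

import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Hbck;
import org.apache.hadoop.hbase.client.RegionInfo;

public class RegionFixupSketch {
  /**
   * Schedules UnassignProcedures for every region of the given table and returns their pids.
   * Once those pids finish (the test waits via waitOnPids), the same encoded names can be
   * passed to hbck.assigns(encodedNames) to bring the regions back online.
   */
  static List<Long> unassignTable(Admin admin, Hbck hbck, TableName table) throws IOException {
    List<String> encodedNames = admin.getRegions(table).stream()
      .map(RegionInfo::getEncodedName).collect(Collectors.toList());
    return hbck.unassigns(encodedNames);
  }
}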
@@ -213,17 +216,14 @@
     assertTrue(pids.get(0) > 0);
     LOG.info("pid is {}", pids.get(0));
 
-    pids = hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
-    assertTrue(pids.get(0) == -1);
-    LOG.info("pid is {}", pids.get(0));
+    List<Long> newPids =
+      hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
+    assertTrue(newPids.get(0) < 0);
+    LOG.info("pid is {}", newPids.get(0));
+    waitOnPids(pids);
   }
 
   private void waitOnPids(List<Long> pids) {
-    for (Long pid: pids) {
-      while (!TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().
-          isFinished(pid)) {
-        Threads.sleep(100);
-      }
-    }
+    TEST_UTIL.waitFor(60000, () -> pids.stream().allMatch(procExec::isFinished));
   }
 }
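The rewritten waitOnPids polls the master's procedure executor through TEST_UTIL.waitFor instead of the old per-pid sleep loop. A self-contained sketch of the same idea, with the executor lookup from the removed loop written out explicitly; the class and helper signature here are illustrative, not the committed helper:

import java.util.List;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

public final class PidWaitSketch {
  private PidWaitSketch() {
  }

  /** Blocks (up to 60 seconds) until every given procedure id has finished. */
  static void waitOnPids(HBaseTestingUtility util, List<Long> pids) {
    ProcedureExecutor<MasterProcedureEnv> procExec =
      util.getHBaseCluster().getMaster().getMasterProcedureExecutor();
    // Poll until every submitted procedure id reports finished, or the timeout elapses.
    util.waitFor(60000, () -> pids.stream().allMatch(procExec::isFinished));
  }
}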