HBASE-8169 TestMasterFailover#testMasterFailoverWithMockedRITOnDeadRS may fail due to regions randomly assigned to a RS (Jeffrey Zhong)
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1464790 13f79535-47bb-0310-9956-ffa450edef68
commit 4855d12aa9 (parent 5ca140fe2f)
@@ -2237,6 +2237,25 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     scanner.close();
     return result;
   }
 
+  /**
+   * Create region split keys between startkey and endKey
+   *
+   * @param startKey
+   * @param endKey
+   * @param numRegions the number of regions to be created. it has to be greater than 3.
+   * @return
+   */
+  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
+    assertTrue(numRegions>3);
+    byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
+    byte [][] result = new byte[tmpSplitKeys.length+1][];
+    for (int i=0;i<tmpSplitKeys.length;i++) {
+      result[i+1] = tmpSplitKeys[i];
+    }
+    result[0] = HConstants.EMPTY_BYTE_ARRAY;
+    return result;
+  }
+
   /**
    * Do a small get/scan against one store. This is required because store
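For illustration, a minimal, hypothetical usage sketch of the new helper follows. The SplitKeysDemo class and its main method are not part of the commit; only getRegionSplitStartKeys, HBaseTestingUtility, and Bytes come from HBase. It shows that asking for 30 regions yields 30 start keys, the first of which is the empty byte array:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitKeysDemo {
  public static void main(String[] args) {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // 30 region start keys spanning "aaa".."zzz"; the helper prepends the
    // empty byte array, so the first region starts at the empty key.
    byte[][] startKeys = util.getRegionSplitStartKeys(
        Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 30);
    System.out.println("start keys: " + startKeys.length); // prints 30
    for (byte[] key : startKeys) {
      System.out.println(Bytes.toStringBinary(key));
    }
  }
}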
@@ -25,6 +25,8 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
@@ -485,14 +487,10 @@ public class TestMasterFailover {
     // disable load balancing on this master
     master.balanceSwitch(false);
 
-    // create two tables in META, each with 10 regions
+    // create two tables in META, each with 30 regions
     byte [] FAMILY = Bytes.toBytes("family");
-    byte [][] SPLIT_KEYS = new byte [][] {
-        new byte[0], Bytes.toBytes("aaa"), Bytes.toBytes("bbb"),
-        Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
-        Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
-        Bytes.toBytes("iii"), Bytes.toBytes("jjj")
-    };
+    byte[][] SPLIT_KEYS =
+        TEST_UTIL.getRegionSplitStartKeys(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 30);
 
     byte [] enabledTable = Bytes.toBytes("enabledTable");
     HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable);
@@ -536,11 +534,11 @@ public class TestMasterFailover {
 
     // we'll need some regions to already be assigned out properly on live RS
     List<HRegionInfo> enabledAndAssignedRegions = new ArrayList<HRegionInfo>();
-    enabledAndAssignedRegions.add(enabledRegions.remove(0));
-    enabledAndAssignedRegions.add(enabledRegions.remove(0));
+    enabledAndAssignedRegions.addAll(enabledRegions.subList(0, 6));
+    enabledRegions.removeAll(enabledAndAssignedRegions);
     List<HRegionInfo> disabledAndAssignedRegions = new ArrayList<HRegionInfo>();
-    disabledAndAssignedRegions.add(disabledRegions.remove(0));
-    disabledAndAssignedRegions.add(disabledRegions.remove(0));
+    disabledAndAssignedRegions.addAll(disabledRegions.subList(0, 6));
+    disabledRegions.removeAll(disabledAndAssignedRegions);
 
     // now actually assign them
     for (HRegionInfo hri : enabledAndAssignedRegions) {
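As an aside on the idiom above: List.subList returns a view backed by the source list, so the change first copies the first six regions out via addAll and only then calls removeAll on the source. A small self-contained sketch of the same pattern; the class and strings below are illustrative only, not from the test:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SubListDemo {
  public static void main(String[] args) {
    List<String> regions = new ArrayList<String>(
        Arrays.asList("r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7"));
    List<String> assigned = new ArrayList<String>();
    // Copy the first six elements out of the subList view...
    assigned.addAll(regions.subList(0, 6));
    // ...then drop them from the backing list in one call.
    regions.removeAll(assigned);
    System.out.println(assigned); // [r0, r1, r2, r3, r4, r5]
    System.out.println(regions);  // [r6, r7]
  }
}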
@@ -554,15 +552,20 @@ public class TestMasterFailover {
       master.assignRegion(hri);
     }
 
+    log("Waiting for assignment to finish");
+    ZKAssign.blockUntilNoRIT(zkw);
+    master.assignmentManager.waitUntilNoRegionsInTransition(60000);
+    log("Assignment completed");
+
     assertTrue(" Table must be enabled.", master.getAssignmentManager()
         .getZKTable().isEnabledTable("enabledTable"));
     // we also need regions assigned out on the dead server
     List<HRegionInfo> enabledAndOnDeadRegions = new ArrayList<HRegionInfo>();
-    enabledAndOnDeadRegions.add(enabledRegions.remove(0));
-    enabledAndOnDeadRegions.add(enabledRegions.remove(0));
+    enabledAndOnDeadRegions.addAll(enabledRegions.subList(0, 6));
+    enabledRegions.removeAll(enabledAndOnDeadRegions);
     List<HRegionInfo> disabledAndOnDeadRegions = new ArrayList<HRegionInfo>();
-    disabledAndOnDeadRegions.add(disabledRegions.remove(0));
-    disabledAndOnDeadRegions.add(disabledRegions.remove(0));
+    disabledAndOnDeadRegions.addAll(disabledRegions.subList(0, 6));
+    disabledRegions.removeAll(disabledAndOnDeadRegions);
 
     // set region plan to server to be killed and trigger assign
     for (HRegionInfo hri : enabledAndOnDeadRegions) {
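The two wait calls added here bound how long the test blocks for assignment to settle: first until the ZooKeeper RIT nodes are gone, then until the master's in-memory regions-in-transition drain, capped at 60 seconds. As a generic, self-contained illustration of that kind of bounded poll-and-wait (none of the names below are HBase API; they are stand-ins):

public class BoundedWaitDemo {
  /** A condition to poll, e.g. "no regions in transition". */
  interface Condition {
    boolean isMet();
  }

  /** Polls the condition until it holds or the timeout elapses. */
  static boolean waitUntil(Condition condition, long timeoutMs, long pollMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (condition.isMet()) {
        return true;
      }
      Thread.sleep(pollMs);
    }
    return condition.isMet();
  }

  public static void main(String[] args) throws InterruptedException {
    final long start = System.currentTimeMillis();
    // Stand-in condition: pretend assignment settles after ~200 ms.
    boolean done = waitUntil(new Condition() {
      public boolean isMet() {
        return System.currentTimeMillis() - start > 200;
      }
    }, 60000, 50);
    System.out.println("settled: " + done);
  }
}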
@@ -579,8 +582,25 @@ public class TestMasterFailover {
     // wait for no more RIT
     log("Waiting for assignment to finish");
     ZKAssign.blockUntilNoRIT(zkw);
+    master.assignmentManager.waitUntilNoRegionsInTransition(60000);
     log("Assignment completed");
 
+    // Because master.assignRegion(hri) may fail to assign a region to the specified RS,
+    // we need to make sure the regions ended up on the expected RS.
+    verifyRegionLocation(hrs, enabledAndAssignedRegions);
+    verifyRegionLocation(hrs, disabledAndAssignedRegions);
+    verifyRegionLocation(hrsDead, enabledAndOnDeadRegions);
+    verifyRegionLocation(hrsDead, disabledAndOnDeadRegions);
+
+    assertTrue(" Didn't get enough regions of enabledTable on live rs.",
+        enabledAndAssignedRegions.size() >= 2);
+    assertTrue(" Didn't get enough regions of disabledTable on live rs.",
+        disabledAndAssignedRegions.size() >= 2);
+    assertTrue(" Didn't get enough regions of enabledTable on dead rs.",
+        enabledAndOnDeadRegions.size() >= 2);
+    assertTrue(" Didn't get enough regions of disabledTable on dead rs.",
+        disabledAndOnDeadRegions.size() >= 2);
+
     // Stop the master
     log("Aborting master");
     cluster.abortMaster(0);
@@ -802,6 +822,21 @@ public class TestMasterFailover {
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  /**
+   * Verify regions are on the expected region server
+   */
+  private void verifyRegionLocation(HRegionServer hrs, List<HRegionInfo> regions)
+      throws IOException {
+    List<HRegionInfo> tmpOnlineRegions = ProtobufUtil.getOnlineRegions(hrs);
+    Iterator<HRegionInfo> itr = regions.iterator();
+    while (itr.hasNext()) {
+      HRegionInfo tmp = itr.next();
+      if (!tmpOnlineRegions.contains(tmp)) {
+        itr.remove();
+      }
+    }
+  }
+
   HRegion createRegion(final HRegionInfo hri, final Path rootdir, final Configuration c,
       final HTableDescriptor htd)
   throws IOException {
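One thing worth noting about verifyRegionLocation: despite its name, it asserts nothing itself. It removes from the caller's list any region that is not actually online on the given region server, and the assertions back in the test then only require that at least two regions per category survived the pruning. A self-contained sketch of that prune-in-place pattern in generic Java (the class, method, and region names are illustrative only):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;

public class RetainOnlineDemo {
  /** Removes from 'planned' every element that is not present in 'online'. */
  static <T> void retainOnline(List<T> planned, Collection<T> online) {
    Iterator<T> itr = planned.iterator();
    while (itr.hasNext()) {
      if (!online.contains(itr.next())) {
        itr.remove(); // mutates the caller's list, like verifyRegionLocation does
      }
    }
  }

  public static void main(String[] args) {
    List<String> planned = new ArrayList<String>(
        Arrays.asList("regionA", "regionB", "regionC", "regionD"));
    Collection<String> online = new HashSet<String>(
        Arrays.asList("regionA", "regionC", "regionD"));
    retainOnline(planned, online);
    System.out.println(planned); // [regionA, regionC, regionD]
    // The test would now assert planned.size() >= 2.
  }
}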