HBASE-8344. Improve the assignment when node failures happen: choose the secondary RS as the new primary RS

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1491994 13f79535-47bb-0310-9956-ffa450edef68

parent d6becac229
commit fbd8cf7a73
FavoredNodeAssignmentHelper.java

@@ -72,19 +72,19 @@ public class FavoredNodeAssignmentHelper {
   public final static short FAVORED_NODES_NUM = 3;
 
   public FavoredNodeAssignmentHelper(final List<ServerName> servers, Configuration conf) {
+    this(servers, new RackManager(conf));
+  }
+
+  public FavoredNodeAssignmentHelper(final List<ServerName> servers,
+      final RackManager rackManager) {
     this.servers = servers;
-    this.rackManager = new RackManager(conf);
+    this.rackManager = rackManager;
     this.rackToRegionServerMap = new HashMap<String, List<ServerName>>();
     this.regionServerToRackMap = new HashMap<ServerName, String>();
     this.uniqueRackList = new ArrayList<String>();
     this.random = new Random();
   }
 
-  // For unit tests
-  void setRackManager(RackManager rackManager) {
-    this.rackManager = rackManager;
-  }
-
   /**
    * Perform full scan of the meta table similar to
    * {@link MetaReader#fullScan(CatalogTracker, Set, boolean)} except that this is
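Note: the test-only setRackManager hook is removed in favor of constructor injection, so the RackManager (real or mocked) is fixed at construction time. A minimal sketch of how a test can now inject a stub with Mockito, which the updated tests below also rely on (the specific stubbing shown here is illustrative, not from the patch):

    // Hypothetical test setup: stub every server into "rack1"
    RackManager rackManager = Mockito.mock(RackManager.class);
    Mockito.when(rackManager.getRack(Mockito.any(ServerName.class))).thenReturn("rack1");
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    helper.initialize();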
@@ -204,25 +204,47 @@ public class FavoredNodeAssignmentHelper {
   }
 
   // Place the regions round-robin across the racks picking one server from each
-  // rack at a time. For example, if 2 racks (r1 and r2) with 8 servers (s1..s8) each, it will
-  // choose s1 from r1, s1 from r2, s2 from r1, s2 from r2, ...
+  // rack at a time. Start with a random rack, and a random server from every rack.
+  // If a rack doesn't have enough servers it will go to the next rack and so on
+  // for choosing a primary.
+  // For example, if 4 racks (r1 .. r4) with 8 servers (s1..s8) each, one possible
+  // placement could be r2:s5, r3:s5, r4:s5, r1:s5, r2:s6, r3:s6..
+  // If there were fewer servers in one rack, say r3, which had 3 servers, one possible
+  // placement could be r2:s5, <skip-r3>, r4:s5, r1:s5, r2:s6, <skip-r3> ...
+  // The regions should be distributed proportionately to the racksizes
   void placePrimaryRSAsRoundRobin(Map<ServerName, List<HRegionInfo>> assignmentMap,
       Map<HRegionInfo, ServerName> primaryRSMap, List<HRegionInfo> regions) {
     List<String> rackList = new ArrayList<String>(rackToRegionServerMap.size());
     rackList.addAll(rackToRegionServerMap.keySet());
-    Map<String, Integer> currentProcessIndexMap = new HashMap<String, Integer>();
-    int rackIndex = 0;
-    for (HRegionInfo regionInfo : regions) {
-      String rackName = rackList.get(rackIndex);
-      // Initialize the current processing host index.
-      int serverIndex = 0;
-      // Restore the current process index from the currentProcessIndexMap
-      Integer currentProcessIndex = currentProcessIndexMap.get(rackName);
-      if (currentProcessIndex != null) {
-        serverIndex = currentProcessIndex.intValue();
-      }
+    int rackIndex = random.nextInt(rackList.size());
+    int maxRackSize = 0;
+    for (Map.Entry<String,List<ServerName>> r : rackToRegionServerMap.entrySet()) {
+      if (r.getValue().size() > maxRackSize) {
+        maxRackSize = r.getValue().size();
+      }
+    }
+    int numIterations = 0;
+    int firstServerIndex = random.nextInt(maxRackSize);
+    // Initialize the current processing host index.
+    int serverIndex = firstServerIndex;
+    for (HRegionInfo regionInfo : regions) {
+      List<ServerName> currentServerList;
+      String rackName;
+      while (true) {
+        rackName = rackList.get(rackIndex);
+        numIterations++;
+        // Get the server list for the current rack
+        currentServerList = rackToRegionServerMap.get(rackName);
+
+        if (serverIndex >= currentServerList.size()) { //not enough machines in this rack
+          if (numIterations % rackList.size() == 0) {
+            if (++serverIndex >= maxRackSize) serverIndex = 0;
+          }
+          if ((++rackIndex) >= rackList.size()) {
+            rackIndex = 0; // reset the rack index to 0
+          }
+        } else break;
       }
-      // Get the server list for the current rack
-      List<ServerName> currentServerList = rackToRegionServerMap.get(rackName);
 
       // Get the current process region server
       ServerName currentServer = currentServerList.get(serverIndex);
@@ -237,12 +259,9 @@ public class FavoredNodeAssignmentHelper {
       regionsForServer.add(regionInfo);
 
       // Set the next processing index
-      if ((++serverIndex) >= currentServerList.size()) {
-        // Reset the server index for the current rack
-        serverIndex = 0;
+      if (numIterations % rackList.size() == 0) {
+        ++serverIndex;
       }
-      // Keep track of the next processing index
-      currentProcessIndexMap.put(rackName, serverIndex);
       if ((++rackIndex) >= rackList.size()) {
         rackIndex = 0; // reset the rack index to 0
       }
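The rewritten loop is easier to see outside the diff. Below is a self-contained toy model of the intended behavior, not the patch's exact code (rack names, sizes, and the region count are made up): each pass over the racks hands out one server slot per rack, a rack shorter than the current slot is skipped, and the resulting distribution is proportional to rack sizes. The patch additionally randomizes the starting rack and server index, which does not affect the proportions.

    import java.util.*;

    public class RoundRobinPlacementSketch {
      public static void main(String[] args) {
        LinkedHashMap<String, Integer> rackSizes = new LinkedHashMap<String, Integer>();
        rackSizes.put("r1", 10);
        rackSizes.put("r2", 5);
        rackSizes.put("r3", 10);
        int regions = 500;
        List<String> racks = new ArrayList<String>(rackSizes.keySet());
        int maxRackSize = Collections.max(rackSizes.values());
        Map<String, Integer> placed = new HashMap<String, Integer>();
        int assigned = 0;
        // serverIndex advances once per full pass over the racks; a rack whose
        // size is <= serverIndex contributes nothing on that pass (the <skip-r3>
        // case from the comment in the hunk above).
        for (int serverIndex = 0; assigned < regions;
            serverIndex = (serverIndex + 1) % maxRackSize) {
          for (String rack : racks) {
            if (serverIndex < rackSizes.get(rack) && assigned < regions) {
              Integer old = placed.get(rack);
              placed.put(rack, old == null ? 1 : old + 1);
              assigned++;
            }
          }
        }
        System.out.println(placed); // counts r1=200, r2=100, r3=200, proportional to 10:5:10
      }
    }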
FavoredNodeLoadBalancer.java

@@ -30,9 +30,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.LoadBalancer;
+import org.apache.hadoop.hbase.master.RackManager;
 import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.balancer.FavoredNodes.Position;
+import org.apache.hadoop.hbase.util.Pair;
 
 /**
  * An implementation of the {@link LoadBalancer} that assigns favored nodes for
@@ -52,12 +56,12 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
   private static final Log LOG = LogFactory.getLog(FavoredNodeLoadBalancer.class);
 
   private FavoredNodes globalFavoredNodesAssignmentPlan;
-  private Configuration configuration;
+  private RackManager rackManager;
 
   @Override
   public void setConf(Configuration conf) {
-    this.configuration = conf;
     globalFavoredNodesAssignmentPlan = new FavoredNodes();
+    this.rackManager = new RackManager(conf);
   }
 
   @Override
@@ -76,13 +80,36 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
     Map<ServerName, List<HRegionInfo>> assignmentMap;
     try {
       FavoredNodeAssignmentHelper assignmentHelper =
-          new FavoredNodeAssignmentHelper(servers, configuration);
+          new FavoredNodeAssignmentHelper(servers, rackManager);
       assignmentHelper.initialize();
       if (!assignmentHelper.canPlaceFavoredNodes()) {
         return super.roundRobinAssignment(regions, servers);
       }
+      // Segregate the regions into two types:
+      // 1. The regions that have favored node assignment, and where at least
+      // one of the favored node is still alive. In this case, try to adhere
+      // to the current favored nodes assignment as much as possible - i.e.,
+      // if the current primary is gone, then make the secondary or tertiary
+      // as the new host for the region (based on their current load).
+      // Note that we don't change the favored
+      // node assignments here (even though one or more favored node is currently
+      // down). It is up to the balanceCluster to do this hard work. The HDFS
+      // can handle the fact that some nodes in the favored nodes hint is down
+      // It'd allocate some other DNs. In combination with stale settings for HDFS,
+      // we should be just fine.
+      // 2. The regions that currently don't have favored node assignment. We will
+      // need to come up with favored nodes assignments for them. The corner case
+      // in (1) above is that all the nodes are unavailable and in that case, we
+      // will note that this region doesn't have favored nodes.
+      Pair<Map<ServerName,List<HRegionInfo>>, List<HRegionInfo>> segregatedRegions =
+          segregateRegionsAndAssignRegionsWithFavoredNodes(regions, servers);
+      Map<ServerName,List<HRegionInfo>> regionsWithFavoredNodesMap = segregatedRegions.getFirst();
+      List<HRegionInfo> regionsWithNoFavoredNodes = segregatedRegions.getSecond();
       assignmentMap = new HashMap<ServerName, List<HRegionInfo>>();
-      roundRobinAssignmentImpl(assignmentHelper, assignmentMap, regions, servers);
+      roundRobinAssignmentImpl(assignmentHelper, assignmentMap, regionsWithNoFavoredNodes,
+          servers);
+      // merge the assignment maps
+      assignmentMap.putAll(regionsWithFavoredNodesMap);
     } catch (Exception ex) {
       LOG.warn("Encountered exception while doing favored-nodes assignment " + ex +
           " Falling back to regular assignment");
@@ -95,12 +122,24 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
   public ServerName randomAssignment(HRegionInfo regionInfo, List<ServerName> servers) {
     try {
       FavoredNodeAssignmentHelper assignmentHelper =
-          new FavoredNodeAssignmentHelper(servers, configuration);
+          new FavoredNodeAssignmentHelper(servers, rackManager);
       assignmentHelper.initialize();
       ServerName primary = super.randomAssignment(regionInfo, servers);
       if (!assignmentHelper.canPlaceFavoredNodes()) {
         return primary;
       }
+      List<ServerName> favoredNodes = globalFavoredNodesAssignmentPlan.getFavoredNodes(regionInfo);
+      // check if we have a favored nodes mapping for this region and if so, return
+      // a server from the favored nodes list if the passed 'servers' contains this
+      // server as well (available servers, that is)
+      if (favoredNodes != null) {
+        for (ServerName s : favoredNodes) {
+          ServerName serverWithLegitStartCode = availableServersContains(servers, s);
+          if (serverWithLegitStartCode != null) {
+            return serverWithLegitStartCode;
+          }
+        }
+      }
       List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
       regions.add(regionInfo);
       Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>(1);
@@ -114,6 +153,90 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
     }
   }
 
+  private Pair<Map<ServerName, List<HRegionInfo>>, List<HRegionInfo>>
+  segregateRegionsAndAssignRegionsWithFavoredNodes(List<HRegionInfo> regions,
+      List<ServerName> availableServers) {
+    Map<ServerName, List<HRegionInfo>> assignmentMapForFavoredNodes =
+        new HashMap<ServerName, List<HRegionInfo>>(regions.size() / 2);
+    List<HRegionInfo> regionsWithNoFavoredNodes = new ArrayList<HRegionInfo>(regions.size()/2);
+    for (HRegionInfo region : regions) {
+      List<ServerName> favoredNodes = globalFavoredNodesAssignmentPlan.getFavoredNodes(region);
+      ServerName primaryHost = null;
+      ServerName secondaryHost = null;
+      ServerName tertiaryHost = null;
+      if (favoredNodes != null) {
+        for (ServerName s : favoredNodes) {
+          ServerName serverWithLegitStartCode = availableServersContains(availableServers, s);
+          if (serverWithLegitStartCode != null) {
+            FavoredNodes.Position position =
+                FavoredNodes.getFavoredServerPosition(favoredNodes, s);
+            if (Position.PRIMARY.equals(position)) {
+              primaryHost = serverWithLegitStartCode;
+            } else if (Position.SECONDARY.equals(position)) {
+              secondaryHost = serverWithLegitStartCode;
+            } else if (Position.TERTIARY.equals(position)) {
+              tertiaryHost = serverWithLegitStartCode;
+            }
+          }
+        }
+        assignRegionToAvailableFavoredNode(assignmentMapForFavoredNodes, region,
+            primaryHost, secondaryHost, tertiaryHost);
+      }
+      if (primaryHost == null && secondaryHost == null && tertiaryHost == null) {
+        //all favored nodes unavailable
+        regionsWithNoFavoredNodes.add(region);
+      }
+    }
+    return new Pair<Map<ServerName, List<HRegionInfo>>, List<HRegionInfo>>(
+        assignmentMapForFavoredNodes, regionsWithNoFavoredNodes);
+  }
+
+  // Do a check of the hostname and port and return the servername from the servers list
+  // that matched (the favoredNode will have a startcode of -1 but we want the real
+  // server with the legit startcode)
+  private ServerName availableServersContains(List<ServerName> servers, ServerName favoredNode) {
+    for (ServerName server : servers) {
+      if (ServerName.isSameHostnameAndPort(favoredNode, server)) {
+        return server;
+      }
+    }
+    return null;
+  }
+
+  private void assignRegionToAvailableFavoredNode(Map<ServerName,
+      List<HRegionInfo>> assignmentMapForFavoredNodes, HRegionInfo region, ServerName primaryHost,
+      ServerName secondaryHost, ServerName tertiaryHost) {
+    if (primaryHost != null) {
+      addRegionToMap(assignmentMapForFavoredNodes, region, primaryHost);
+    } else if (secondaryHost != null && tertiaryHost != null) {
+      // assign the region to the one with a lower load
+      // (both have the desired hdfs blocks)
+      ServerName s;
+      ServerLoad tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
+      ServerLoad secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
+      if (secondaryLoad.getLoad() < tertiaryLoad.getLoad()) {
+        s = secondaryHost;
+      } else {
+        s = tertiaryHost;
+      }
+      addRegionToMap(assignmentMapForFavoredNodes, region, s);
+    } else if (secondaryHost != null) {
+      addRegionToMap(assignmentMapForFavoredNodes, region, secondaryHost);
+    } else if (tertiaryHost != null) {
+      addRegionToMap(assignmentMapForFavoredNodes, region, tertiaryHost);
+    }
+  }
+
+  private void addRegionToMap(Map<ServerName, List<HRegionInfo>> assignmentMapForFavoredNodes,
+      HRegionInfo region, ServerName host) {
+    List<HRegionInfo> regionsOnServer = null;
+    if ((regionsOnServer = assignmentMapForFavoredNodes.get(host)) == null) {
+      regionsOnServer = new ArrayList<HRegionInfo>();
+      assignmentMapForFavoredNodes.put(host, regionsOnServer);
+    }
+    regionsOnServer.add(region);
+  }
+
   public List<ServerName> getFavoredNodes(HRegionInfo regionInfo) {
     return this.globalFavoredNodesAssignmentPlan.getFavoredNodes(regionInfo);
   }
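In short, when a region's primary favored node is gone, the patch reassigns the region to the secondary or tertiary, whichever reports the lower load, without rewriting the favored-nodes hint. A condensed sketch of that decision, assuming a plain load lookup in place of the ServerManager/ServerLoad calls above (the helper and its map parameter are illustrative, not part of the patch):

    // Illustrative only: the failover preference implemented by
    // assignRegionToAvailableFavoredNode, with load as a simple map.
    static ServerName pickHost(ServerName primary, ServerName secondary,
        ServerName tertiary, Map<ServerName, Integer> load) {
      if (primary != null) return primary;          // primary alive: keep the region on it
      if (secondary != null && tertiary != null) {  // both replicas alive: less-loaded wins,
        return load.get(secondary) < load.get(tertiary) ? secondary : tertiary; // ties go to tertiary
      }
      return secondary != null ? secondary : tertiary; // whichever survivor is left (may be null)
    }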
@@ -135,12 +258,18 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
     assignmentHelper.placeSecondaryAndTertiaryRS(primaryRSMap);
     // now record all the assignments so that we can serve queries later
     for (HRegionInfo region : regions) {
+      // Store the favored nodes without startCode for the ServerName objects
+      // We don't care about the startcode; but only the hostname really
       List<ServerName> favoredNodesForRegion = new ArrayList<ServerName>(3);
-      favoredNodesForRegion.add(primaryRSMap.get(region));
+      ServerName sn = primaryRSMap.get(region);
+      favoredNodesForRegion.add(new ServerName(sn.getHostname(), sn.getPort(),
+          ServerName.NON_STARTCODE));
       ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(region);
       if (secondaryAndTertiaryNodes != null) {
-        favoredNodesForRegion.add(secondaryAndTertiaryNodes[0]);
-        favoredNodesForRegion.add(secondaryAndTertiaryNodes[1]);
+        favoredNodesForRegion.add(new ServerName(secondaryAndTertiaryNodes[0].getHostname(),
+            secondaryAndTertiaryNodes[0].getPort(), ServerName.NON_STARTCODE));
+        favoredNodesForRegion.add(new ServerName(secondaryAndTertiaryNodes[1].getHostname(),
+            secondaryAndTertiaryNodes[1].getPort(), ServerName.NON_STARTCODE));
       }
       globalFavoredNodesAssignmentPlan.updateFavoredNodesMap(region, favoredNodesForRegion);
     }
@@ -148,6 +277,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
 
   void noteFavoredNodes(final Map<HRegionInfo, ServerName[]> favoredNodesMap) {
     for (Map.Entry<HRegionInfo, ServerName[]> entry : favoredNodesMap.entrySet()) {
+      // the META should already have favorednode ServerName objects without startcode
      globalFavoredNodesAssignmentPlan.updateFavoredNodesMap(entry.getKey(),
           Arrays.asList(entry.getValue()));
     }
FavoredNodes.java

@@ -73,4 +73,25 @@ public class FavoredNodes {
   public synchronized List<ServerName> getFavoredNodes(HRegionInfo region) {
     return favoredNodesMap.get(region);
   }
+
+  /**
+   * Return the position of the server in the favoredNodes list. Assumes the
+   * favoredNodes list is of size 3.
+   * @param favoredNodes
+   * @param server
+   * @return position
+   */
+  static Position getFavoredServerPosition(
+      List<ServerName> favoredNodes, ServerName server) {
+    if (favoredNodes == null || server == null ||
+        favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) {
+      return null;
+    }
+    for (Position p : Position.values()) {
+      if (favoredNodes.get(p.ordinal()).equals(server)) {
+        return p;
+      }
+    }
+    return null;
+  }
 }
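Usage is straightforward: the favored-nodes list is ordered [primary, secondary, tertiary], so position lookup reduces to an index comparison via Position.ordinal(). A hypothetical snippet (the three ServerName variables are placeholders):

    List<ServerName> favoredNodes = Arrays.asList(primary, secondary, tertiary);
    Position p = FavoredNodes.getFavoredServerPosition(favoredNodes, secondary);
    // p == Position.SECONDARY; a null or short list, or an unknown server, yields null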
TestRegionPlacement.java

@@ -25,9 +25,11 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
@@ -62,12 +64,15 @@ import org.junit.experimental.categories.Category;
 public class TestRegionPlacement {
   final static Log LOG = LogFactory.getLog(TestRegionPlacement.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private final static int SLAVES = 4;
+  private final static int SLAVES = 10;
   private static HBaseAdmin admin;
   private static Position[] positions = Position.values();
   private int REGION_NUM = 10;
   private Map<HRegionInfo, ServerName[]> favoredNodesAssignmentPlan =
       new HashMap<HRegionInfo, ServerName[]>();
+  private final static int PRIMARY = Position.PRIMARY.ordinal();
+  private final static int SECONDARY = Position.SECONDARY.ordinal();
+  private final static int TERTIARY = Position.TERTIARY.ordinal();
 
   @BeforeClass
   public static void setupBeforeClass() throws Exception {
@@ -75,6 +80,7 @@ public class TestRegionPlacement {
     // Enable the favored nodes based load balancer
     conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
         FavoredNodeLoadBalancer.class, LoadBalancer.class);
+    conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
     TEST_UTIL.startMiniCluster(SLAVES);
     admin = new HBaseAdmin(conf);
   }
@@ -85,27 +91,108 @@ public class TestRegionPlacement {
   }
 
   @Test
-  public void testGetFavoredNodes() {
+  public void testFavoredNodesPresentForRoundRobinAssignment() {
     LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
-    HRegionInfo regionInfo = new HRegionInfo("oneregion".getBytes());
+    balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
     List<ServerName> servers = new ArrayList<ServerName>();
-    for (int i = 0; i < 10; i++) {
-      ServerName server = new ServerName("foo"+i+":1234",-1);
+    for (int i = 0; i < SLAVES; i++) {
+      ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
       servers.add(server);
     }
-    // test that we have enough favored nodes after we call randomAssignment
-    balancer.randomAssignment(regionInfo, servers);
-    assertTrue(((FavoredNodeLoadBalancer)balancer).getFavoredNodes(regionInfo).size() == 3);
-    List<HRegionInfo> regions = new ArrayList<HRegionInfo>(100);
-    for (int i = 0; i < 100; i++) {
-      HRegionInfo region = new HRegionInfo(("foobar"+i).getBytes());
-      regions.add(region);
-    }
-    // test that we have enough favored nodes after we call roundRobinAssignment
-    balancer.roundRobinAssignment(regions, servers);
-    for (int i = 0; i < 100; i++) {
-      assertTrue(((FavoredNodeLoadBalancer)balancer).getFavoredNodes(regions.get(i)).size() == 3);
-    }
+    List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
+    HRegionInfo region = new HRegionInfo(("foobar").getBytes());
+    regions.add(region);
+    Map<ServerName,List<HRegionInfo>> assignmentMap = balancer.roundRobinAssignment(regions,
+        servers);
+    Set<ServerName> serverBefore = assignmentMap.keySet();
+    List<ServerName> favoredNodesBefore =
+        ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
+    assertTrue(favoredNodesBefore.size() == 3);
+    // the primary RS should be the one that the balancer's assignment returns
+    assertTrue(ServerName.isSameHostnameAndPort(serverBefore.iterator().next(),
+        favoredNodesBefore.get(PRIMARY)));
+    // now remove the primary from the list of available servers
+    List<ServerName> removedServers = removeMatchingServers(serverBefore, servers);
+    // call roundRobinAssignment with the modified servers list
+    assignmentMap = balancer.roundRobinAssignment(regions, servers);
+    List<ServerName> favoredNodesAfter =
+        ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
+    assertTrue(favoredNodesAfter.size() == 3);
+    // We don't expect the favored nodes assignments to change in multiple calls
+    // to the roundRobinAssignment method in the balancer (relevant for AssignmentManager.assign
+    // failures)
+    assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
+    Set<ServerName> serverAfter = assignmentMap.keySet();
+    // We expect the new RegionServer assignee to be one of the favored nodes
+    // chosen earlier.
+    assertTrue(ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
+        favoredNodesBefore.get(SECONDARY)) ||
+        ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
+        favoredNodesBefore.get(TERTIARY)));
+
+    // put back the primary in the list of available servers
+    servers.addAll(removedServers);
+    // now roundRobinAssignment with the modified servers list should return the primary
+    // as the regionserver assignee
+    assignmentMap = balancer.roundRobinAssignment(regions, servers);
+    Set<ServerName> serverWithPrimary = assignmentMap.keySet();
+    assertTrue(serverBefore.containsAll(serverWithPrimary));
+
+    // Make all the favored nodes unavailable for assignment
+    removeMatchingServers(favoredNodesAfter, servers);
+    // call roundRobinAssignment with the modified servers list
+    assignmentMap = balancer.roundRobinAssignment(regions, servers);
+    List<ServerName> favoredNodesNow =
+        ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
+    assertTrue(favoredNodesNow.size() == 3);
+    assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) &&
+        !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) &&
+        !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
+  }
+
+  @Test
+  public void testFavoredNodesPresentForRandomAssignment() {
+    LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
+    balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
+    List<ServerName> servers = new ArrayList<ServerName>();
+    for (int i = 0; i < SLAVES; i++) {
+      ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
+      servers.add(server);
+    }
+    List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
+    HRegionInfo region = new HRegionInfo(("foobar").getBytes());
+    regions.add(region);
+    ServerName serverBefore = balancer.randomAssignment(region, servers);
+    List<ServerName> favoredNodesBefore =
+        ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
+    assertTrue(favoredNodesBefore.size() == 3);
+    // the primary RS should be the one that the balancer's assignment returns
+    assertTrue(ServerName.isSameHostnameAndPort(serverBefore, favoredNodesBefore.get(PRIMARY)));
+    // now remove the primary from the list of servers
+    removeMatchingServers(serverBefore, servers);
+    // call randomAssignment with the modified servers list
+    ServerName serverAfter = balancer.randomAssignment(region, servers);
+    List<ServerName> favoredNodesAfter =
+        ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
+    assertTrue(favoredNodesAfter.size() == 3);
+    // We don't expect the favored nodes assignments to change in multiple calls
+    // to the randomAssignment method in the balancer (relevant for AssignmentManager.assign
+    // failures)
+    assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
+    // We expect the new RegionServer assignee to be one of the favored nodes
+    // chosen earlier.
+    assertTrue(ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(SECONDARY)) ||
+        ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(TERTIARY)));
+    // Make all the favored nodes unavailable for assignment
+    removeMatchingServers(favoredNodesAfter, servers);
+    // call randomAssignment with the modified servers list
+    balancer.randomAssignment(region, servers);
+    List<ServerName> favoredNodesNow =
+        ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
+    assertTrue(favoredNodesNow.size() == 3);
+    assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) &&
+        !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) &&
+        !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
   }
 
   @Test(timeout = 180000)
@@ -123,6 +210,27 @@ public class TestRegionPlacement {
     verifyRegionServerUpdated();
   }
 
+  private List<ServerName> removeMatchingServers(ServerName serverWithoutStartCode,
+      List<ServerName> servers) {
+    List<ServerName> serversToRemove = new ArrayList<ServerName>();
+    for (ServerName s : servers) {
+      if (ServerName.isSameHostnameAndPort(s, serverWithoutStartCode)) {
+        serversToRemove.add(s);
+      }
+    }
+    servers.removeAll(serversToRemove);
+    return serversToRemove;
+  }
+
+  private List<ServerName> removeMatchingServers(Collection<ServerName> serversWithoutStartCode,
+      List<ServerName> servers) {
+    List<ServerName> serversToRemove = new ArrayList<ServerName>();
+    for (ServerName s : serversWithoutStartCode) {
+      serversToRemove.addAll(removeMatchingServers(s, servers));
+    }
+    return serversToRemove;
+  }
+
   /**
    * Verify the number of user regions is assigned to the primary
    * region server based on the plan is expected
@@ -204,8 +312,6 @@ public class TestRegionPlacement {
       HRegionInfo info = MetaScanner.getHRegionInfo(result);
       byte[] server = result.getValue(HConstants.CATALOG_FAMILY,
           HConstants.SERVER_QUALIFIER);
-      byte[] startCode = result.getValue(HConstants.CATALOG_FAMILY,
-          HConstants.STARTCODE_QUALIFIER);
       byte[] favoredNodes = result.getValue(HConstants.CATALOG_FAMILY,
           FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER);
       // Add the favored nodes into assignment plan
@@ -218,7 +324,7 @@ public class TestRegionPlacement {
       totalRegionNum.incrementAndGet();
       if (server != null) {
         ServerName serverName =
-            new ServerName(Bytes.toString(server),Bytes.toLong(startCode));
+            new ServerName(Bytes.toString(server), -1);
         if (favoredNodes != null) {
           String placement = "[NOT FAVORED NODE]";
           for (int i = 0; i < favoredServerList.length; i++) {
TestFavoredNodeAssignmentHelper.java

@@ -24,10 +24,12 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.RackManager;
 import org.apache.hadoop.hbase.util.Triple;
@@ -36,7 +38,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
 
-@Category(MediumTests.class)
+@Category(SmallTests.class)
 public class TestFavoredNodeAssignmentHelper {
 
   private static List<ServerName> servers = new ArrayList<ServerName>();
@@ -108,12 +110,29 @@ public class TestFavoredNodeAssignmentHelper {
   public void testPlacePrimaryRSAsRoundRobin() {
     // Test the regular case where there are many servers in different racks
     // Test once for few regions and once for many regions
-    primaryRSPlacement(6, null);
+    primaryRSPlacement(6, null, 10, 10, 10);
     // now create lots of regions and try to place them on the limited number of machines
-    primaryRSPlacement(600, null);
+    primaryRSPlacement(600, null, 10, 10, 10);
   }
 
-  //@Test
+  @Test
+  public void testRoundRobinAssignmentsWithUnevenSizedRacks() {
+    //In the case of uneven racks, the regions should be distributed
+    //proportionately to the rack sizes
+    primaryRSPlacement(6, null, 10, 10, 10);
+    primaryRSPlacement(600, null, 10, 10, 5);
+    primaryRSPlacement(600, null, 10, 5, 10);
+    primaryRSPlacement(600, null, 5, 10, 10);
+    primaryRSPlacement(500, null, 10, 10, 5);
+    primaryRSPlacement(500, null, 10, 5, 10);
+    primaryRSPlacement(500, null, 5, 10, 10);
+    primaryRSPlacement(500, null, 9, 7, 8);
+    primaryRSPlacement(500, null, 8, 7, 9);
+    primaryRSPlacement(500, null, 7, 9, 8);
+    primaryRSPlacement(459, null, 7, 9, 8);
+  }
+
+  @Test
   public void testSecondaryAndTertiaryPlacementWithSingleRack() {
     // Test the case where there is a single rack and we need to choose
     // Primary/Secondary/Tertiary from a single rack.
@@ -245,10 +264,9 @@ public class TestFavoredNodeAssignmentHelper {
     List<ServerName> servers = getServersFromRack(rackToServerCount);
     FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers,
         new Configuration());
-    helper = new FavoredNodeAssignmentHelper(servers, new Configuration());
+    helper = new FavoredNodeAssignmentHelper(servers, rackManager);
     Map<ServerName, List<HRegionInfo>> assignmentMap =
         new HashMap<ServerName, List<HRegionInfo>>();
-    helper.setRackManager(rackManager);
     helper.initialize();
     // create regions
     List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionCount);
@@ -262,15 +280,15 @@ public class TestFavoredNodeAssignmentHelper {
         (primaryRSMap, helper, regions);
   }
 
-  private void primaryRSPlacement(int regionCount, Map<HRegionInfo, ServerName> primaryRSMap) {
+  private void primaryRSPlacement(int regionCount, Map<HRegionInfo, ServerName> primaryRSMap,
+      int firstRackSize, int secondRackSize, int thirdRackSize) {
     Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
-    rackToServerCount.put("rack1", 10);
-    rackToServerCount.put("rack2", 10);
-    rackToServerCount.put("rack3", 10);
+    rackToServerCount.put("rack1", firstRackSize);
+    rackToServerCount.put("rack2", secondRackSize);
+    rackToServerCount.put("rack3", thirdRackSize);
     List<ServerName> servers = getServersFromRack(rackToServerCount);
     FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers,
-        new Configuration());
-    helper.setRackManager(rackManager);
+        rackManager);
     helper.initialize();
 
     assertTrue(helper.canPlaceFavoredNodes());
@@ -291,21 +309,51 @@ public class TestFavoredNodeAssignmentHelper {
     int regionsOnRack1 = 0;
     int regionsOnRack2 = 0;
     int regionsOnRack3 = 0;
-    for (Map.Entry<HRegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
-      if (rackManager.getRack(entry.getValue()).equals("rack1")) {
+    for (HRegionInfo region : regions) {
+      if (rackManager.getRack(primaryRSMap.get(region)).equals("rack1")) {
         regionsOnRack1++;
-      } else if (rackManager.getRack(entry.getValue()).equals("rack2")) {
+      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack2")) {
         regionsOnRack2++;
-      } else if (rackManager.getRack(entry.getValue()).equals("rack3")) {
+      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack3")) {
         regionsOnRack3++;
       }
     }
-    int numRegionsPerRack = (int)Math.ceil((double)regionCount/3); //since there are 3 servers
-    assertTrue(regionsOnRack1 == numRegionsPerRack && regionsOnRack2 == numRegionsPerRack
-        && regionsOnRack3 == numRegionsPerRack);
-    int numServersPerRack = (int)Math.ceil((double)regionCount/30); //since there are 30 servers
-    for (Map.Entry<ServerName, List<HRegionInfo>> entry : assignmentMap.entrySet()) {
-      assertTrue(entry.getValue().size() == numServersPerRack);
-    }
+    // Verify that the regions got placed in the way we expect (documented in
+    // FavoredNodeAssignmentHelper#placePrimaryRSAsRoundRobin)
+    checkNumRegions(regionCount, firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
+        regionsOnRack2, regionsOnRack3, assignmentMap);
+  }
+
+  private void checkNumRegions(int regionCount, int firstRackSize, int secondRackSize,
+      int thirdRackSize, int regionsOnRack1, int regionsOnRack2, int regionsOnRack3,
+      Map<ServerName, List<HRegionInfo>> assignmentMap) {
+    //The regions should be distributed proportionately to the racksizes
+    //Verify the ordering was as expected by inserting the racks and regions
+    //in sorted maps. The keys being the racksize and numregions; values are
+    //the relative positions of the racksizes and numregions respectively
+    SortedMap<Integer, Integer> rackMap = new TreeMap<Integer, Integer>();
+    rackMap.put(firstRackSize, 1);
+    rackMap.put(secondRackSize, 2);
+    rackMap.put(thirdRackSize, 3);
+    SortedMap<Integer, Integer> regionMap = new TreeMap<Integer, Integer>();
+    regionMap.put(regionsOnRack1, 1);
+    regionMap.put(regionsOnRack2, 2);
+    regionMap.put(regionsOnRack3, 3);
+    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize,
+        regionsOnRack1, regionsOnRack2, regionsOnRack3),
+        rackMap.get(firstRackSize) == regionMap.get(regionsOnRack1));
+    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize,
+        regionsOnRack1, regionsOnRack2, regionsOnRack3),
+        rackMap.get(secondRackSize) == regionMap.get(regionsOnRack2));
+    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize,
+        regionsOnRack1, regionsOnRack2, regionsOnRack3),
+        rackMap.get(thirdRackSize) == regionMap.get(regionsOnRack3));
+  }
+
+  private String printProportions(int firstRackSize, int secondRackSize,
+      int thirdRackSize, int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) {
+    return "The rack sizes " + firstRackSize + " " + secondRackSize
+        + " " + thirdRackSize + " " + regionsOnRack1 + " " + regionsOnRack2 +
+        " " + regionsOnRack3;
+  }
 }
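A worked example of the rank-order check above, under assumed placement counts: with rack sizes (r1=10, r2=5, r3=10) and 500 regions placed as (200, 100, 200), the maps become rackMap = {5=2, 10=3} (the duplicate key 10 keeps the last inserted position, 3) and regionMap = {100=2, 200=3}; then rackMap.get(10) == regionMap.get(200) and rackMap.get(5) == regionMap.get(100), so all three assertions pass. In other words, the test asserts that the relative ordering of region counts tracks the relative ordering of rack sizes, not exact per-rack counts.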