diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index ce5011deba2..85a0b314f03 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -619,11 +619,20 @@ possible configurations would overwhelm and obscure the important.
</property>
<property>
  <name>hbase.regions.slop</name>
- <value>0.001</value>
- <description>Rebalance if any regionserver has average + (average * slop) regions.
- The default value of this parameter is 0.001 in StochasticLoadBalancer (the default load
- balancer), while the default is 0.2 in other load balancers (i.e.,
- SimpleLoadBalancer).</description>
+ <value>0.2</value>
+ <description>The load balancer can trigger for several reasons. This value controls one of
+ those reasons. Run the balancer if any regionserver has a region count outside the range of
+ average +/- (average * slop) regions.
+ If the value of slop is negative, disable sloppiness checks. The balancer can still run for
+ other reasons, but sloppiness will not be one of them.
+ If the value of slop is 0, run the balancer if any server has a region count more than 1
+ from the average.
+ If the value of slop is 100, run the balancer if any server has a region count greater than
+ 101 times the average.
+ The default value of this parameter is 0.2, which runs the balancer if any server has a region
+ count less than 80% of the average, or greater than 120% of the average.
+ Note that for the default StochasticLoadBalancer, this does not guarantee any balancing
+ actions will be taken, but only that the balancer will attempt to run.</description>
</property>
<property>
  <name>hbase.normalizer.period</name>
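
To make the floor/ceiling arithmetic described above concrete, here is a small standalone sketch (illustrative only, not part of this patch; the class and method names are invented) that mirrors the check the balancers now share via BaseLoadBalancer#sloppyRegionServerExist:

```java
// Illustrative only: mirrors the sloppiness range described by hbase.regions.slop.
// Class and method names here are hypothetical, not part of the patch.
public class SlopExample {

  // Returns true when a server's region count falls outside average +/- (average * slop).
  static boolean isSloppy(float average, float slop, int minLoad, int maxLoad) {
    if (slop < 0) {
      return false; // a negative slop disables the sloppiness check entirely
    }
    int floor = (int) Math.floor(average * (1 - slop));
    int ceiling = (int) Math.ceil(average * (1 + slop));
    return maxLoad > ceiling || minLoad < floor;
  }

  public static void main(String[] args) {
    // Average of 100 regions per server with the default slop of 0.2:
    // the acceptable band is roughly 80 to 120 regions per server.
    System.out.println(isSloppy(100f, 0.2f, 100, 150)); // true: 150 is well above the band
    System.out.println(isSloppy(100f, 0.2f, 90, 110));  // false: everything within +/- 20%
    System.out.println(isSloppy(100f, -1f, 0, 1000));   // false: negative slop disables the check
  }
}
```

With slop set to a negative value, only the other triggers (idle servers, cost functions, and so on) can start a balancer run, which is what the updated tests below rely on when they set the slop key to -1.
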
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 1052617b59f..93117405b01 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -26,6 +26,7 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
+import java.util.NavigableMap;
import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
@@ -249,6 +250,28 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
return isServerExistsWithMoreRegions && isServerExistsWithZeroRegions;
}
+ protected final boolean sloppyRegionServerExist(ClusterLoadState cs) {
+ if (slop < 0) {
+ LOG.debug("Slop is less than zero, not checking for sloppiness.");
+ return false;
+ }
+ float average = cs.getLoadAverage(); // for logging
+ int floor = (int) Math.floor(average * (1 - slop));
+ int ceiling = (int) Math.ceil(average * (1 + slop));
+ if (!(cs.getMaxLoad() > ceiling || cs.getMinLoad() < floor)) {
+ NavigableMap<ServerAndLoad, List<RegionInfo>> serversByLoad = cs.getServersByLoad();
+ if (LOG.isTraceEnabled()) {
+ // If nothing to balance, then don't say anything unless trace-level logging.
+ LOG.trace("Skipping load balancing because balanced cluster; " + "servers=" +
+ cs.getNumServers() + " regions=" + cs.getNumRegions() + " average=" + average +
+ " mostloaded=" + serversByLoad.lastKey().getLoad() + " leastloaded=" +
+ serversByLoad.firstKey().getLoad());
+ }
+ return false;
+ }
+ return true;
+ }
+
/**
* Generates a bulk assignment plan to be used on cluster startup using a
* simple round-robin assignment.
@@ -532,16 +555,6 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
return assignments;
}
- protected final float normalizeSlop(float slop) {
- if (slop < 0) {
- return 0;
- }
- if (slop > 1) {
- return 1;
- }
- return slop;
- }
-
protected float getDefaultSlop() {
return 0.2f;
}
@@ -554,11 +567,9 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
}
protected void loadConf(Configuration conf) {
- this.slop =normalizeSlop(conf.getFloat("hbase.regions.slop", getDefaultSlop()));
-
- this.onlySystemTablesOnMaster = LoadBalancer.isSystemTablesOnlyOnMaster(conf);
-
+ this.slop = conf.getFloat("hbase.regions.slop", getDefaultSlop());
this.rackManager = new RackManager(getConf());
+ this.onlySystemTablesOnMaster = LoadBalancer.isSystemTablesOnlyOnMaster(conf);
useRegionFinder = conf.getBoolean("hbase.master.balancer.uselocality", true);
if (useRegionFinder) {
regionFinder = createRegionLocationFinder(conf);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
index 4e180aad9d2..8bea2584a3f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
@@ -195,24 +195,9 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
if (idleRegionServerExist(c)) {
return true;
}
-
// Check if we even need to do any load balancing
// HBASE-3681 check sloppiness first
- float average = cs.getLoadAverage(); // for logging
- int floor = (int) Math.floor(average * (1 - slop));
- int ceiling = (int) Math.ceil(average * (1 + slop));
- if (!(cs.getMaxLoad() > ceiling || cs.getMinLoad() < floor)) {
- NavigableMap<ServerAndLoad, List<RegionInfo>> serversByLoad = cs.getServersByLoad();
- if (LOG.isTraceEnabled()) {
- // If nothing to balance, then don't say anything unless trace-level logging.
- LOG.trace("Skipping load balancing because balanced cluster; " + "servers=" +
- cs.getNumServers() + " regions=" + cs.getNumRegions() + " average=" + average +
- " mostloaded=" + serversByLoad.lastKey().getLoad() + " leastloaded=" +
- serversByLoad.firstKey().getLoad());
- }
- return false;
- }
- return true;
+ return sloppyRegionServerExist(cs);
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index a69931e63ca..a798b4a3dd0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -211,11 +211,6 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
return this.candidateGenerators;
}
- @Override
- protected float getDefaultSlop() {
- return 0.001f;
- }
-
protected List<CandidateGenerator> createCandidateGenerators() {
List<CandidateGenerator> candidateGenerators = new ArrayList<>(4);
candidateGenerators.add(GeneratorType.RANDOM.ordinal(), new RandomCandidateGenerator());
@@ -370,6 +365,12 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
return true;
}
+ if (sloppyRegionServerExist(cs)) {
+ LOG.info("Running balancer because cluster has sloppy server(s)."+
+ " function cost={}", functionCost());
+ return true;
+ }
+
double total = 0.0;
for (CostFunction c : costFunctions) {
if (!c.isNeeded()) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java
index 6c779f692e1..05a0e8ed9b5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java
@@ -75,7 +75,6 @@ public class BalancerTestBase {
public static void beforeAllTests() throws Exception {
conf = HBaseConfiguration.create();
conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
- conf.setFloat("hbase.regions.slop", 0.0f);
conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
loadBalancer = new StochasticLoadBalancer(dummyMetricsStochasticBalancer);
MasterServices services = mock(MasterServices.class);
@@ -85,83 +84,122 @@ public class BalancerTestBase {
}
protected int[] largeCluster = new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 };
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 };
// int[testnum][servernumber] -> numregions
protected int[][] clusterStateMocks = new int[][]{
- // 1 node
- new int[]{0},
- new int[]{1},
- new int[]{10},
- // 2 node
- new int[]{0, 0},
- new int[]{2, 0},
- new int[]{2, 1},
- new int[]{2, 2},
- new int[]{2, 3},
- new int[]{2, 4},
- new int[]{1, 1},
- new int[]{0, 1},
- new int[]{10, 1},
- new int[]{514, 1432},
- new int[]{48, 53},
- // 3 node
- new int[]{0, 1, 2},
- new int[]{1, 2, 3},
- new int[]{0, 2, 2},
- new int[]{0, 3, 0},
- new int[]{0, 4, 0},
- new int[]{20, 20, 0},
- // 4 node
- new int[]{0, 1, 2, 3},
- new int[]{4, 0, 0, 0},
- new int[]{5, 0, 0, 0},
- new int[]{6, 6, 0, 0},
- new int[]{6, 2, 0, 0},
- new int[]{6, 1, 0, 0},
- new int[]{6, 0, 0, 0},
- new int[]{4, 4, 4, 7},
- new int[]{4, 4, 4, 8},
- new int[]{0, 0, 0, 7},
- // 5 node
- new int[]{1, 1, 1, 1, 4},
- // 6 nodes
- new int[]{1500, 500, 500, 500, 10, 0},
- new int[]{1500, 500, 500, 500, 500, 0},
- // more nodes
- new int[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
- new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 10},
- new int[]{6, 6, 5, 6, 6, 6, 6, 6, 6, 1},
- new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 54},
- new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 55},
- new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 56},
- new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 16},
- new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 8},
- new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 9},
- new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 10},
- new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 123},
- new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 155},
- new int[]{10, 7, 12, 8, 11, 10, 9, 14},
- new int[]{13, 14, 6, 10, 10, 10, 8, 10},
- new int[]{130, 14, 60, 10, 100, 10, 80, 10},
- new int[]{130, 140, 60, 100, 100, 100, 80, 100},
- new int[]{0, 5 , 5, 5, 5},
- largeCluster,
+ // 1 node
+ new int[]{0},
+ new int[]{1},
+ new int[]{10},
+ // 2 node
+ new int[]{0, 0},
+ new int[]{2, 0},
+ new int[]{2, 1},
+ new int[]{2, 2},
+ new int[]{2, 3},
+ new int[]{2, 4},
+ new int[]{1, 1},
+ new int[]{0, 1},
+ new int[]{10, 1},
+ new int[]{514, 1432},
+ new int[]{48, 53},
+ // 3 node
+ new int[]{0, 1, 2},
+ new int[]{1, 2, 3},
+ new int[]{0, 2, 2},
+ new int[]{0, 3, 0},
+ new int[]{0, 4, 0},
+ new int[]{20, 20, 0},
+ // 4 node
+ new int[]{0, 1, 2, 3},
+ new int[]{4, 0, 0, 0},
+ new int[]{5, 0, 0, 0},
+ new int[]{6, 6, 0, 0},
+ new int[]{6, 2, 0, 0},
+ new int[]{6, 1, 0, 0},
+ new int[]{6, 0, 0, 0},
+ new int[]{4, 4, 4, 7},
+ new int[]{4, 4, 4, 8},
+ new int[]{0, 0, 0, 7},
+ // 5 node
+ new int[]{1, 1, 1, 1, 4},
+ // 6 nodes
+ new int[]{1500, 500, 500, 500, 10, 0},
+ new int[]{1500, 500, 500, 500, 500, 0},
+ // more nodes
+ new int[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 10},
+ new int[]{6, 6, 5, 6, 6, 6, 6, 6, 6, 1},
+ new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 54},
+ new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 55},
+ new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 56},
+ new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 16},
+ new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 8},
+ new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 9},
+ new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 10},
+ new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 123},
+ new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 155},
+ new int[]{10, 7, 12, 8, 11, 10, 9, 14},
+ new int[]{13, 14, 6, 10, 10, 10, 8, 10},
+ new int[]{130, 14, 60, 10, 100, 10, 80, 10},
+ new int[]{130, 140, 60, 100, 100, 100, 80, 100},
+ new int[]{0, 5 , 5, 5, 5},
+ largeCluster,
};
+ // int[testnum][servernumber] -> numregions
+ protected int[][] clusterStateMocksWithNoSlop = new int[][] {
+ // 1 node
+ new int[]{0},
+ new int[]{1},
+ new int[]{10},
+ // 2 node
+ new int[]{0, 0},
+ new int[]{2, 1},
+ new int[]{2, 2},
+ new int[]{2, 3},
+ new int[]{1, 1},
+ new int[]{80, 120},
+ new int[]{1428, 1432},
+ // more nodes
+ new int[]{100, 90, 120, 90, 110, 100, 90, 120},
+ };
+
+ // int[testnum][servernumber] -> numregions
+ protected int[][] clusterStateMocksWithSlop = new int[][] {
+ // 2 node
+ new int[]{1, 4},
+ new int[]{10, 20},
+ new int[]{80, 123},
+ // more nodes
+ new int[]{100, 100, 100, 100, 100, 100, 100, 100, 100, 200},
+ new int[] {
+ 10, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ , 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ , 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ , 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ , 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ , 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ , 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ , 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ , 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ , 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ },
+ };
// This class is introduced because IP to rack resolution can be lengthy.
public static class MockMapping implements DNSToSwitchMapping {
@@ -380,6 +418,23 @@ public class BalancerTestBase {
map.put(sn, sal);
}
+ protected TreeMap<ServerName, List<RegionInfo>> mockClusterServers(int[][] mockCluster) {
+ // dimension1: table, dimension2: regions per server
+ int numTables = mockCluster.length;
+ TreeMap<ServerName, List<RegionInfo>> servers = new TreeMap<>();
+ for (int i = 0; i < numTables; i++) {
+ TableName tableName = TableName.valueOf("table" + i);
+ for (int j = 0; j < mockCluster[i].length; j++) {
+ ServerName serverName = ServerName.valueOf("server" + j, 1000, -1);
+ int numRegions = mockCluster[i][j];
+ List<RegionInfo> regions = createRegions(numRegions, tableName);
+ servers.putIfAbsent(serverName, new ArrayList<>());
+ servers.get(serverName).addAll(regions);
+ }
+ }
+ return servers;
+ }
+
protected TreeMap<ServerName, List<RegionInfo>> mockClusterServers(int[] mockCluster) {
return mockClusterServers(mockCluster, -1);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerDecision.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerDecision.java
index 74419951f28..76169066873 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerDecision.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerDecision.java
@@ -57,7 +57,9 @@ public class TestBalancerDecision extends BalancerTestBase {
conf.setBoolean("hbase.master.balancer.decision.buffer.enabled", true);
loadBalancer.onConfigurationChange(conf);
float minCost = conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
+ float slop = conf.getFloat(HConstants.LOAD_BALANCER_SLOP_KEY, 0.2f);
conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);
+ conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, -1f);
try {
// Test with/without per table balancer.
boolean[] perTableBalancerConfigs = {true, false};
@@ -92,6 +94,7 @@ public class TestBalancerDecision extends BalancerTestBase {
// reset config
conf.unset(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE);
conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", minCost);
+ conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, slop);
loadBalancer.onConfigurationChange(conf);
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestSimpleLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestSimpleLoadBalancer.java
index ecad7a25503..2ae6b680d7a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestSimpleLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestSimpleLoadBalancer.java
@@ -225,28 +225,19 @@ public class TestSimpleLoadBalancer extends BalancerTestBase {
}
@Test
- public void testBalanceClusterOverallStrictly() throws Exception {
- int[] regionNumOfTable1PerServer = { 3, 3, 4, 4, 4, 4, 5, 5, 5 };
- int[] regionNumOfTable2PerServer = { 2, 2, 2, 2, 2, 2, 2, 2, 1 };
- TreeMap<ServerName, List<RegionInfo>> serverRegionInfo = new TreeMap<>();
- List<ServerAndLoad> serverAndLoads = new ArrayList<>();
- for (int i = 0; i < regionNumOfTable1PerServer.length; i++) {
- ServerName serverName = ServerName.valueOf("server" + i, 1000, -1);
- List<RegionInfo> regions1 =
- createRegions(regionNumOfTable1PerServer[i], TableName.valueOf("table1"));
- List<RegionInfo> regions2 =
- createRegions(regionNumOfTable2PerServer[i], TableName.valueOf("table2"));
- regions1.addAll(regions2);
- serverRegionInfo.put(serverName, regions1);
- ServerAndLoad serverAndLoad = new ServerAndLoad(serverName,
- regionNumOfTable1PerServer[i] + regionNumOfTable2PerServer[i]);
- serverAndLoads.add(serverAndLoad);
- }
- HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>> LoadOfAllTable =
+ public void testBalanceClusterOverallStrictly() {
+ int[][] regionsPerServerPerTable = new int[][]{
+ new int[]{ 3, 3, 4, 4, 4, 4, 5, 5, 5 },
+ new int[]{ 2, 2, 2, 2, 2, 2, 2, 2, 1 },
+ };
+ TreeMap<ServerName, List<RegionInfo>> serverRegionInfo =
+ mockClusterServers(regionsPerServerPerTable);
+ List<ServerAndLoad> serverAndLoads = convertToList(serverRegionInfo);
+ Map<TableName, TreeMap<ServerName, List<RegionInfo>>> loadOfAllTable =
mockClusterServersWithTables(serverRegionInfo);
- loadBalancer.setClusterLoad((Map) LoadOfAllTable);
- List<RegionPlan> partialplans = loadBalancer.balanceTable(TableName.valueOf("table1"),
- LoadOfAllTable.get(TableName.valueOf("table1")));
+ loadBalancer.setClusterLoad((Map) loadOfAllTable);
+ List<RegionPlan> partialplans = loadBalancer.balanceTable(TableName.valueOf("table0"),
+ loadOfAllTable.get(TableName.valueOf("table0")));
List<ServerAndLoad> balancedServerLoads =
reconcile(serverAndLoads, partialplans, serverRegionInfo);
for (ServerAndLoad serverAndLoad : balancedServerLoads) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
index 620dd033735..14b6b0ea75a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.master.balancer;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
@@ -256,7 +257,9 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
@Test
public void testNeedBalance() {
float minCost = conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
+ float slop = conf.getFloat(HConstants.LOAD_BALANCER_SLOP_KEY, 0.2f);
conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);
+ conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, -1f);
try {
// Test with/without per table balancer.
boolean[] perTableBalancerConfigs = {true, false};
@@ -265,22 +268,80 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
loadBalancer.onConfigurationChange(conf);
for (int[] mockCluster : clusterStateMocks) {
- Map<ServerName, List<RegionInfo>> servers = mockClusterServers(mockCluster);
- Map<TableName, Map<ServerName, List<RegionInfo>>> LoadOfAllTable =
- (Map) mockClusterServersWithTables(servers);
- List<RegionPlan> plans = loadBalancer.balanceCluster(LoadOfAllTable);
- boolean emptyPlans = plans == null || plans.isEmpty();
- assertTrue(emptyPlans || needsBalanceIdleRegion(mockCluster));
+ assertTrue(hasEmptyBalancerPlans(mockCluster) || needsBalanceIdleRegion(mockCluster));
}
}
} finally {
// reset config
conf.unset(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE);
conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", minCost);
+ conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, slop);
loadBalancer.onConfigurationChange(conf);
}
}
+ @Test
+ public void testBalanceOfSloppyServers() {
+ float minCost = conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);
+ try {
+ // We are testing slop checks, so don't "accidentally" balance due to a minCost calculation.
+ // During development, imbalance of a 100 server cluster, with one node having 10 regions
+ // and the rest having 5, is 0.0048. With minCostNeedBalance default of 0.025, test should
+ // validate slop checks without this override. We override just to ensure we will always
+ // validate slop check here, and for small clusters as well.
+ conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);
+ loadBalancer.onConfigurationChange(conf);
+ for (int[] mockCluster : clusterStateMocksWithNoSlop) {
+ assertTrue(hasEmptyBalancerPlans(mockCluster));
+ }
+ for (int[] mockCluster : clusterStateMocksWithSlop) {
+ assertFalse(hasEmptyBalancerPlans(mockCluster));
+ }
+ } finally {
+ // reset config
+ conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", minCost);
+ loadBalancer.onConfigurationChange(conf);
+ }
+ }
+
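
To illustrate the reasoning in the comment above with one of the sloppy mocks (the 100-server cluster that has a single server at 10 regions and the rest at 5), here is a rough back-of-the-envelope check of the slop logic; the class name is invented and this snippet is not part of the patch:

```java
// Hypothetical illustration (not part of the patch): why the 100-server mock with one
// server at 10 regions and 99 servers at 5 trips the slop check at the default slop of 0.2,
// even though, per the comment above, its stochastic imbalance cost (~0.0048) is small.
public class SloppyMockExample {
  public static void main(String[] args) {
    float slop = 0.2f;
    float average = (10 + 99 * 5) / 100.0f;               // 5.05 regions per server
    int floor = (int) Math.floor(average * (1 - slop));   // 4
    int ceiling = (int) Math.ceil(average * (1 + slop));  // 7
    boolean sloppy = 10 > ceiling || 5 < floor;           // 10 > 7, so the balancer runs
    System.out.println("floor=" + floor + " ceiling=" + ceiling + " sloppy=" + sloppy);
  }
}
```

Overriding minCostNeedBalance to 1.0 in the test simply guarantees the cost path cannot trigger a run on its own, so any plans produced (or not produced) come from the slop check alone.
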
+ @Test
+ public void testSloppyTablesLoadBalanceByTable() {
+ int[][] regionsPerServerPerTable = new int[][] {
+ new int[] { 8, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ , 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5},
+ new int[] { 2, 8, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ , 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5},
+ };
+ float minCost = conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);
+ try {
+ conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);
+ conf.setBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, true);
+ loadBalancer.onConfigurationChange(conf);
+ assertFalse(hasEmptyBalancerPlans(regionsPerServerPerTable));
+ } finally {
+ conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", minCost);
+ conf.unset(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE);
+ loadBalancer.onConfigurationChange(conf);
+ }
+ }
+
+ private boolean hasEmptyBalancerPlans(int[] mockCluster) {
+ Map<ServerName, List<RegionInfo>> servers = mockClusterServers(mockCluster);
+ return hasEmptyBalancerPlans(servers);
+ }
+
+ private boolean hasEmptyBalancerPlans(int[][] mockCluster) {
+ Map<ServerName, List<RegionInfo>> servers = mockClusterServers(mockCluster);
+ return hasEmptyBalancerPlans(servers);
+ }
+
+ private boolean hasEmptyBalancerPlans(Map<ServerName, List<RegionInfo>> servers) {
+ Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable =
+ (Map) mockClusterServersWithTables(servers);
+ List<RegionPlan> plans = loadBalancer.balanceCluster(loadOfAllTable);
+ return plans == null || plans.isEmpty();
+ }
+
@Test
public void testLocalityCost() throws Exception {
Configuration conf = HBaseConfiguration.create();