HBASE-17706 TableSkewCostFunction improperly computes max skew - revert due to test failure
This commit is contained in:
parent
7f0e6f1c9e
commit
a69c23abfe
@@ -739,15 +739,18 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
       }
       numRegionsPerServerPerTable[newServer][tableIndex]++;
 
-      // if old server had max num regions, assume (for now)
-      // max num regions went down since we moved the region
-      if (oldServer >= 0 &&
-          (numRegionsPerServerPerTable[oldServer][tableIndex] + 1) == numMaxRegionsPerTable[tableIndex]) {
-        numMaxRegionsPerTable[tableIndex]--;
+      //check whether this caused maxRegionsPerTable in the new Server to be updated
+      if (numRegionsPerServerPerTable[newServer][tableIndex] > numMaxRegionsPerTable[tableIndex]) {
+        numMaxRegionsPerTable[tableIndex] = numRegionsPerServerPerTable[newServer][tableIndex];
+      } else if (oldServer >= 0 && (numRegionsPerServerPerTable[oldServer][tableIndex] + 1)
+          == numMaxRegionsPerTable[tableIndex]) {
+        //recompute maxRegionsPerTable since the previous value was coming from the old server
+        for (int serverIndex = 0 ; serverIndex < numRegionsPerServerPerTable.length; serverIndex++) {
+          if (numRegionsPerServerPerTable[serverIndex][tableIndex] > numMaxRegionsPerTable[tableIndex]) {
+            numMaxRegionsPerTable[tableIndex] = numRegionsPerServerPerTable[serverIndex][tableIndex];
+          }
+        }
       }
-      // Now check if new server sets new max
-      numMaxRegionsPerTable[tableIndex] =
-          Math.max(numMaxRegionsPerTable[tableIndex], numRegionsPerServerPerTable[newServer][tableIndex]);
 
       // update for servers
       int primary = regionIndexToPrimaryIndex[region];
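For context, the hunk above changes how the per-table maximum (numMaxRegionsPerTable) is maintained when a region moves between servers. Below is a minimal standalone sketch, not HBase code: the array semantics follow the diff, but the harness class, method names, and example counts are made up for illustration. It shows why blindly decrementing the cached maximum when the old server held it can understate the true maximum, and why the added loop rescans every server instead.

// Minimal sketch (hypothetical class, not part of HBase) of why the cached
// per-table maximum must be recomputed after a move rather than decremented
// when several servers tie for the maximum.
public class MaxRegionsSketch {

  // counts[server] = number of regions of one table on that server
  static int recomputedMax(int[] counts) {
    int max = 0;
    for (int c : counts) {
      max = Math.max(max, c);   // exact: scan every server, like the added loop
    }
    return max;
  }

  public static void main(String[] args) {
    int[] counts = {5, 5, 3};   // servers 0 and 1 both hold the current max of 5
    int cachedMax = 5;

    // Move one region from server 0 (which held the max) to server 2.
    counts[0]--;
    counts[2]++;

    // Decrement-style update: old server held the max, so knock the cache down.
    int decrementedMax = cachedMax - 1;                                 // 4

    System.out.println("decremented max = " + decrementedMax);          // 4 (wrong)
    System.out.println("recomputed  max = " + recomputedMax(counts));   // 5 (server 1 still holds 5)
  }
}

In the tied-maximum case the follow-up Math.max against the new server's count cannot repair the undercount either, since the new server may still be below the true maximum; only a full rescan restores the invariant.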
@@ -272,6 +272,14 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
 
   @Override
   protected boolean needsBalance(Cluster cluster) {
+    ClusterLoadState cs = new ClusterLoadState(cluster.clusterState);
+    if (cs.getNumServers() < MIN_SERVER_BALANCE) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Not running balancer because only " + cs.getNumServers()
+            + " active regionserver(s)");
+      }
+      return false;
+    }
     if (areSomeRegionReplicasColocated(cluster)) {
       return true;
     }
@@ -298,17 +306,6 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
           + minCostNeedBalance);
       return false;
     }
 
-    ClusterLoadState cs = new ClusterLoadState(cluster.clusterState);
-    if (cs.getNumServers() < MIN_SERVER_BALANCE) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Not running balancer because only " + cs.getNumServers()
-            + " active regionserver(s)");
-      }
-      return false;
-    }
-
-
     return true;
   }
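Taken together, the two hunks above move the ClusterLoadState / MIN_SERVER_BALANCE guard from the end of needsBalance to its start, so a cluster with too few regionservers is rejected before the replica-colocation short-circuit and the cost-threshold check run. The following self-contained sketch (toy types and assumed constant values, not the HBase classes or defaults) only illustrates that guard ordering:

// Sketch of the guard ordering established in needsBalance by the hunks above.
// MIN_SERVER_BALANCE and the cost threshold are assumed values for illustration.
public class NeedsBalanceSketch {
  static final int MIN_SERVER_BALANCE = 2;
  static final double MIN_COST_NEED_BALANCE = 0.05;

  static boolean needsBalance(int numServers, boolean replicasColocated, double normalizedCost) {
    if (numServers < MIN_SERVER_BALANCE) {
      return false;                                 // 1) too few servers: bail out first
    }
    if (replicasColocated) {
      return true;                                  // 2) colocated replicas always trigger a run
    }
    return normalizedCost > MIN_COST_NEED_BALANCE;  // 3) otherwise compare against the threshold
  }

  public static void main(String[] args) {
    System.out.println(needsBalance(1, true, 0.9));   // false: the server-count guard now wins
    System.out.println(needsBalance(3, false, 0.01)); // false: below the cost threshold
    System.out.println(needsBalance(3, false, 0.4));  // true: skewed enough to balance
  }
}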
@@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster;
 import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CandidateGenerator;
 import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.TableSkewCandidateGenerator;
-import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.LoadCandidateGenerator;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -238,33 +237,6 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     }
   }
 
-  @Test
-  public void testTableSkewCostProperlyDecreases() {
-    int replication = 1;
-    Configuration conf = HBaseConfiguration.create();
-    StochasticLoadBalancer.CostFunction
-        costFunction = new StochasticLoadBalancer.TableSkewCostFunction(conf);
-    CandidateGenerator generator = new LoadCandidateGenerator();
-    // Start out with 100 regions on one server and 0 regions on the other
-    int numNodes = 2;
-    int numTables = 1;
-    int numRegions = 100;
-    int numRegionsPerServer = 0;
-
-    Map<ServerName, List<HRegionInfo>> serverMap = createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
-    BaseLoadBalancer.Cluster cluster = new Cluster(serverMap, null, null, null);
-    costFunction.init(cluster);
-    double cost = costFunction.cost();
-    assertEquals(1.0, cost, .0001);
-    for (int i = 0; i < 100; i++) {
-      Cluster.Action action = generator.generate(cluster);
-      cluster.doAction(action);
-      costFunction.postAction(action);
-      cost = costFunction.cost();
-    }
-    assertTrue(cost < 0.5);
-  }
-
   @Test
   public void testRegionLoadCost() {
     List<BalancerRegionLoad> regionLoads = new ArrayList<>();
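The removed testTableSkewCostProperlyDecreases asserted a qualitative property: with all 100 regions of a table on one of two servers the table-skew cost starts at 1.0, and after 100 generator-proposed moves it should fall below 0.5. The toy illustration below shows the shape of that expectation with a generic normalized-skew metric; it is NOT the TableSkewCostFunction formula, and the class and numbers are made up for the sketch.

import java.util.Arrays;

// Toy stand-in for a normalized skew metric: 1.0 when one server holds every
// region of a table, approaching 0 as regions spread evenly. Not HBase code.
public class TableSkewSketch {

  static double skew(int[] regionsPerServer) {
    int total = Arrays.stream(regionsPerServer).sum();
    int max = Arrays.stream(regionsPerServer).max().orElse(0);
    double even = (double) total / regionsPerServer.length; // ideal per-server load
    return (max - even) / (total - even);                   // normalized to [0, 1]
  }

  public static void main(String[] args) {
    int[] regions = {100, 0};                // the removed test's starting layout
    System.out.println(skew(regions));       // 1.0

    for (int i = 0; i < 40; i++) {           // move 40 regions to the empty server
      regions[0]--;
      regions[1]++;
    }
    System.out.println(skew(regions));       // 0.2, well below the test's 0.5 bound
  }
}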