HBASE-25757 Move BaseLoadBalancer to hbase-balancer module (#3191)
Signed-off-by: Yulin Niu <niuyulin@apache.org>
parent f36e153964
commit a4d954e606
@@ -133,6 +133,11 @@
<artifactId>log4j-slf4j-impl</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-1.2-api</artifactId>
<scope>test</scope>
</dependency>
</dependencies>

<profiles>
@@ -75,7 +75,7 @@ public class FavoredNodesPlan {
}

/**
* @return the list of favored region server for this region based on the plan
* Returns the list of favored region server for this region based on the plan
*/
public List<ServerName> getFavoredNodes(RegionInfo region) {
return favoredNodesMap.get(region.getRegionNameAsString());
@@ -30,21 +30,19 @@ import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.conf.ConfigurationObserver;
import org.apache.hadoop.hbase.master.balancer.ClusterInfoProvider;
import org.apache.yetus.audience.InterfaceAudience;

/**
* Makes decisions about the placement and movement of Regions across
* RegionServers.
*
* <p>Cluster-wide load balancing will occur only when there are no regions in
* transition and according to a fixed period of a time using {@link #balanceCluster(Map)}.
*
* <p>On cluster startup, bulk assignment can be used to determine
* locations for all Regions in a cluster.
*
* <p>This class produces plans for the
* {@link org.apache.hadoop.hbase.master.assignment.AssignmentManager}
* to execute.
* Makes decisions about the placement and movement of Regions across RegionServers.
* <p/>
* Cluster-wide load balancing will occur only when there are no regions in transition and according
* to a fixed period of a time using {@link #balanceCluster(Map)}.
* <p/>
* On cluster startup, bulk assignment can be used to determine locations for all Regions in a
* cluster.
* <p/>
* This class produces plans for the {@code AssignmentManager} to execute.
*/
@InterfaceAudience.Private
public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObserver {
@@ -69,15 +67,14 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse

/**
* Set the master service.
* Set the cluster info provider. Usually it is just a wrapper of master.
*/
void setMasterServices(MasterServices masterServices);
void setClusterInfoProvider(ClusterInfoProvider provider);

/**
* Perform the major balance operation for cluster, will invoke {@link #balanceTable} to do actual
* balance. Normally not need override this method, except
* {@link org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer} and
* {@link org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer}
* balance. Normally not need override this method, except {@link SimpleLoadBalancer} and
* {@code RSGroupBasedLoadBalancer}
* @param loadOfAllTable region load of servers for all table
* @return a list of regions to be moved, including source and destination, or null if cluster is
* already balanced

@@ -30,18 +30,14 @@ import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.Predicate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.yetus.audience.InterfaceAudience;
@@ -55,10 +51,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

/**
* The base class for load balancers. It provides the the functions used to by
* {@link org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign regions
* in the edge cases. It doesn't provide an implementation of the
* actual balancing algorithm.
*
* {@code AssignmentManager} to assign regions in the edge cases. It doesn't provide an
* implementation of the actual balancing algorithm.
*/
@InterfaceAudience.Private
public abstract class BaseLoadBalancer implements LoadBalancer {
@@ -72,9 +66,6 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
protected static final int MIN_SERVER_BALANCE = 2;
private volatile boolean stopped = false;

private static final Predicate<ServerMetrics> IDLE_SERVER_PREDICATOR
= load -> load.getRegionMetrics().isEmpty();

protected RegionHDFSBlockLocationFinder regionFinder;
protected boolean useRegionFinder;
protected boolean isByTable = false;
@@ -88,7 +79,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
protected MetricsBalancer metricsBalancer = null;
protected ClusterMetrics clusterStatus = null;
protected ServerName masterServerName;
protected MasterServices services;
protected ClusterInfoProvider provider;

/**
* The constructor that uses the basic MetricsBalancer
*/

@@ -151,24 +143,17 @@ public abstract class BaseLoadBalancer implements LoadBalancer {

@Override
public void setMasterServices(MasterServices masterServices) {
masterServerName = masterServices.getServerName();
this.services = masterServices;
public void setClusterInfoProvider(ClusterInfoProvider provider) {
this.provider = provider;
if (useRegionFinder) {
this.regionFinder.setClusterInfoProvider(new MasterClusterInfoProvider(services));
this.regionFinder.setClusterInfoProvider(provider);
}
}

@Override
public void postMasterStartupInitialize() {
if (services != null && regionFinder != null) {
try {
Set<RegionInfo> regions =
services.getAssignmentManager().getRegionStates().getRegionAssignments().keySet();
regionFinder.refreshAndWait(regions);
} catch (Exception e) {
LOG.warn("Refreshing region HDFS Block dist failed with exception, ignoring", e);
}
if (provider != null && regionFinder != null) {
regionFinder.refreshAndWait(provider.getAssignedRegions());
}
}

@@ -277,22 +262,15 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
BalancerClusterState cluster = createCluster(servers, regions);
Map<ServerName, List<RegionInfo>> assignments = new HashMap<>();
roundRobinAssignment(cluster, regions, servers, assignments);
return assignments;
return Collections.unmodifiableMap(assignments);
}

private BalancerClusterState createCluster(List<ServerName> servers,
Collection<RegionInfo> regions) throws HBaseIOException {
boolean hasRegionReplica = false;
boolean hasRegionReplica= false;
try {
if (services != null && services.getTableDescriptors() != null) {
Map<String, TableDescriptor> tds = services.getTableDescriptors().getAll();
for (RegionInfo regionInfo : regions) {
TableDescriptor td = tds.get(regionInfo.getTable().getNameWithNamespaceInclAsString());
if (td != null && td.getRegionReplication() > 1) {
hasRegionReplica = true;
break;
}
}
if (provider != null) {
hasRegionReplica = provider.hasRegionReplica(regions);
}
} catch (IOException ioe) {
throw new HBaseIOException(ioe);
@@ -320,8 +298,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
}

private List<ServerName> findIdleServers(List<ServerName> servers) {
return this.services.getServerManager()
.getOnlineServersListWithPredicator(servers, IDLE_SERVER_PREDICATOR);
return provider.getOnlineServersListWithPredicator(servers,
metrics -> metrics.getRegionMetrics().isEmpty());
}

/**
@@ -474,7 +452,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer {

LOG.info("Reassigned " + regions.size() + " regions. " + numRetainedAssigments
+ " retained the pre-restart assignment. " + randomAssignMsg);
return assignments;
return Collections.unmodifiableMap(assignments);
}

@Override
@@ -503,6 +481,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
/**
* Updates the balancer status tag reported to JMX
*/
@Override
public void updateBalancerStatus(boolean status) {
metricsBalancer.balancerStatus(status);
}

@@ -607,13 +586,11 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
}
}

private Map<ServerName, List<RegionInfo>> getRegionAssignmentsByServer(
Collection<RegionInfo> regions) {
if (this.services != null && this.services.getAssignmentManager() != null) {
return this.services.getAssignmentManager().getSnapShotOfAssignment(regions);
} else {
return new HashMap<>();
}
// return a modifiable map, as we may add more entries into the returned map.
private Map<ServerName, List<RegionInfo>>
getRegionAssignmentsByServer(Collection<RegionInfo> regions) {
return provider != null ? new HashMap<>(provider.getSnapShotOfAssignment(regions)) :
new HashMap<>();
}

private Map<ServerName, List<RegionInfo>> toEnsumbleTableLoad(

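Illustrative sketch (not part of this commit): with the change above, a balancer built on BaseLoadBalancer no longer needs MasterServices at all; it can implement balanceTable and read cluster state through the protected provider field that setClusterInfoProvider populates. The class name below is hypothetical, and this assumes balanceTable is the only abstract method left to implement.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;

// Hypothetical example subclass, for illustration only.
public class NoopLoadBalancer extends BaseLoadBalancer {
  @Override
  public List<RegionPlan> balanceTable(TableName tableName,
      Map<ServerName, List<RegionInfo>> loadOfOneTable) {
    // Cluster state is reachable through the protected "provider" field, e.g.
    // provider.getSnapShotOfAssignment(...) as getRegionAssignmentsByServer above does.
    return Collections.emptyList(); // produce no moves
  }
}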
@@ -35,16 +35,12 @@ abstract class CandidateGenerator {
* From a list of regions pick a random one. Null can be returned which
* {@link StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region move
* rather than swap.
*
* @param cluster The state of the cluster
* @param server index of the server
* @param chanceOfNoSwap Chance that this will decide to try a move rather
* than a swap.
* @return a random {@link RegionInfo} or null if an asymmetrical move is
* suggested.
* @param chanceOfNoSwap Chance that this will decide to try a move rather than a swap.
* @return a random {@link RegionInfo} or null if an asymmetrical move is suggested.
*/
int pickRandomRegion(BalancerClusterState cluster, int server,
double chanceOfNoSwap) {
int pickRandomRegion(BalancerClusterState cluster, int server, double chanceOfNoSwap) {
// Check to see if this is just a move.
if (cluster.regionsPerServer[server].length == 0
|| ThreadLocalRandom.current().nextFloat() < chanceOfNoSwap) {
@@ -95,8 +91,7 @@ abstract class CandidateGenerator {
}
}

BalanceAction pickRandomRegions(BalancerClusterState cluster,
int thisServer, int otherServer) {
BalanceAction pickRandomRegions(BalancerClusterState cluster, int thisServer, int otherServer) {
if (thisServer < 0 || otherServer < 0) {
return BalanceAction.NULL_ACTION;
}
@@ -115,14 +110,12 @@ abstract class CandidateGenerator {
return getAction(thisServer, thisRegion, otherServer, otherRegion);
}

protected BalanceAction getAction(int fromServer, int fromRegion,
int toServer, int toRegion) {
protected BalanceAction getAction(int fromServer, int fromRegion, int toServer, int toRegion) {
if (fromServer < 0 || toServer < 0) {
return BalanceAction.NULL_ACTION;
}
if (fromRegion >= 0 && toRegion >= 0) {
return new SwapRegionsAction(fromServer, fromRegion,
toServer, toRegion);
return new SwapRegionsAction(fromServer, fromRegion, toServer, toRegion);
} else if (fromRegion >= 0) {
return new MoveRegionAction(fromRegion, fromServer, toServer);
} else if (toRegion >= 0) {

@@ -18,9 +18,14 @@
package org.apache.hadoop.hbase.master.balancer;

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -45,6 +50,11 @@ public interface ClusterInfoProvider {
*/
TableDescriptor getTableDescriptor(TableName tableName) throws IOException;

/**
* Returns the number of tables on this cluster.
*/
int getNumberOfTables() throws IOException;

/**
* Compute the block distribution for the given region.
* <p/>
@@ -52,4 +62,20 @@ public interface ClusterInfoProvider {
*/
HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration conf,
TableDescriptor tableDescriptor, RegionInfo regionInfo) throws IOException;

/**
* Check whether we have region replicas enabled for the tables of the given regions.
*/
boolean hasRegionReplica(Collection<RegionInfo> regions) throws IOException;

/**
* Returns a copy of the internal list of online servers matched by the given {@code filter}.
*/
List<ServerName> getOnlineServersListWithPredicator(List<ServerName> servers,
Predicate<ServerMetrics> filter);

/**
* Get a snapshot of the current assignment status.
*/
Map<ServerName, List<RegionInfo>> getSnapShotOfAssignment(Collection<RegionInfo> regions);
}

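Because balancers now depend only on this interface, a unit test can satisfy it without standing up a master. A minimal sketch, assuming a Mockito-based test method that declares throws Exception and a balancer instance created elsewhere; the stubbed return values are made up for illustration.

import static org.mockito.ArgumentMatchers.anyCollection;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Collections;

// inside a test method:
ClusterInfoProvider provider = mock(ClusterInfoProvider.class);
// no table in this fake cluster uses region replicas
when(provider.hasRegionReplica(anyCollection())).thenReturn(false);
// pretend nothing is assigned yet
when(provider.getSnapShotOfAssignment(anyCollection())).thenReturn(Collections.emptyMap());
balancer.setClusterInfoProvider(provider);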
@@ -36,8 +36,8 @@ class LocalityBasedCandidateGenerator extends CandidateGenerator {
int currentServer = cluster.regionIndexToServerIndex[region];
if (currentServer != cluster.getOrComputeRegionsToMostLocalEntities(
BalancerClusterState.LocalityType.SERVER)[region]) {
Optional<BalanceAction> potential = tryMoveOrSwap(cluster,
currentServer, region, cluster.getOrComputeRegionsToMostLocalEntities(
Optional<BalanceAction> potential = tryMoveOrSwap(cluster, currentServer, region,
cluster.getOrComputeRegionsToMostLocalEntities(
BalancerClusterState.LocalityType.SERVER)[region]);
if (potential.isPresent()) {
return potential.get();
@@ -48,16 +48,16 @@ class LocalityBasedCandidateGenerator extends CandidateGenerator {
return BalanceAction.NULL_ACTION;
}

private Optional<BalanceAction> tryMoveOrSwap(BalancerClusterState cluster,
int fromServer, int fromRegion, int toServer) {
private Optional<BalanceAction> tryMoveOrSwap(BalancerClusterState cluster, int fromServer,
int fromRegion, int toServer) {
// Try move first. We know apriori fromRegion has the highest locality on toServer
if (cluster.serverHasTooFewRegions(toServer)) {
return Optional.of(getAction(fromServer, fromRegion, toServer, -1));
}
// Compare locality gain/loss from swapping fromRegion with regions on toServer
double fromRegionLocalityDelta = getWeightedLocality(cluster, fromRegion, toServer)
- getWeightedLocality(cluster, fromRegion, fromServer);
int toServertotalRegions = cluster.regionsPerServer[toServer].length;
double fromRegionLocalityDelta = getWeightedLocality(cluster, fromRegion, toServer) -
getWeightedLocality(cluster, fromRegion, fromServer);
int toServertotalRegions = cluster.regionsPerServer[toServer].length;
if (toServertotalRegions > 0) {
int startIndex = ThreadLocalRandom.current().nextInt(toServertotalRegions);
for (int i = 0; i < toServertotalRegions; i++) {

@@ -0,0 +1,35 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.balancer;

import org.apache.yetus.audience.InterfaceAudience;

@InterfaceAudience.Private
class RandomCandidateGenerator extends CandidateGenerator {

@Override
BalanceAction generate(BalancerClusterState cluster) {

int thisServer = pickRandomServer(cluster);

// Pick the other server
int otherServer = pickOtherRandomServer(cluster, thisServer);

return pickRandomRegions(cluster, thisServer, otherServer);
}
}

@@ -22,8 +22,8 @@ import java.util.concurrent.ThreadLocalRandom;
import org.apache.yetus.audience.InterfaceAudience;

/**
* Generates candidates which moves the replicas out of the region server for
* co-hosted region replicas
* Generates candidates which moves the replicas out of the region server for co-hosted region
* replicas
*/
@InterfaceAudience.Private
class RegionReplicaCandidateGenerator extends CandidateGenerator {
@@ -31,17 +31,16 @@ class RegionReplicaCandidateGenerator extends CandidateGenerator {
protected final RandomCandidateGenerator randomGenerator = new RandomCandidateGenerator();

/**
* Randomly select one regionIndex out of all region replicas co-hosted in the same group
* (a group is a server, host or rack)
*
* Randomly select one regionIndex out of all region replicas co-hosted in the same group (a group
* is a server, host or rack)
* @param primariesOfRegionsPerGroup either Cluster.primariesOfRegionsPerServer,
* primariesOfRegionsPerHost or primariesOfRegionsPerRack
* primariesOfRegionsPerHost or primariesOfRegionsPerRack
* @param regionsPerGroup either Cluster.regionsPerServer, regionsPerHost or regionsPerRack
* @param regionIndexToPrimaryIndex Cluster.regionsIndexToPrimaryIndex
* @return a regionIndex for the selected primary or -1 if there is no co-locating
*/
int selectCoHostedRegionPerGroup(int[] primariesOfRegionsPerGroup, int[] regionsPerGroup,
int[] regionIndexToPrimaryIndex) {
int[] regionIndexToPrimaryIndex) {
int currentPrimary = -1;
int currentPrimaryIndex = -1;
int selectedPrimaryIndex = -1;
@@ -50,8 +49,7 @@ class RegionReplicaCandidateGenerator extends CandidateGenerator {
// ids for the regions hosted in server, a consecutive repetition means that replicas
// are co-hosted
for (int j = 0; j <= primariesOfRegionsPerGroup.length; j++) {
int primary = j < primariesOfRegionsPerGroup.length
? primariesOfRegionsPerGroup[j] : -1;
int primary = j < primariesOfRegionsPerGroup.length ? primariesOfRegionsPerGroup[j] : -1;
if (primary != currentPrimary) { // check for whether we see a new primary
int numReplicas = j - currentPrimaryIndex;
if (numReplicas > 1) { // means consecutive primaries, indicating co-location
@@ -89,10 +87,8 @@ class RegionReplicaCandidateGenerator extends CandidateGenerator {
return BalanceAction.NULL_ACTION;
}

int regionIndex = selectCoHostedRegionPerGroup(
cluster.primariesOfRegionsPerServer[serverIndex],
cluster.regionsPerServer[serverIndex],
cluster.regionIndexToPrimaryIndex);
int regionIndex = selectCoHostedRegionPerGroup(cluster.primariesOfRegionsPerServer[serverIndex],
cluster.regionsPerServer[serverIndex], cluster.regionIndexToPrimaryIndex);

// if there are no pairs of region replicas co-hosted, default to random generator
if (regionIndex == -1) {

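A short standalone illustration (not part of this commit) of the co-location check that selectCoHostedRegionPerGroup relies on: the primary indices of the regions hosted in a group are scanned in sorted order, and a consecutive repetition means two replicas of the same primary share the group. The sample array below is hypothetical.

public class CoHostedReplicaCheckExample {
  public static void main(String[] args) {
    // hypothetical sorted primary indices for the regions hosted in one group (server/host/rack)
    int[] primariesOfRegionsPerGroup = { 0, 0, 3, 5, 5, 5 };
    for (int j = 1; j < primariesOfRegionsPerGroup.length; j++) {
      // a run of equal values means replicas of that primary are co-hosted in this group
      if (primariesOfRegionsPerGroup[j] == primariesOfRegionsPerGroup[j - 1]) {
        System.out.println("primary " + primariesOfRegionsPerGroup[j] + " has a co-hosted replica");
      }
    }
  }
}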
@@ -0,0 +1,53 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.balancer;

import java.util.concurrent.ThreadLocalRandom;
import org.apache.yetus.audience.InterfaceAudience;

/**
* Generates candidates which moves the replicas out of the rack for co-hosted region replicas in
* the same rack
*/
@InterfaceAudience.Private
class RegionReplicaRackCandidateGenerator extends RegionReplicaCandidateGenerator {
@Override
BalanceAction generate(BalancerClusterState cluster) {
int rackIndex = pickRandomRack(cluster);
if (cluster.numRacks <= 1 || rackIndex == -1) {
return super.generate(cluster);
}

int regionIndex = selectCoHostedRegionPerGroup(cluster.primariesOfRegionsPerRack[rackIndex],
cluster.regionsPerRack[rackIndex], cluster.regionIndexToPrimaryIndex);

// if there are no pairs of region replicas co-hosted, default to random generator
if (regionIndex == -1) {
// default to randompicker
return randomGenerator.generate(cluster);
}

int serverIndex = cluster.regionIndexToServerIndex[regionIndex];
int toRackIndex = pickOtherRandomRack(cluster, rackIndex);

int rand = ThreadLocalRandom.current().nextInt(cluster.serversPerRack[toRackIndex].length);
int toServerIndex = cluster.serversPerRack[toRackIndex][rand];
int toRegionIndex = pickRandomRegion(cluster, toServerIndex, 0.9f);
return getAction(serverIndex, regionIndex, toServerIndex, toRegionIndex);
}
}

@@ -48,8 +48,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.MinMaxPriorityQueue
* On cluster startup, bulk assignment can be used to determine locations for all Regions in a
* cluster.
* <p/>
* This classes produces plans for the
* {@link org.apache.hadoop.hbase.master.assignment.AssignmentManager} to execute.
* This classes produces plans for the {@code AssignmentManager} to execute.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class SimpleLoadBalancer extends BaseLoadBalancer {
@@ -313,7 +312,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
}
}
serverBalanceInfo.put(sal.getServerName(),
new BalanceInfo(numToOffload, (-1)*numTaken, server.getValue()));
new BalanceInfo(numToOffload, -numTaken, server.getValue()));
}
int totalNumMoved = regionsToMove.size();

@@ -17,28 +17,24 @@
*/
package org.apache.hadoop.hbase.master.balancer;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NavigableSet;
import java.util.Queue;
import java.util.Random;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -49,7 +45,6 @@ import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -63,18 +58,6 @@ public class BalancerTestBase {
private static final Logger LOG = LoggerFactory.getLogger(BalancerTestBase.class);
static int regionId = 0;
protected static Configuration conf;
protected static StochasticLoadBalancer loadBalancer;

@BeforeClass
public static void beforeAllTests() throws Exception {
conf = HBaseConfiguration.create();
conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 0.75f);
conf.setFloat("hbase.regions.slop", 0.0f);
conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
loadBalancer = new StochasticLoadBalancer();
loadBalancer.setConf(conf);
}

protected int[] largeCluster = new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -165,11 +148,11 @@ public class BalancerTestBase {
return Stream.generate(() -> "rack").limit(names.size()).collect(Collectors.toList());
}

// do not add @Override annotations here. It mighty break compilation with earlier Hadoops
@Override
public void reloadCachedMappings() {
}

// do not add @Override annotations here. It mighty break compilation with earlier Hadoops
@Override
public void reloadCachedMappings(List<String> arg0) {
}
}
@@ -254,7 +237,7 @@ public class BalancerTestBase {
TreeMap<String, Set<RegionInfo>> regionsPerHost = new TreeMap<>();
TreeMap<String, Set<RegionInfo>> regionsPerRack = new TreeMap<>();

for (Entry<ServerName, List<RegionInfo>> entry : serverMap.entrySet()) {
for (Map.Entry<ServerName, List<RegionInfo>> entry : serverMap.entrySet()) {
String hostname = entry.getKey().getHostname();
Set<RegionInfo> infos = regionsPerHost.get(hostname);
if (infos == null) {
@@ -274,7 +257,7 @@ public class BalancerTestBase {
return;
}

for (Entry<ServerName, List<RegionInfo>> entry : serverMap.entrySet()) {
for (Map.Entry<ServerName, List<RegionInfo>> entry : serverMap.entrySet()) {
String rack = rackManager.getRack(entry.getKey());
Set<RegionInfo> infos = regionsPerRack.get(rack);
if (infos == null) {
@@ -313,7 +296,7 @@ public class BalancerTestBase {
}

protected String printMock(List<ServerAndLoad> balancedCluster) {
SortedSet<ServerAndLoad> sorted = new TreeSet<>(balancedCluster);
NavigableSet<ServerAndLoad> sorted = new TreeSet<>(balancedCluster);
ServerAndLoad[] arr = sorted.toArray(new ServerAndLoad[sorted.size()]);
StringBuilder sb = new StringBuilder(sorted.size() * 4 + 4);
sb.append("{ ");
@@ -332,14 +315,10 @@ public class BalancerTestBase {
/**
* This assumes the RegionPlan HSI instances are the same ones in the map, so
* actually no need to even pass in the map, but I think it's clearer.
*
* @param list
* @param plans
* @return a list of all added {@link ServerAndLoad} values.
*/
protected List<ServerAndLoad> reconcile(List<ServerAndLoad> list,
List<RegionPlan> plans,
Map<ServerName, List<RegionInfo>> servers) {
protected List<ServerAndLoad> reconcile(List<ServerAndLoad> list, List<RegionPlan> plans,
Map<ServerName, List<RegionInfo>> servers) {
List<ServerAndLoad> result = new ArrayList<>(list.size());

Map<ServerName, ServerAndLoad> map = new HashMap<>(list.size());
@@ -432,7 +411,7 @@ public class BalancerTestBase {
return result;
}

private Queue<RegionInfo> regionQueue = new LinkedList<>();
private Queue<RegionInfo> regionQueue = new ArrayDeque<>();

protected List<RegionInfo> randomRegions(int numRegions) {
return randomRegions(numRegions, -1);
@@ -511,7 +490,7 @@ public class BalancerTestBase {
regionQueue.addAll(regions);
}

private Queue<ServerName> serverQueue = new LinkedList<>();
private Queue<ServerName> serverQueue = new ArrayDeque<>();

protected ServerAndLoad randomServer(final int numRegionsPerServer) {
if (!this.serverQueue.isEmpty()) {
@@ -542,61 +521,13 @@ public class BalancerTestBase {
this.serverQueue.addAll(servers);
}

protected void testWithCluster(int numNodes,
int numRegions,
int numRegionsPerServer,
int replication,
int numTables,
boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) {
Map<ServerName, List<RegionInfo>> serverMap =
createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
testWithCluster(serverMap, null, assertFullyBalanced, assertFullyBalancedForReplicas);
}

protected void testWithCluster(Map<ServerName, List<RegionInfo>> serverMap,
RackManager rackManager, boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) {
List<ServerAndLoad> list = convertToList(serverMap);
LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));

loadBalancer.setRackManager(rackManager);
// Run the balancer.
Map<TableName, Map<ServerName, List<RegionInfo>>> LoadOfAllTable =
(Map) mockClusterServersWithTables(serverMap);
List<RegionPlan> plans = loadBalancer.balanceCluster(LoadOfAllTable);
assertNotNull("Initial cluster balance should produce plans.", plans);

// Check to see that this actually got to a stable place.
if (assertFullyBalanced || assertFullyBalancedForReplicas) {
// Apply the plan to the mock cluster.
List<ServerAndLoad> balancedCluster = reconcile(list, plans, serverMap);

// Print out the cluster loads to make debugging easier.
LOG.info("Mock Balance : " + printMock(balancedCluster));

if (assertFullyBalanced) {
assertClusterAsBalanced(balancedCluster);
LoadOfAllTable = (Map) mockClusterServersWithTables(serverMap);
List<RegionPlan> secondPlans = loadBalancer.balanceCluster(LoadOfAllTable);
assertNull("Given a requirement to be fully balanced, second attempt at plans should " +
"produce none.", secondPlans);
}

if (assertFullyBalancedForReplicas) {
assertRegionReplicaPlacement(serverMap, rackManager);
}
}
}

protected Map<ServerName, List<RegionInfo>> createServerMap(int numNodes,
int numRegions,
int numRegionsPerServer,
int replication,
int numTables) {
//construct a cluster of numNodes, having a total of numRegions. Each RS will hold
//numRegionsPerServer many regions except for the last one, which will host all the
//remaining regions
protected Map<ServerName, List<RegionInfo>> createServerMap(int numNodes, int numRegions,
int numRegionsPerServer, int replication, int numTables) {
// construct a cluster of numNodes, having a total of numRegions. Each RS will hold
// numRegionsPerServer many regions except for the last one, which will host all the
// remaining regions
int[] cluster = new int[numNodes];
for (int i =0; i < numNodes; i++) {
for (int i = 0; i < numNodes; i++) {
cluster[i] = numRegionsPerServer;
}
cluster[cluster.length - 1] = numRegions - ((cluster.length - 1) * numRegionsPerServer);
@@ -606,7 +537,7 @@ public class BalancerTestBase {
for (List<RegionInfo> regions : clusterState.values()) {
int length = regions.size();
for (int i = 0; i < length; i++) {
for (int r = 1; r < replication ; r++) {
for (int r = 1; r < replication; r++) {
regions.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(i), r));
}
}
@@ -615,5 +546,4 @@ public class BalancerTestBase {

return clusterState;
}

}

@@ -45,10 +45,8 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -94,9 +92,8 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
loadBalancer = new MockBalancer();
loadBalancer.setConf(conf);
MasterServices st = mock(MasterServices.class);
when(st.getServerName()).thenReturn(master);
loadBalancer.setMasterServices(st);
ClusterInfoProvider provider = mock(ClusterInfoProvider.class);
loadBalancer.setClusterInfoProvider(provider);

// Set up the rack topologies (5 machines per rack)
rackManager = mock(RackManager.class);
@@ -133,8 +130,6 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
*
* Round-robin. Should yield a balanced cluster so same invariant as the load
* balancer holds, all servers holding either floor(avg) or ceiling(avg).
*
* @throws Exception
*/
@Test
public void testBulkAssignment() throws Exception {
@@ -171,7 +166,6 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
/**
* Test the cluster startup bulk assignment which attempts to retain
* assignment info.
* @throws Exception
*/
@Test
public void testRetainAssignment() throws Exception {
@@ -230,11 +224,11 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
Configuration conf = HBaseConfiguration.create();
conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
balancer.setConf(conf);
ServerManager sm = mock(ServerManager.class);
when(sm.getOnlineServersListWithPredicator(anyList(), any())).thenReturn(idleServers);
MasterServices services = mock(MasterServices.class);
when(services.getServerManager()).thenReturn(sm);
balancer.setMasterServices(services);
ClusterInfoProvider provider = mock(ClusterInfoProvider.class);
when(
provider.getOnlineServersListWithPredicator(anyList(), any()))
.thenReturn(idleServers);
balancer.setClusterInfoProvider(provider);
RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(Bytes.toBytes("key1"))
.setEndKey(Bytes.toBytes("key2"))
@@ -406,9 +400,6 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
* <li>If a region had an existing assignment to a server with the same
* address a a currently online server, it will be assigned to it
* </ul>
* @param existing
* @param servers
* @param assignment
*/
private void assertRetainedAssignment(Map<RegionInfo, ServerName> existing,
List<ServerName> servers, Map<ServerName, List<RegionInfo>> assignment) {

@@ -27,10 +27,13 @@ import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.function.Predicate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -111,6 +114,28 @@ public class TestRegionHDFSBlockLocationFinder {
TableDescriptor tableDescriptor, RegionInfo regionInfo) throws IOException {
return generate(regionInfo);
}

@Override
public boolean hasRegionReplica(Collection<RegionInfo> regions) throws IOException {
return false;
}

@Override
public List<ServerName> getOnlineServersListWithPredicator(List<ServerName> servers,
Predicate<ServerMetrics> filter) {
return Collections.emptyList();
}

@Override
public Map<ServerName, List<RegionInfo>>
getSnapShotOfAssignment(Collection<RegionInfo> regions) {
return Collections.emptyMap();
}

@Override
public int getNumberOfTables() {
return 0;
}
});
}

@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.junit.BeforeClass;
import org.junit.ClassRule;
@@ -68,57 +67,7 @@ public class TestSimpleLoadBalancer extends BalancerTestBase {
loadBalancer.setConf(conf);
}

// int[testnum][servernumber] -> numregions
int[][] clusterStateMocks = new int[][] {
// 1 node
new int[] { 0 },
new int[] { 1 },
new int[] { 10 },
// 2 node
new int[] { 0, 0 },
new int[] { 2, 0 },
new int[] { 2, 1 },
new int[] { 2, 2 },
new int[] { 2, 3 },
new int[] { 2, 4 },
new int[] { 1, 1 },
new int[] { 0, 1 },
new int[] { 10, 1 },
new int[] { 14, 1432 },
new int[] { 47, 53 },
// 3 node
new int[] { 0, 1, 2 },
new int[] { 1, 2, 3 },
new int[] { 0, 2, 2 },
new int[] { 0, 3, 0 },
new int[] { 0, 4, 0 },
new int[] { 20, 20, 0 },
// 4 node
new int[] { 0, 1, 2, 3 },
new int[] { 4, 0, 0, 0 },
new int[] { 5, 0, 0, 0 },
new int[] { 6, 6, 0, 0 },
new int[] { 6, 2, 0, 0 },
new int[] { 6, 1, 0, 0 },
new int[] { 6, 0, 0, 0 },
new int[] { 4, 4, 4, 7 },
new int[] { 4, 4, 4, 8 },
new int[] { 0, 0, 0, 7 },
// 5 node
new int[] { 1, 1, 1, 1, 4 },
// more nodes
new int[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 }, new int[] { 6, 6, 5, 6, 6, 6, 6, 6, 6, 1 },
new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 54 }, new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 55 },
new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 }, new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 },
new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 8 }, new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 9 },
new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 10 }, new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 123 },
new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 155 },
new int[] { 0, 0, 144, 1, 1, 1, 1, 1123, 133, 138, 12, 1444 },
new int[] { 0, 0, 144, 1, 0, 4, 1, 1123, 133, 138, 12, 1444 },
new int[] { 1538, 1392, 1561, 1557, 1535, 1553, 1385, 1542, 1619 } };

int [] mockUniformCluster = new int[] { 5, 5, 5, 5, 5 ,0};
int[] mockUniformCluster = new int[] { 5, 5, 5, 5, 5, 0 };

@Rule
public TestName name = new TestName();
@@ -140,7 +89,6 @@ public class TestSimpleLoadBalancer extends BalancerTestBase {
mockClusterServersWithTables(clusterServers);
loadBalancer.setClusterLoad(clusterLoad);
List<RegionPlan> clusterplans = new ArrayList<>();
List<Pair<TableName, Integer>> regionAmountList = new ArrayList<>();
for (Map.Entry<TableName, TreeMap<ServerName, List<RegionInfo>>> mapEntry : result
.entrySet()) {
TableName tableName = mapEntry.getKey();
@@ -169,7 +117,6 @@ public class TestSimpleLoadBalancer extends BalancerTestBase {
* ceiling(average) at both table level and cluster level
* Deliberately generate a special case to show the overall strategy can achieve cluster
* level balance while the bytable strategy cannot
* @throws Exception
*/
@Test
public void testImpactOfBalanceClusterOverall() throws Exception {
@@ -196,7 +143,6 @@ public class TestSimpleLoadBalancer extends BalancerTestBase {
loadBalancer.setClusterLoad(clusterLoad);
}
List<RegionPlan> clusterplans1 = new ArrayList<RegionPlan>();
List<Pair<TableName, Integer>> regionAmountList = new ArrayList<Pair<TableName, Integer>>();
for (Map.Entry<TableName, TreeMap<ServerName, List<RegionInfo>>> mapEntry : LoadOfAllTable
.entrySet()) {
TableName tableName = mapEntry.getKey();

@@ -296,6 +296,12 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-balancer</artifactId>
</dependency>
<dependency>
<artifactId>hbase-balancer</artifactId>
<groupId>org.apache.hbase</groupId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>

@@ -29,7 +29,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -38,6 +37,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.ServerManager;
@@ -70,6 +70,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements FavoredNodesPromoter {
private static final Logger LOG = LoggerFactory.getLogger(FavoredNodeLoadBalancer.class);

private MasterServices services;
private RackManager rackManager;
private Configuration conf;
private FavoredNodesManager fnm;
@@ -79,6 +80,10 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
this.conf = conf;
}

public void setMasterServices(MasterServices services) {
this.services = services;
}

@Override
public synchronized void initialize() throws HBaseIOException {
super.initialize();
@@ -95,7 +100,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
List<RegionPlan> plans = new ArrayList<>();
// perform a scan of the meta to get the latest updates (if any)
SnapshotOfRegionAssignmentFromMeta snaphotOfRegionAssignment =
new SnapshotOfRegionAssignmentFromMeta(super.services.getConnection());
new SnapshotOfRegionAssignmentFromMeta(services.getConnection());
try {
snaphotOfRegionAssignment.initialize();
} catch (IOException ie) {
@@ -105,7 +110,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
// This is not used? Findbugs says so: Map<ServerName, ServerName>
// serverNameToServerNameWithoutCode = new HashMap<>();
Map<ServerName, ServerName> serverNameWithoutCodeToServerName = new HashMap<>();
ServerManager serverMgr = super.services.getServerManager();
ServerManager serverMgr = services.getServerManager();
for (ServerName sn : serverMgr.getOnlineServersList()) {
ServerName s = ServerName.valueOf(sn.getHostname(), sn.getPort(), ServerName.NON_STARTCODE);
// FindBugs complains about useless store! serverNameToServerNameWithoutCode.put(sn, s);
@@ -136,9 +141,9 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
}
// the region is currently on none of the favored nodes
// get it on one of them if possible
ServerMetrics l1 = super.services.getServerManager()
ServerMetrics l1 = services.getServerManager()
.getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(1)));
ServerMetrics l2 = super.services.getServerManager()
ServerMetrics l2 = services.getServerManager()
.getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(2)));
if (l1 != null && l2 != null) {
if (l1.getRegionMetrics().size() > l2.getRegionMetrics().size()) {
@@ -245,10 +250,11 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
}

private Pair<Map<ServerName, List<RegionInfo>>, List<RegionInfo>>
segregateRegionsAndAssignRegionsWithFavoredNodes(List<RegionInfo> regions,
segregateRegionsAndAssignRegionsWithFavoredNodes(List<RegionInfo> regions,
List<ServerName> availableServers) {
Map<ServerName, List<RegionInfo>> assignmentMapForFavoredNodes = new HashMap<>(regions.size() / 2);
List<RegionInfo> regionsWithNoFavoredNodes = new ArrayList<>(regions.size()/2);
Map<ServerName, List<RegionInfo>> assignmentMapForFavoredNodes =
new HashMap<>(regions.size() / 2);
List<RegionInfo> regionsWithNoFavoredNodes = new ArrayList<>(regions.size() / 2);
for (RegionInfo region : regions) {
List<ServerName> favoredNodes = fnm.getFavoredNodes(region);
ServerName primaryHost = null;
@@ -301,8 +307,8 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
// assign the region to the one with a lower load
// (both have the desired hdfs blocks)
ServerName s;
ServerMetrics tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
ServerMetrics secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
ServerMetrics tertiaryLoad = services.getServerManager().getLoad(tertiaryHost);
ServerMetrics secondaryLoad = services.getServerManager().getLoad(secondaryHost);
if (secondaryLoad.getRegionMetrics().size() < tertiaryLoad.getRegionMetrics().size()) {
s = secondaryHost;
} else {
@@ -317,9 +323,9 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
}

private void addRegionToMap(Map<ServerName, List<RegionInfo>> assignmentMapForFavoredNodes,
RegionInfo region, ServerName host) {
List<RegionInfo> regionsOnServer = null;
if ((regionsOnServer = assignmentMapForFavoredNodes.get(host)) == null) {
RegionInfo region, ServerName host) {
List<RegionInfo> regionsOnServer = assignmentMapForFavoredNodes.get(host);
if (regionsOnServer == null) {
regionsOnServer = new ArrayList<>();
assignmentMapForFavoredNodes.put(host, regionsOnServer);
}

@@ -77,6 +77,12 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements

private static final Logger LOG = LoggerFactory.getLogger(FavoredStochasticBalancer.class);
private FavoredNodesManager fnm;
private MasterServices services;

public void setMasterServices(MasterServices services) {
this.services = services;
this.fnm = services.getFavoredNodesManager();
}

@Override
public void initialize() throws HBaseIOException {
@@ -91,13 +97,7 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements
setCandidateGenerators(fnPickers);
}

@Override
public synchronized void setMasterServices(MasterServices masterServices) {
super.setMasterServices(masterServices);
fnm = masterServices.getFavoredNodesManager();
}

/*
/**
* Round robin assignment: Segregate the regions into two types:
*
* 1. The regions that have favored node assignment where at least one of the favored node
@@ -183,12 +183,12 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements
return assignmentMap;
}

/*
/**
* Return a pair - one with assignments when favored nodes are present and another with regions
* without favored nodes.
*/
private Pair<Map<ServerName, List<RegionInfo>>, List<RegionInfo>>
segregateRegionsAndAssignRegionsWithFavoredNodes(Collection<RegionInfo> regions,
segregateRegionsAndAssignRegionsWithFavoredNodes(Collection<RegionInfo> regions,
List<ServerName> onlineServers) throws HBaseIOException {

// Since we expect FN to be present most of the time, lets create map with same size
@@ -227,17 +227,16 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements
}

private void addRegionToMap(Map<ServerName, List<RegionInfo>> assignmentMapForFavoredNodes,
RegionInfo region, ServerName host) {

List<RegionInfo> regionsOnServer;
if ((regionsOnServer = assignmentMapForFavoredNodes.get(host)) == null) {
RegionInfo region, ServerName host) {
List<RegionInfo> regionsOnServer = assignmentMapForFavoredNodes.get(host);
if (regionsOnServer == null) {
regionsOnServer = Lists.newArrayList();
assignmentMapForFavoredNodes.put(host, regionsOnServer);
}
regionsOnServer.add(region);
}

/*
/**
* Get the ServerName for the FavoredNode. Since FN's startcode is -1, we could want to get the
* ServerName with the correct start code from the list of provided servers.
*/
@@ -266,8 +265,8 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements

// Assign the region to the one with a lower load (both have the desired hdfs blocks)
ServerName s;
ServerMetrics tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
ServerMetrics secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
ServerMetrics tertiaryLoad = services.getServerManager().getLoad(tertiaryHost);
ServerMetrics secondaryLoad = services.getServerManager().getLoad(secondaryHost);
if (secondaryLoad != null && tertiaryLoad != null) {
if (secondaryLoad.getRegionMetrics().size() < tertiaryLoad.getRegionMetrics().size()) {
s = secondaryHost;
@@ -446,11 +445,11 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements
}

@Override
public synchronized List<ServerName> getFavoredNodes(RegionInfo regionInfo) {
public List<ServerName> getFavoredNodes(RegionInfo regionInfo) {
return this.fnm.getFavoredNodes(regionInfo);
}

/*
/**
* Generate Favored Nodes for daughters during region split.
*
* If the parent does not have FN, regenerates them for the daughters.
@@ -463,7 +462,6 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements
@Override
public void generateFavoredNodesForDaughter(List<ServerName> servers, RegionInfo parent,
RegionInfo regionA, RegionInfo regionB) throws IOException {

Map<RegionInfo, List<ServerName>> result = new HashMap<>();
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
helper.initialize();
@@ -662,16 +660,14 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements
}
}

/*
/**
* For all regions correctly assigned to favored nodes, we just use the stochastic balancer
* implementation. For the misplaced regions, we assign a bogus server to it and AM takes care.
*/
@Override
public synchronized List<RegionPlan> balanceTable(TableName tableName,
public List<RegionPlan> balanceTable(TableName tableName,
Map<ServerName, List<RegionInfo>> loadOfOneTable) {

if (this.services != null) {

List<RegionPlan> regionPlans = Lists.newArrayList();
Map<ServerName, List<RegionInfo>> correctAssignments = new HashMap<>();
int misplacedRegions = 0;

@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.yetus.audience.InterfaceAudience;

@@ -57,7 +56,7 @@ public class MaintenanceLoadBalancer extends Configured implements LoadBalancer
}

@Override
public void setMasterServices(MasterServices masterServices) {
public void setClusterInfoProvider(ClusterInfoProvider provider) {
}

@Override
@@ -129,4 +128,5 @@ public class MaintenanceLoadBalancer extends Configured implements LoadBalancer
@Override
public void updateBalancerStatus(boolean status) {
}

}

@@ -18,15 +18,21 @@
package org.apache.hadoop.hbase.master.balancer;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.yetus.audience.InterfaceAudience;

@@ -35,11 +41,11 @@ import org.apache.yetus.audience.InterfaceAudience;
* Master based cluster info provider.
*/
@InterfaceAudience.Private
class MasterClusterInfoProvider implements ClusterInfoProvider {
public class MasterClusterInfoProvider implements ClusterInfoProvider {

private final MasterServices services;

MasterClusterInfoProvider(MasterServices services) {
public MasterClusterInfoProvider(MasterServices services) {
this.services = services;
}

@@ -60,4 +66,38 @@ class MasterClusterInfoProvider implements ClusterInfoProvider {
TableDescriptor tableDescriptor, RegionInfo regionInfo) throws IOException {
return HRegion.computeHDFSBlocksDistribution(conf, tableDescriptor, regionInfo);
}

@Override
public boolean hasRegionReplica(Collection<RegionInfo> regions) throws IOException {
TableDescriptors tds = services.getTableDescriptors();
if (tds == null) {
return false;
}
for (RegionInfo region : regions) {
TableDescriptor td = tds.get(region.getTable());
if (td != null && td.getRegionReplication() > 1) {
return true;
}
}
return false;
}

@Override
public List<ServerName> getOnlineServersListWithPredicator(List<ServerName> servers,
Predicate<ServerMetrics> filter) {
ServerManager sm = services.getServerManager();
return sm != null ? sm.getOnlineServersListWithPredicator(servers, filter) :
Collections.emptyList();
}

@Override
public Map<ServerName, List<RegionInfo>> getSnapShotOfAssignment(Collection<RegionInfo> regions) {
AssignmentManager am = services.getAssignmentManager();
return am != null ? am.getSnapShotOfAssignment(regions) : Collections.emptyMap();
}

@Override
public int getNumberOfTables() throws IOException {
return services.getTableDescriptors().getAll().size();
}
}
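
For reference, the sketch below shows how a balancer-side consumer might call the provider methods added above. It is illustrative only: the class and method names in the sketch are hypothetical, and only ClusterInfoProvider calls that appear in this diff are used.

import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.balancer.ClusterInfoProvider;

// Hypothetical consumer of the provider API shown above; not part of this commit.
class ProviderBackedBalancerSketch {

  private ClusterInfoProvider provider;

  // Counterpart of LoadBalancer#setClusterInfoProvider: the master hands the balancer a
  // narrow ClusterInfoProvider instead of a full MasterServices reference.
  void setClusterInfoProvider(ClusterInfoProvider provider) {
    this.provider = provider;
  }

  // True if any of the given regions belongs to a table that keeps more than one replica,
  // delegating to the hasRegionReplica implementation above.
  boolean needsReplicaAwarePlacement(Collection<RegionInfo> regions) throws IOException {
    return provider.hasRegionReplica(regions);
  }

  // Number of tables, as used by StochasticLoadBalancer below to size its per-table metrics.
  int tableCount() throws IOException {
    return provider.getNumberOfTables();
  }
}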

@@ -277,14 +277,14 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
public synchronized void setClusterMetrics(ClusterMetrics st) {
super.setClusterMetrics(st);
updateRegionLoad();
for(CostFromRegionLoadFunction cost : regionLoadFunctions) {
for (CostFromRegionLoadFunction cost : regionLoadFunctions) {
cost.setClusterMetrics(st);
}

// update metrics size
try {
// by-table or ensemble mode
int tablesCount = isByTable ? services.getTableDescriptors().getAll().size() : 1;
int tablesCount = isByTable ? provider.getNumberOfTables() : 1;
int functionsCount = getCostFunctionNames().length;

updateMetricsSize(tablesCount * (functionsCount + 1)); // +1 for overall

@@ -298,7 +298,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
*/
public void updateMetricsSize(int size) {
if (metricsBalancer instanceof MetricsStochasticBalancer) {
((MetricsStochasticBalancer) metricsBalancer).updateMetricsSize(size);
((MetricsStochasticBalancer) metricsBalancer).updateMetricsSize(size);
}
}

@@ -507,7 +507,9 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
* update costs to JMX
*/
private void updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) {
if (tableName == null) return;
if (tableName == null) {
return;
}

// check if the metricsBalancer is MetricsStochasticBalancer before casting
if (metricsBalancer instanceof MetricsStochasticBalancer) {

@@ -638,7 +640,9 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
* Get the names of the cost functions
*/
public String[] getCostFunctionNames() {
if (costFunctions == null) return null;
if (costFunctions == null) {
return null;
}
String[] ret = new String[costFunctions.size()];
for (int i = 0; i < costFunctions.size(); i++) {
CostFunction c = costFunctions.get(i);

@@ -808,7 +812,9 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
if (max <= min || value <= min) {
return 0;
}
if ((max - min) == 0) return 0;
if ((max - min) == 0) {
return 0;
}

return Math.max(0d, Math.min(1d, (value - min) / (max - min)));
}

@@ -1028,9 +1034,12 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {

@Override
protected void regionMoved(int region, int oldServer, int newServer) {
int oldEntity = type == LocalityType.SERVER ? oldServer : cluster.serverIndexToRackIndex[oldServer];
int newEntity = type == LocalityType.SERVER ? newServer : cluster.serverIndexToRackIndex[newServer];
double localityDelta = getWeightedLocality(region, newEntity) - getWeightedLocality(region, oldEntity);
int oldEntity =
type == LocalityType.SERVER ? oldServer : cluster.serverIndexToRackIndex[oldServer];
int newEntity =
type == LocalityType.SERVER ? newServer : cluster.serverIndexToRackIndex[newServer];
double localityDelta =
getWeightedLocality(region, newEntity) - getWeightedLocality(region, oldEntity);
double normalizedDelta = bestLocality == 0 ? 0.0 : localityDelta / bestLocality;
locality += normalizedDelta;
}

@@ -1067,7 +1076,8 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {

static class RackLocalityCostFunction extends LocalityBasedCostFunction {

private static final String RACK_LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.rackLocalityCost";
private static final String RACK_LOCALITY_COST_KEY =
"hbase.master.balancer.stochastic.rackLocalityCost";
private static final float DEFAULT_RACK_LOCALITY_COST = 15;

public RackLocalityCostFunction(Configuration conf) {

@@ -34,12 +34,16 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.favored.FavoredNodeLoadBalancer;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.balancer.ClusterInfoProvider;
import org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.balancer.MasterClusterInfoProvider;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ReflectionUtils;

@@ -114,7 +118,6 @@ public class RSGroupBasedLoadBalancer implements LoadBalancer {
}
}

@Override
public void setMasterServices(MasterServices masterServices) {
this.masterServices = masterServices;
}

@@ -362,14 +365,23 @@ public class RSGroupBasedLoadBalancer implements LoadBalancer {
balancerClass = LoadBalancerFactory.getDefaultLoadBalancerClass();
}
internalBalancer = ReflectionUtils.newInstance(balancerClass);
if (internalBalancer instanceof FavoredNodesPromoter) {
favoredNodesManager = new FavoredNodesManager(masterServices);
}
internalBalancer.setConf(config);
internalBalancer.setMasterServices(masterServices);
internalBalancer.setClusterInfoProvider(new MasterClusterInfoProvider(masterServices));
if(clusterStatus != null) {
internalBalancer.setClusterMetrics(clusterStatus);
}
// special handling for favor node balancers
if (internalBalancer instanceof FavoredNodesPromoter) {
favoredNodesManager = new FavoredNodesManager(masterServices);
if (internalBalancer instanceof FavoredNodeLoadBalancer) {
((FavoredNodeLoadBalancer) internalBalancer).setMasterServices(masterServices);
}
if (internalBalancer instanceof FavoredStochasticBalancer) {
((FavoredStochasticBalancer) internalBalancer).setMasterServices(masterServices);
}
}

internalBalancer.initialize();
// init fallback groups
this.fallbackEnabled = config.getBoolean(FALLBACK_GROUP_ENABLE_KEY, false);

@@ -479,4 +491,9 @@ public class RSGroupBasedLoadBalancer implements LoadBalancer {
}
return serverNames == null || serverNames.isEmpty() ? servers : serverNames;
}

@Override
public void setClusterInfoProvider(ClusterInfoProvider provider) {
throw new UnsupportedOperationException("Just call set master service instead");
}
}
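
For orientation, the fragment below condenses the delegate wiring from the hunk above into a standalone sketch: the group-based wrapper keeps receiving MasterServices, while its internal balancer is now fed a MasterClusterInfoProvider. The helper class, method name, and parameter layout are assumptions for illustration; only calls that appear in this diff are used.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.balancer.MasterClusterInfoProvider;
import org.apache.hadoop.hbase.util.ReflectionUtils;

// Illustrative helper, not part of this commit: a condensed paraphrase of the wiring above.
final class InternalBalancerWiringSketch {

  static LoadBalancer wire(Class<? extends LoadBalancer> balancerClass, Configuration config,
      MasterServices masterServices, ClusterMetrics clusterStatus) throws IOException {
    LoadBalancer internalBalancer = ReflectionUtils.newInstance(balancerClass);
    internalBalancer.setConf(config);
    // New in this commit: the delegate talks to the master through a ClusterInfoProvider
    // wrapper rather than holding a MasterServices reference itself.
    internalBalancer.setClusterInfoProvider(new MasterClusterInfoProvider(masterServices));
    if (clusterStatus != null) {
      internalBalancer.setClusterMetrics(clusterStatus);
    }
    // Favored-node promoters still receive MasterServices directly, as the hunk above shows.
    internalBalancer.initialize();
    return internalBalancer;
  }
}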

@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
import org.apache.hadoop.hbase.favored.FavoredNodeLoadBalancer;
import org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.balancer.MasterClusterInfoProvider;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.AfterClass;

@@ -46,8 +47,6 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category({MasterTests.class, MediumTests.class})
public class TestRegionPlacement2 {

@@ -56,7 +55,6 @@ public class TestRegionPlacement2 {
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestRegionPlacement2.class);

private static final Logger LOG = LoggerFactory.getLogger(TestRegionPlacement2.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static int SLAVES = 7;
private final static int PRIMARY = Position.PRIMARY.ordinal();

@@ -83,7 +81,10 @@ public class TestRegionPlacement2 {

@Test
public void testFavoredNodesPresentForRoundRobinAssignment() throws IOException {
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
FavoredNodeLoadBalancer balancer =
(FavoredNodeLoadBalancer) LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
balancer.setClusterInfoProvider(
new MasterClusterInfoProvider(TEST_UTIL.getMiniHBaseCluster().getMaster()));
balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
balancer.initialize();
List<ServerName> servers = new ArrayList<>();

@@ -144,7 +145,10 @@ public class TestRegionPlacement2 {

@Test
public void testFavoredNodesPresentForRandomAssignment() throws IOException {
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
FavoredNodeLoadBalancer balancer =
(FavoredNodeLoadBalancer) LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
balancer.setClusterInfoProvider(
new MasterClusterInfoProvider(TEST_UTIL.getMiniHBaseCluster().getMaster()));
balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
balancer.initialize();
List<ServerName> servers = new ArrayList<>();

@@ -58,7 +58,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
/**
* Base UT of RSGroupableBalancer.
*/
public class RSGroupableBalancerTestBase extends BalancerTestBase{
public class RSGroupableBalancerTestBase extends BalancerTestBase {

static SecureRandom rand = new SecureRandom();
static String[] groups = new String[] { RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4" };

@@ -0,0 +1,94 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.balancer;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;

import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.junit.BeforeClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class StochasticBalancerTestBase extends BalancerTestBase {

private static final Logger LOG = LoggerFactory.getLogger(StochasticBalancerTestBase.class);

protected static StochasticLoadBalancer loadBalancer;

@BeforeClass
public static void beforeAllTests() throws Exception {
conf = HBaseConfiguration.create();
conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 0.75f);
conf.setFloat("hbase.regions.slop", 0.0f);
conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
loadBalancer = new StochasticLoadBalancer();
loadBalancer.setConf(conf);
}

protected void testWithCluster(int numNodes, int numRegions, int numRegionsPerServer,
int replication, int numTables, boolean assertFullyBalanced,
boolean assertFullyBalancedForReplicas) {
Map<ServerName, List<RegionInfo>> serverMap =
createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
testWithCluster(serverMap, null, assertFullyBalanced, assertFullyBalancedForReplicas);
}

protected void testWithCluster(Map<ServerName, List<RegionInfo>> serverMap,
RackManager rackManager, boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) {
List<ServerAndLoad> list = convertToList(serverMap);
LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));

loadBalancer.setRackManager(rackManager);
// Run the balancer.
Map<TableName, Map<ServerName, List<RegionInfo>>> LoadOfAllTable =
(Map) mockClusterServersWithTables(serverMap);
List<RegionPlan> plans = loadBalancer.balanceCluster(LoadOfAllTable);
assertNotNull("Initial cluster balance should produce plans.", plans);

// Check to see that this actually got to a stable place.
if (assertFullyBalanced || assertFullyBalancedForReplicas) {
// Apply the plan to the mock cluster.
List<ServerAndLoad> balancedCluster = reconcile(list, plans, serverMap);

// Print out the cluster loads to make debugging easier.
LOG.info("Mock Balance : " + printMock(balancedCluster));

if (assertFullyBalanced) {
assertClusterAsBalanced(balancedCluster);
LoadOfAllTable = (Map) mockClusterServersWithTables(serverMap);
List<RegionPlan> secondPlans = loadBalancer.balanceCluster(LoadOfAllTable);
assertNull("Given a requirement to be fully balanced, second attempt at plans should " +
"produce none.", secondPlans);
}

if (assertFullyBalancedForReplicas) {
assertRegionReplicaPlacement(serverMap, rackManager);
}
}
}
}
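
To show how the new base class is meant to be consumed, here is a hypothetical concrete test written in the style of the renamed subclasses that follow; the class name and cluster dimensions are invented for illustration and are not part of this commit.

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

// Hypothetical example: a concrete test built on the StochasticBalancerTestBase above.
@Category({ MasterTests.class, MediumTests.class })
public class TestStochasticLoadBalancerTinyClusterExample extends StochasticBalancerTestBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestStochasticLoadBalancerTinyClusterExample.class);

  @Test
  public void testTinyCluster() {
    // Assumed dimensions: 5 nodes, 100 regions, 20 regions per server, replication 1, one table;
    // expect a fully balanced mock cluster and skip the replica-placement assertion.
    testWithCluster(5, 100, 20, 1, 1, true, false);
  }
}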

@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.master.balancer;
import org.junit.After;
import org.junit.Before;

public class BalancerTestBase2 extends BalancerTestBase {
public class StochasticBalancerTestBase2 extends StochasticBalancerTestBase {

@Before
public void before() {

@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RecentLogs;
* Test BalancerDecision ring buffer using namedQueue interface
*/
@Category({ MasterTests.class, MediumTests.class })
public class TestBalancerDecision extends BalancerTestBase {
public class TestBalancerDecision extends StochasticBalancerTestBase {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

@@ -59,7 +59,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;

@Category({ MasterTests.class, MediumTests.class })
public class TestStochasticLoadBalancer extends BalancerTestBase {
public class TestStochasticLoadBalancer extends StochasticBalancerTestBase {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

@@ -35,7 +35,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category({ MasterTests.class, LargeTests.class })
public class TestStochasticLoadBalancerBalanceCluster extends BalancerTestBase {
public class TestStochasticLoadBalancerBalanceCluster extends StochasticBalancerTestBase {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

@@ -47,7 +47,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category({ MasterTests.class, MediumTests.class })
public class TestStochasticLoadBalancerHeterogeneousCost extends BalancerTestBase {
public class TestStochasticLoadBalancerHeterogeneousCost extends StochasticBalancerTestBase {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestStochasticLoadBalancerHeterogeneousCost.class);

@@ -60,23 +60,23 @@ public class TestStochasticLoadBalancerHeterogeneousCost extends BalancerTestBas

@BeforeClass
public static void beforeAllTests() throws IOException {
BalancerTestBase.conf = HTU.getConfiguration();
BalancerTestBase.conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 0);
BalancerTestBase.conf.setFloat("hbase.master.balancer.stochastic.primaryRegionCountCost", 0);
BalancerTestBase.conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 0);
BalancerTestBase.conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", true);
BalancerTestBase.conf.set(StochasticLoadBalancer.COST_FUNCTIONS_COST_FUNCTIONS_KEY,
conf = HTU.getConfiguration();
conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 0);
conf.setFloat("hbase.master.balancer.stochastic.primaryRegionCountCost", 0);
conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 0);
conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", true);
conf.set(StochasticLoadBalancer.COST_FUNCTIONS_COST_FUNCTIONS_KEY,
HeterogeneousRegionCountCostFunction.class.getName());
// Need to ensure test dir has been created.
assertTrue(FileSystem.get(HTU.getConfiguration()).mkdirs(HTU.getDataTestDir()));
RULES_FILE = HTU.getDataTestDir(
TestStochasticLoadBalancerHeterogeneousCostRules.DEFAULT_RULES_FILE_NAME).toString();
BalancerTestBase.conf.set(
conf.set(
HeterogeneousRegionCountCostFunction.HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_FILE,
RULES_FILE);
BalancerTestBase.loadBalancer = new StochasticLoadBalancer();
BalancerTestBase.loadBalancer.setConf(BalancerTestBase.conf);
BalancerTestBase.loadBalancer.getCandidateGenerators().add(new FairRandomCandidateGenerator());
loadBalancer = new StochasticLoadBalancer();
loadBalancer.setConf(BalancerTestBase.conf);
loadBalancer.getCandidateGenerators().add(new FairRandomCandidateGenerator());
}

@Test

@@ -141,8 +141,6 @@ public class TestStochasticLoadBalancerHeterogeneousCost extends BalancerTestBas

@Test
public void testOverloaded() throws IOException {
final List<String> rules = Collections.singletonList("rs[0-1] 50");

final int numNodes = 2;
final int numRegions = 120;
final int numRegionsPerServer = 60;

@@ -151,7 +149,7 @@ public class TestStochasticLoadBalancerHeterogeneousCost extends BalancerTestBas
final Map<ServerName, List<RegionInfo>> serverMap =
this.createServerMap(numNodes, numRegions, numRegionsPerServer, 1, 1);
final List<RegionPlan> plans =
BalancerTestBase.loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap);
loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap);
// As we disabled all the other cost functions, balancing only according to
// the heterogeneous cost function should return nothing.
assertNull(plans);

@@ -172,11 +170,11 @@ public class TestStochasticLoadBalancerHeterogeneousCost extends BalancerTestBas
final List<ServerAndLoad> list = this.convertToList(serverMap);
LOG.info("Mock Cluster : " + this.printMock(list) + " " + this.printStats(list));

BalancerTestBase.loadBalancer.setRackManager(rackManager);
loadBalancer.setRackManager(rackManager);

// Run the balancer.
final List<RegionPlan> plans =
BalancerTestBase.loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap);
loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap);
assertNotNull(plans);

// Check to see that this actually got to a stable place.

@@ -189,7 +187,7 @@ public class TestStochasticLoadBalancerHeterogeneousCost extends BalancerTestBas

if (assertFullyBalanced) {
final List<RegionPlan> secondPlans =
BalancerTestBase.loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap);
loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap);
assertNull(secondPlans);

// create external cost function to retrieve limit

@@ -39,10 +39,8 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

import static junit.framework.TestCase.assertTrue;

@Category({ MasterTests.class, MediumTests.class })
public class TestStochasticLoadBalancerHeterogeneousCostRules extends BalancerTestBase {
public class TestStochasticLoadBalancerHeterogeneousCostRules extends StochasticBalancerTestBase {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestStochasticLoadBalancerHeterogeneousCostRules.class);

@@ -25,7 +25,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MasterTests.class, MediumTests.class })
public class TestStochasticLoadBalancerLargeCluster extends BalancerTestBase {
public class TestStochasticLoadBalancerLargeCluster extends StochasticBalancerTestBase {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

@@ -25,7 +25,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MasterTests.class, LargeTests.class })
public class TestStochasticLoadBalancerMidCluster extends BalancerTestBase {
public class TestStochasticLoadBalancerMidCluster extends StochasticBalancerTestBase {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

@@ -42,7 +42,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MasterTests.class, LargeTests.class })
public class TestStochasticLoadBalancerRegionReplica extends BalancerTestBase {
public class TestStochasticLoadBalancerRegionReplica extends StochasticBalancerTestBase {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

@@ -25,7 +25,8 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MasterTests.class, MediumTests.class })
public class TestStochasticLoadBalancerRegionReplicaHighReplication extends BalancerTestBase2 {
public class TestStochasticLoadBalancerRegionReplicaHighReplication
extends StochasticBalancerTestBase2 {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

@@ -25,7 +25,8 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MasterTests.class, LargeTests.class })
public class TestStochasticLoadBalancerRegionReplicaLargeCluster extends BalancerTestBase2 {
public class TestStochasticLoadBalancerRegionReplicaLargeCluster
extends StochasticBalancerTestBase2 {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

@@ -25,7 +25,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MasterTests.class, LargeTests.class })
public class TestStochasticLoadBalancerRegionReplicaMidCluster extends BalancerTestBase2 {
public class TestStochasticLoadBalancerRegionReplicaMidCluster extends StochasticBalancerTestBase2 {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

@@ -26,7 +26,7 @@ import org.junit.experimental.categories.Category;

@Category({ MasterTests.class, LargeTests.class })
public class TestStochasticLoadBalancerRegionReplicaReplicationGreaterThanNumNodes
extends BalancerTestBase2 {
extends StochasticBalancerTestBase2 {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule

@@ -31,7 +31,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MasterTests.class, MediumTests.class })
public class TestStochasticLoadBalancerRegionReplicaSameHosts extends BalancerTestBase {
public class TestStochasticLoadBalancerRegionReplicaSameHosts extends StochasticBalancerTestBase {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

@@ -30,7 +30,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MasterTests.class, LargeTests.class })
public class TestStochasticLoadBalancerRegionReplicaWithRacks extends BalancerTestBase {
public class TestStochasticLoadBalancerRegionReplicaWithRacks extends StochasticBalancerTestBase {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

@@ -25,7 +25,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MasterTests.class, MediumTests.class })
public class TestStochasticLoadBalancerSmallCluster extends BalancerTestBase {
public class TestStochasticLoadBalancerSmallCluster extends StochasticBalancerTestBase {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

pom.xml

@@ -1927,6 +1927,13 @@
<groupId>org.apache.hbase</groupId>
<version>${project.version}</version>
</dependency>
<dependency>
<artifactId>hbase-balancer</artifactId>
<groupId>org.apache.hbase</groupId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<artifactId>hbase-http</artifactId>
<groupId>org.apache.hbase</groupId>