HBASE-8390 Trunk/0.95 cannot simply compile against Hadoop 1.0
HBASE-8391 StochasticLoadBalancer doesn't call needsBalance

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1471050 13f79535-47bb-0310-9956-ffa450edef68
commit 0aadc0b0f4 (parent e5ea7fb8b3)
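
HBASE-8390 makes the build select its Hadoop dependencies through the hadoop.profile property, with Hadoop 1.1 as the default; the pom changes below show the wiring. A minimal sketch of the resulting invocations, assuming a checkout of the patched tree (the "clean install -DskipTests" goals are illustrative; only the -Dhadoop.profile flag comes from this commit):

    # Default: the hadoop-1.1 profile activates when hadoop.profile is unset.
    mvn clean install -DskipTests
    # Build against the Hadoop 1.0.x profile added by this commit.
    mvn clean install -DskipTests -Dhadoop.profile=1.0
    # Build against the Hadoop 2.0.x profile.
    mvn clean install -DskipTests -Dhadoop.profile=2.0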
dev-support/test-patch.sh

@@ -375,6 +375,34 @@ checkHadoop20Compile () {
   return 0
 }
 
+###############################################################################
+### Attempt to compile against the hadoop 1.0
+checkHadoop10Compile () {
+  echo ""
+  echo ""
+  echo "======================================================================"
+  echo "======================================================================"
+  echo "    Checking against hadoop 1.0 build"
+  echo "======================================================================"
+  echo "======================================================================"
+  echo ""
+  echo ""
+
+  export MAVEN_OPTS="${MAVEN_OPTS}"
+  # build core and tests
+  $MVN clean test help:active-profiles -X -DskipTests -Dhadoop.profile=1.0 -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunk1.0JavacWarnings.txt 2>&1
+  if [[ $? != 0 ]] ; then
+    JIRA_COMMENT="$JIRA_COMMENT
+
+    {color:red}-1 hadoop1.0{color}. The patch failed to compile against the hadoop 1.0 profile."
+    cleanupAndExit 1
+  fi
+  JIRA_COMMENT="$JIRA_COMMENT
+
+    {color:green}+1 hadoop1.0{color}. The patch compiles against the hadoop 1.0 profile."
+  return 0
+}
+
 
 ###############################################################################
 ### Check there are no javadoc warnings

@@ -836,6 +864,8 @@ if [[ $? != 0 ]] ; then
   cleanupAndExit 1
 fi
 
+checkHadoop10Compile
+(( RESULT = RESULT + $? ))
 checkHadoop20Compile
 (( RESULT = RESULT + $? ))
 checkJavadocWarnings
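Standalone, the compile probe that the new checkHadoop10Compile function performs boils down to the sketch below; MVN, PATCH_DIR, and PROJECT_NAME are normally supplied by test-patch.sh itself, so the values here are hypothetical stand-ins:

    # Hypothetical stand-ins for variables that test-patch.sh provides.
    MVN=mvn
    PATCH_DIR=/tmp/patchprocess
    PROJECT_NAME=HBASE
    mkdir -p "$PATCH_DIR"

    # The same compile check the new function runs, capturing javac output.
    $MVN clean test help:active-profiles -X -DskipTests -Dhadoop.profile=1.0 \
      -D${PROJECT_NAME}PatchProcess > "$PATCH_DIR/trunk1.0JavacWarnings.txt" 2>&1

    # A non-zero exit status is what produces the "-1 hadoop1.0" JIRA comment.
    echo "exit status: $?"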
@@ -211,11 +211,11 @@
     </properties>
   </profile>
 
-  <!-- profile against Hadoop 1.0.x: This is the default. It has to have the same
-    activation property as the parent Hadoop 1.0.x profile to make sure it gets run at
+  <!-- profile against Hadoop 1.1.x: This is the default. It has to have the same
+    activation property as the parent Hadoop 1.1.x profile to make sure it gets run at
     the same time. -->
   <profile>
-    <id>hadoop-1.0</id>
+    <id>hadoop-1.1</id>
     <activation>
       <property>
         <name>!hadoop.profile</name>

@@ -229,6 +229,29 @@
     </dependencies>
   </profile>
 
+  <!-- profile against Hadoop 1.0.x:
+       mvn -Dhadoop.profile=1.0
+  -->
+  <profile>
+    <id>hadoop-1.0</id>
+    <activation>
+      <property>
+        <name>hadoop.profile</name>
+        <value>1.0</value>
+      </property>
+    </activation>
+    <dependencies>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-core</artifactId>
+      </dependency>
+      <dependency>
+        <groupId>commons-io</groupId>
+        <artifactId>commons-io</artifactId>
+      </dependency>
+    </dependencies>
+  </profile>
+
   <!--
     profile for building against Hadoop 2.0.0-alpha. Activate using:
       mvn -Dhadoop.profile=2.0
@@ -110,11 +110,11 @@
   <!-- Profiles for building against different hadoop versions -->
   <!-- There are a lot of common dependencies used here, should investigate
     if we can combine these profiles somehow -->
-  <!-- profile against Hadoop 1.0.x: This is the default. It has to have the same
-    activation property as the parent Hadoop 1.0.x profile to make sure it gets run at
+  <!-- profile against Hadoop 1.1.x: This is the default. It has to have the same
+    activation property as the parent Hadoop 1.1.x profile to make sure it gets run at
     the same time. -->
   <profile>
-    <id>hadoop-1.0</id>
+    <id>hadoop-1.1</id>
     <activation>
       <property>
         <name>!hadoop.profile</name>
@@ -186,11 +186,11 @@
     </properties>
   </profile>
 
-  <!-- profile against Hadoop 1.0.x: This is the default. It has to have the same
-    activation property as the parent Hadoop 1.0.x profile to make sure it gets run at
+  <!-- profile against Hadoop 1.1.x: This is the default. It has to have the same
+    activation property as the parent Hadoop 1.1.x profile to make sure it gets run at
     the same time. -->
   <profile>
-    <id>hadoop-1.0</id>
+    <id>hadoop-1.1</id>
     <activation>
       <property>
         <name>!hadoop.profile</name>
@@ -564,14 +564,33 @@
   <!-- Profiles for building against different hadoop versions -->
   <!-- There are a lot of common dependencies used here, should investigate
     if we can combine these profiles somehow -->
-  <!-- profile against Hadoop 1.0.x: This is the default. It has to have the same
-    activation property as the parent Hadoop 1.0.x profile to make sure it gets run at
+  <!-- profile against Hadoop 1.1.x: This is the default. It has to have the same
+    activation property as the parent Hadoop 1.1.x profile to make sure it gets run at
     the same time. -->
   <profile>
+    <id>hadoop-1.1</id>
+    <activation>
+      <property>
+        <name>!hadoop.profile</name>
+      </property>
+    </activation>
+    <dependencies>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-core</artifactId>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-test</artifactId>
+      </dependency>
+    </dependencies>
+  </profile>
+  <profile>
     <id>hadoop-1.0</id>
     <activation>
       <property>
-        <name>!hadoop.profile</name>
+        <name>hadoop.profile</name>
+        <value>1.0</value>
       </property>
     </activation>
     <dependencies>
BaseLoadBalancer.java

@@ -25,6 +25,7 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.NavigableMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -294,13 +295,25 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   }
 
   protected boolean needsBalance(ClusterLoadState cs) {
+    if (cs.getNumServers() == 0) {
+      LOG.debug("numServers=0 so skipping load balancing");
+      return false;
+    }
     // Check if we even need to do any load balancing
-    float average = cs.getLoadAverage(); // for logging
     // HBASE-3681 check sloppiness first
+    float average = cs.getLoadAverage(); // for logging
     int floor = (int) Math.floor(average * (1 - slop));
     int ceiling = (int) Math.ceil(average * (1 + slop));
-
-    return cs.getMinLoad() > ceiling || cs.getMaxLoad() < floor;
+    if (!(cs.getMinLoad() > ceiling || cs.getMaxLoad() < floor)) {
+      NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();
+      LOG.info("Skipping load balancing because balanced cluster; " +
+        "servers=" + cs.getNumServers() + " " +
+        "regions=" + cs.getNumRegions() + " average=" + average + " " +
+        "mostloaded=" + serversByLoad.lastKey().getLoad() +
+        " leastloaded=" + serversByLoad.firstKey().getLoad());
+      return false;
+    }
+    return true;
   }
 
   /**
DefaultLoadBalancer.java

@@ -183,29 +183,13 @@ public class DefaultLoadBalancer extends BaseLoadBalancer {
     boolean emptyRegionServerPresent = false;
     long startTime = System.currentTimeMillis();
 
     ClusterLoadState cs = new ClusterLoadState(clusterMap);
+
+    if (!this.needsBalance(cs)) return null;
 
     int numServers = cs.getNumServers();
-    if (numServers == 0) {
-      LOG.debug("numServers=0 so skipping load balancing");
-      return null;
-    }
     NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();
-
     int numRegions = cs.getNumRegions();
-
-    if (!this.needsBalance(cs)) {
-      // Skipped because no server outside (min,max) range
-      float average = cs.getLoadAverage(); // for logging
-      LOG.info("Skipping load balancing because balanced cluster; " +
-        "servers=" + numServers + " " +
-        "regions=" + numRegions + " average=" + average + " " +
-        "mostloaded=" + serversByLoad.lastKey().getLoad() +
-        " leastloaded=" + serversByLoad.firstKey().getLoad());
-      return null;
-    }
-
     int min = numRegions / numServers;
     int max = numRegions % numServers == 0 ? min : min + 1;
StochasticLoadBalancer.java

@@ -116,7 +116,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
   // values are defaults
   private int maxSteps = 15000;
   private int stepsPerRegion = 110;
-  private long maxRunningTime = 1 * 60 * 1000; //5 min
+  private long maxRunningTime = 60 * 1000; //1 min
   private int maxMoves = 600;
   private int numRegionLoadsToRemember = 15;
   private float loadMultiplier = 100;

@@ -179,10 +179,8 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
    */
   @Override
   public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
-
-    // No need to balance a one node cluster.
-    if (clusterState.size() <= 1) {
-      LOG.debug("Skipping load balance as cluster has only one node.");
+
+    if (!needsBalance(new ClusterLoadState(clusterState))) {
       return null;
     }
 

@@ -242,7 +240,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
     List<RegionPlan> plans = createRegionPlans(cluster);
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Finished computing new laod balance plan. Computation took "
+      LOG.debug("Finished computing new load balance plan. Computation took "
          + (endTime - startTime) + "ms to try " + step
          + " different iterations. Found a solution that moves " + plans.size()
          + " regions; Going from a computed cost of " + initCost + " to a new cost of "
pom.xml
@@ -1410,9 +1410,9 @@
     profiles with activation properties matching the profile here.
     Generally, it should be sufficient to copy the first
     few lines of the profile you want to match. -->
-  <!-- profile against Hadoop 1.0.x: This is the default. -->
+  <!-- profile against Hadoop 1.1.x: This is the default. -->
   <profile>
-    <id>hadoop-1.0</id>
+    <id>hadoop-1.1</id>
     <activation>
       <property>
         <name>!hadoop.profile</name>

@@ -1467,6 +1467,68 @@
       </dependencies>
     </dependencyManagement>
   </profile>
 
+  <!-- profile for building against Hadoop 1.0.x: -->
+  <profile>
+    <id>hadoop-1.0</id>
+    <activation>
+      <property>
+        <name>hadoop.profile</name>
+        <value>1.0</value>
+      </property>
+    </activation>
+    <modules>
+      <module>hbase-hadoop1-compat</module>
+    </modules>
+    <properties>
+      <hadoop.version>1.0.4</hadoop.version>
+      <!-- Need to set this for the Hadoop 1 compat module -->
+      <hadoop-one.version>${hadoop.version}</hadoop-one.version>
+      <slf4j.version>1.4.3</slf4j.version>
+      <compat.module>hbase-hadoop1-compat</compat.module>
+      <assembly.file>src/main/assembly/hadoop-one-compat.xml</assembly.file>
+    </properties>
+    <dependencyManagement>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+          <version>${hadoop.version}</version>
+          <optional>true</optional>
+          <exclusions>
+            <exclusion>
+              <groupId>hsqldb</groupId>
+              <artifactId>hsqldb</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>net.sf.kosmosfs</groupId>
+              <artifactId>kfs</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>org.eclipse.jdt</groupId>
+              <artifactId>core</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>net.java.dev.jets3t</groupId>
+              <artifactId>jets3t</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>oro</groupId>
+              <artifactId>oro</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-test</artifactId>
+          <version>${hadoop.version}</version>
+          <optional>true</optional>
+          <scope>test</scope>
+        </dependency>
+      </dependencies>
+    </dependencyManagement>
+  </profile>
+
   <!-- profile for building against Hadoop 2.0.x
     Activate using: mvn -Dhadoop.profile=2.0 -->
   <profile>
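
To confirm which Hadoop profile a given invocation resolves to, the help:active-profiles goal that the test-patch.sh change above already uses works on its own; a small sketch:

    # Lists hadoop-1.1 (activated because hadoop.profile is unset).
    mvn help:active-profiles
    # Lists the hadoop-1.0 profile introduced by this commit.
    mvn help:active-profiles -Dhadoop.profile=1.0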