HBASE-5064 utilize surefire tests parallelization (N Keywal)
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1226083 13f79535-47bb-0310-9956-ffa450edef68
commit d499eedb3c
parent 10e6defda5
@@ -567,9 +567,9 @@ runTests () {
   failed_tests=""
   ### Kill any rogue build processes from the last attempt
   $PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
-  echo "$MVN clean test -D${PROJECT_NAME}PatchProcess"
+  echo "$MVN clean test -P runAllTests -D${PROJECT_NAME}PatchProcess -Dsurefire.secondPartThreadCount=4"
   ulimit -a
-  $MVN clean test -P runAllTests -D${PROJECT_NAME}PatchProcess
+  $MVN clean test -P runAllTests -D${PROJECT_NAME}PatchProcess -Dsurefire.secondPartThreadCount=4
   if [[ $? != 0 ]] ; then
   ### Find and format names of failed tests
   failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-| |g" | sed -e "s|\.xml||g"`

pom.xml (15 lines changed)
@@ -627,6 +627,7 @@
             <parallel>${surefire.firstPartParallel}</parallel>
             <perCoreThreadCount>false</perCoreThreadCount>
             <threadCount>${surefire.firstPartThreadCount}</threadCount>
+            <parallel>classes</parallel><!-- surefire hack, if not we're using method parallelisation class !-->
             <groups>${surefire.firstPartGroups}</groups>
             <testFailureIgnore>false</testFailureIgnore>
           </configuration>
@@ -638,8 +639,10 @@
           <configuration>
             <skip>${surefire.skipSecondPart}</skip>
             <testFailureIgnore>false</testFailureIgnore>
-            <forkMode>always</forkMode>
-            <parallel>none</parallel>
+            <forkMode>perThread</forkMode>
+            <perCoreThreadCount>false</perCoreThreadCount>
+            <threadCount>${surefire.secondPartThreadCount}</threadCount>
+            <parallel>classes</parallel><!-- surefire hack, if not we're using method parallelisation class !-->
             <groups>${surefire.secondPartGroups}</groups>
           </configuration>
         </execution>
@@ -901,16 +904,17 @@
     <unittest.include>**/Test*.java</unittest.include>
     <integrationtest.include>**/IntegrationTest*.java</integrationtest.include>

-    <surefire.version>2.11-TRUNK-HBASE-2</surefire.version>
+    <surefire.version>2.12-TRUNK-HBASE-2</surefire.version>
     <surefire.provider>surefire-junit47</surefire.provider>

-    <!-- default: run small & medium -->
+    <!-- default: run small & medium, medium with 2 threads -->
     <surefire.skipFirstPart>false</surefire.skipFirstPart>
     <surefire.skipSecondPart>false</surefire.skipSecondPart>

     <surefire.firstPartForkMode>once</surefire.firstPartForkMode>
-    <surefire.firstPartParallel>none</surefire.firstPartParallel>
+    <surefire.firstPartParallel>classes</surefire.firstPartParallel>
     <surefire.firstPartThreadCount>1</surefire.firstPartThreadCount>
+    <surefire.secondPartThreadCount>2</surefire.secondPartThreadCount>

     <surefire.firstPartGroups>org.apache.hadoop.hbase.SmallTests</surefire.firstPartGroups>
     <surefire.secondPartGroups>org.apache.hadoop.hbase.MediumTests</surefire.secondPartGroups>
@@ -2000,6 +2004,7 @@
         <surefire.firstPartForkMode>once</surefire.firstPartForkMode>
         <surefire.firstPartParallel>none</surefire.firstPartParallel>
         <surefire.firstPartThreadCount>1</surefire.firstPartThreadCount>
+        <surefire.secondPartThreadCount>2</surefire.secondPartThreadCount>

         <surefire.skipFirstPart>false</surefire.skipFirstPart>
         <surefire.skipSecondPart>false</surefire.skipSecondPart>
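
Note on how the two-part split selects tests: the surefire-junit47 provider filters on JUnit categories, so surefire.firstPartGroups / surefire.secondPartGroups above name the annotation classes whose tests each execution runs. Below is a minimal sketch of that mechanism, assuming the standard JUnit 4.7+ API; the test class is hypothetical, and the marker interfaces are declared inline only to keep the sketch self-contained (HBase's real markers are the org.apache.hadoop.hbase.SmallTests / MediumTests classes referenced above).

    import org.junit.Test;
    import org.junit.experimental.categories.Category;
    import static org.junit.Assert.assertEquals;

    // Marker interfaces used as category labels; inline stand-ins for the
    // real org.apache.hadoop.hbase.SmallTests / MediumTests classes.
    interface SmallTests {}
    interface MediumTests {}

    // Hypothetical test: with the POM above, the first surefire execution
    // runs it (groups=SmallTests) and the second execution skips it.
    @Category(SmallTests.class)
    public class TestExampleSmall {
      @Test
      public void addsUp() {
        assertEquals(4, 2 + 2);
      }
    }

The firstPartParallel=classes change then lets the first execution run whole test classes concurrently inside a single JVM, while the second part gets its parallelism from forked JVMs: forkMode=perThread with threadCount N runs the medium tests in N concurrent forks.
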
@@ -262,6 +262,11 @@ public class HBaseTestingUtility {
    * instances -- another instance could grab the temporary
    * value unintentionally -- but not anything can do about it at moment;
    * single instance only is how the minidfscluster works.
+   *
+   * We also create the underlying directory for
+   * hadoop.log.dir, mapred.local.dir and hadoop.tmp.dir, and set the values
+   * in the conf, and as a system property for hadoop.tmp.dir
+   *
    * @return The calculated data test build directory.
    */
   private void setupDataTestDir() {
@@ -272,13 +277,62 @@ public class HBaseTestingUtility {
     }

     String randomStr = UUID.randomUUID().toString();
-    Path testPath= new Path(
-      getBaseTestDir(),
-      randomStr
-    );
+    Path testPath= new Path(getBaseTestDir(), randomStr);

     dataTestDir = new File(testPath.toString()).getAbsoluteFile();
     dataTestDir.deleteOnExit();
+
+    createSubDirAndSystemProperty(
+      "hadoop.log.dir",
+      testPath, "hadoop-log-dir");
+
+    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
+    // we want our own value to ensure uniqueness on the same machine
+    createSubDirAndSystemProperty(
+      "hadoop.tmp.dir",
+      testPath, "hadoop-tmp-dir");
+
+    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
+    createSubDir(
+      "mapred.local.dir",
+      testPath, "mapred-local-dir");
+
+    createSubDirAndSystemProperty(
+      "mapred.working.dir",
+      testPath, "mapred-working-dir");
+  }
+
+  private void createSubDir(String propertyName, Path parent, String subDirName){
+    Path newPath= new Path(parent, subDirName);
+    File newDir = new File(newPath.toString()).getAbsoluteFile();
+    newDir.deleteOnExit();
+    conf.set(propertyName, newDir.getAbsolutePath());
+  }
+
+  private void createSubDirAndSystemProperty(
+    String propertyName, Path parent, String subDirName){
+
+    String sysValue = System.getProperty(propertyName);
+
+    if (sysValue != null) {
+      // There is already a value set. So we do nothing but hope
+      // that there will be no conflicts
+      LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
+        sysValue + " so I do NOT create it in "+dataTestDir.getAbsolutePath());
+      String confValue = conf.get(propertyName);
+      if (confValue != null && !confValue.endsWith(sysValue)){
+        LOG.warn(
+          propertyName + " property value differs in configuration and system: "+
+          "Configuration="+confValue+" while System="+sysValue+
+          " Erasing configuration value by system value."
+        );
+      }
+      conf.set(propertyName, sysValue);
+    } else {
+      // Ok, it's not set, so we create it as a subdirectory
+      createSubDir(propertyName, parent, subDirName);
+      System.setProperty(propertyName, conf.get(propertyName));
+    }
   }

   /**
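
The motivation for createSubDir / createSubDirAndSystemProperty: when several forked JVMs run tests in parallel on one machine, any directory they all derive from a shared default collides. hadoop.tmp.dir, for instance, defaults in core-default.xml to /tmp/hadoop-${user.name}, which is identical in every fork; rooting it under the per-instance UUID directory keeps concurrent minicluster instances apart. A small standalone sketch of the contrast (class name and paths are illustrative only, not part of the patch):

    import java.util.UUID;

    public class TmpDirSketch {
      public static void main(String[] args) {
        // core-default.xml default: the same path in every forked JVM, so
        // parallel miniclusters would trample each other's state.
        String shared = "/tmp/hadoop-" + System.getProperty("user.name");

        // The patch's scheme: every per-run directory hangs off a random
        // UUID, so concurrent JVMs on one machine never share a path.
        String unique = "target/test-data/" + UUID.randomUUID() + "/hadoop-tmp-dir";

        System.out.println("default (collides across forks): " + shared);
        System.out.println("per-instance (parallel-safe):    " + unique);
      }
    }
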
|
||||||
|
@@ -1215,13 +1269,11 @@ public class HBaseTestingUtility {
   public void startMiniMapReduceCluster(final int servers) throws IOException {
     LOG.info("Starting mini mapreduce cluster...");
     // These are needed for the new and improved Map/Reduce framework
-    Configuration c = getConfiguration();
-    System.setProperty("hadoop.log.dir", c.get("hadoop.log.dir"));
-    c.set("mapred.output.dir", c.get("hadoop.tmp.dir"));
+    conf.set("mapred.output.dir", conf.get("hadoop.tmp.dir"));
     mrCluster = new MiniMRCluster(servers,
-      FileSystem.get(c).getUri().toString(), 1);
+      FileSystem.get(conf).getUri().toString(), 1);
     LOG.info("Mini mapreduce cluster started");
-    c.set("mapred.job.tracker",
+    conf.set("mapred.job.tracker",
       mrCluster.createJobConf().get("mapred.job.tracker"));
   }
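
With setupDataTestDir() now exporting hadoop.log.dir and hadoop.tmp.dir itself, startMiniMapReduceCluster() drops its manual System.setProperty call, and callers need no property plumbing of their own either. A hedged usage sketch of the resulting flow; the test class is hypothetical, while the HBaseTestingUtility methods are the existing API touched or relied on by this commit:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class TestExampleMapReduce {
      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        // No System.setProperty("hadoop.log.dir", ...) or mapred.output.dir
        // wiring needed here any more: setupDataTestDir() handles it per JVM.
        UTIL.startMiniCluster();           // DFS + ZooKeeper + HBase
        UTIL.startMiniMapReduceCluster(1); // one-server MR cluster on top
      }

      @AfterClass
      public static void tearDownAfterClass() throws Exception {
        UTIL.shutdownMiniMapReduceCluster();
        UTIL.shutdownMiniCluster();
      }
    }

The TestTimeRangeMapRed hunk below is exactly this simplification applied to an existing test.
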
@@ -80,10 +80,6 @@ public class TestTimeRangeMapRed {

   @BeforeClass
   public static void beforeClass() throws Exception {
-    System.setProperty("hadoop.log.dir",
-      UTIL.getConfiguration().get("hadoop.log.dir"));
-    UTIL.getConfiguration().set("mapred.output.dir",
-      UTIL.getConfiguration().get("hadoop.tmp.dir"));
     UTIL.startMiniCluster();
   }

@@ -122,10 +122,6 @@
     Keep the maximum filesize small so we split more often in tests.
     </description>
   </property>
-  <property>
-    <name>hadoop.log.dir</name>
-    <value>${user.dir}/../logs</value>
-  </property>
   <property>
     <name>hbase.zookeeper.property.clientPort</name>
     <value>21818</value>