HBASE-5064 revert - need to figure out which test caused surefire to report timeout

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1226101 13f79535-47bb-0310-9956-ffa450edef68
Zhihong Yu 2011-12-31 15:25:58 +00:00
parent 497b4e781d
commit 50d332606d
5 changed files with 24 additions and 73 deletions

@@ -567,9 +567,9 @@ runTests () {
   failed_tests=""
   ### Kill any rogue build processes from the last attempt
   $PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
-  echo "$MVN clean test -P runAllTests -D${PROJECT_NAME}PatchProcess -Dsurefire.secondPartThreadCount=4"
+  echo "$MVN clean test -D${PROJECT_NAME}PatchProcess"
   ulimit -a
-  $MVN clean test -P runAllTests -D${PROJECT_NAME}PatchProcess -Dsurefire.secondPartThreadCount=4
+  $MVN clean test -P runAllTests -D${PROJECT_NAME}PatchProcess
  if [[ $? != 0 ]] ; then
    ### Find and format names of failed tests
    failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-| |g" | sed -e "s|\.xml||g"`
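
For reference, the failed_tests pipeline above greps surefire's TEST-*.xml reports for failure/error tags and strips the path prefix and .xml suffix to recover test class names. A minimal Java sketch of the same scan, assuming only the target/surefire-reports naming convention visible in the diff; the class and method names here are illustrative, not part of the commit:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.stream.Stream;

    public class FailedTestScan {
      public static void main(String[] args) throws IOException {
        Path root = Paths.get(args.length > 0 ? args[0] : ".");
        try (Stream<Path> files = Files.walk(root)) {
          files.filter(p -> {
                 String name = p.getFileName().toString();
                 // same shape as `find . -name 'TEST*.xml'`
                 return name.startsWith("TEST-") && name.endsWith(".xml");
               })
               .filter(FailedTestScan::hasFailureOrError)
               .forEach(p -> {
                 String name = p.getFileName().toString();
                 // TEST-org.foo.TestBar.xml -> org.foo.TestBar, like the sed above
                 System.out.println(name.substring(5, name.length() - 4));
               });
        }
      }

      private static boolean hasFailureOrError(Path report) {
        try {
          // cheap stand-in for `$GREP -l -E "<failure|<error"`
          String xml = Files.readString(report);
          return xml.contains("<failure") || xml.contains("<error");
        } catch (IOException e) {
          return false;
        }
      }
    }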

pom.xml

@@ -627,7 +627,6 @@
             <parallel>${surefire.firstPartParallel}</parallel>
             <perCoreThreadCount>false</perCoreThreadCount>
             <threadCount>${surefire.firstPartThreadCount}</threadCount>
-            <parallel>classes</parallel><!-- surefire hack, if not we're using method parallelisation class !-->
             <groups>${surefire.firstPartGroups}</groups>
             <testFailureIgnore>false</testFailureIgnore>
           </configuration>
@@ -639,10 +638,8 @@
           <configuration>
             <skip>${surefire.skipSecondPart}</skip>
             <testFailureIgnore>false</testFailureIgnore>
-            <forkMode>perThread</forkMode>
-            <perCoreThreadCount>false</perCoreThreadCount>
-            <threadCount>${surefire.secondPartThreadCount}</threadCount>
-            <parallel>classes</parallel><!-- surefire hack, if not we're using method parallelisation class !-->
+            <forkMode>always</forkMode>
+            <parallel>none</parallel>
             <groups>${surefire.secondPartGroups}</groups>
           </configuration>
         </execution>
@@ -904,17 +901,16 @@
     <unittest.include>**/Test*.java</unittest.include>
     <integrationtest.include>**/IntegrationTest*.java</integrationtest.include>
-    <surefire.version>2.12-TRUNK-HBASE-2</surefire.version>
+    <surefire.version>2.11-TRUNK-HBASE-2</surefire.version>
     <surefire.provider>surefire-junit47</surefire.provider>
-    <!-- default: run small & medium, medium with 2 threads -->
+    <!-- default: run small & medium -->
     <surefire.skipFirstPart>false</surefire.skipFirstPart>
     <surefire.skipSecondPart>false</surefire.skipSecondPart>
     <surefire.firstPartForkMode>once</surefire.firstPartForkMode>
-    <surefire.firstPartParallel>classes</surefire.firstPartParallel>
+    <surefire.firstPartParallel>none</surefire.firstPartParallel>
     <surefire.firstPartThreadCount>1</surefire.firstPartThreadCount>
-    <surefire.secondPartThreadCount>2</surefire.secondPartThreadCount>
     <surefire.firstPartGroups>org.apache.hadoop.hbase.SmallTests</surefire.firstPartGroups>
     <surefire.secondPartGroups>org.apache.hadoop.hbase.MediumTests</surefire.secondPartGroups>
@@ -2004,7 +2000,6 @@
         <surefire.firstPartForkMode>once</surefire.firstPartForkMode>
         <surefire.firstPartParallel>none</surefire.firstPartParallel>
         <surefire.firstPartThreadCount>1</surefire.firstPartThreadCount>
-        <surefire.secondPartThreadCount>2</surefire.secondPartThreadCount>
         <surefire.skipFirstPart>false</surefire.skipFirstPart>
         <surefire.skipSecondPart>false</surefire.skipSecondPart>
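
The surefire.firstPartGroups / surefire.secondPartGroups properties above name JUnit category interfaces; the surefire-junit47 provider runs only classes whose @Category matches the configured <groups>. A hedged sketch of how a test class opts into the first part — SmallTests is the real category interface from the diff, but the test class itself is hypothetical:

    import org.apache.hadoop.hbase.SmallTests;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    // Hypothetical test class. The surefire-junit47 provider matches the
    // @Category annotation against the <groups> element, so this class runs
    // in the first surefire execution; MediumTests classes run in the second.
    @Category(SmallTests.class)
    public class TestCategoryExample {
      @Test
      public void runsInFirstPart() {
        // selected because SmallTests appears in surefire.firstPartGroups
      }
    }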

@@ -262,11 +262,6 @@ public class HBaseTestingUtility {
    * instances -- another instance could grab the temporary
    * value unintentionally -- but not anything can do about it at moment;
    * single instance only is how the minidfscluster works.
-   *
-   * We also create the underlying directory for
-   * hadoop.log.dir, mapred.local.dir and hadoop.tmp.dir, and set the values
-   * in the conf, and as a system property for hadoop.tmp.dir
-   *
    * @return The calculated data test build directory.
    */
   private void setupDataTestDir() {
@@ -277,62 +272,13 @@ public class HBaseTestingUtility {
     }
     String randomStr = UUID.randomUUID().toString();
-    Path testPath= new Path(getBaseTestDir(), randomStr);
+    Path testPath= new Path(
+      getBaseTestDir(),
+      randomStr
+    );
     dataTestDir = new File(testPath.toString()).getAbsoluteFile();
     dataTestDir.deleteOnExit();
-
-    createSubDirAndSystemProperty(
-      "hadoop.log.dir",
-      testPath, "hadoop-log-dir");
-
-    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
-    // we want our own value to ensure uniqueness on the same machine
-    createSubDirAndSystemProperty(
-      "hadoop.tmp.dir",
-      testPath, "hadoop-tmp-dir");
-
-    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
-    createSubDir(
-      "mapred.local.dir",
-      testPath, "mapred-local-dir");
-
-    createSubDirAndSystemProperty(
-      "mapred.working.dir",
-      testPath, "mapred-working-dir");
-  }
-
-  private void createSubDir(String propertyName, Path parent, String subDirName){
-    Path newPath= new Path(parent, subDirName);
-    File newDir = new File(newPath.toString()).getAbsoluteFile();
-    newDir.deleteOnExit();
-    conf.set(propertyName, newDir.getAbsolutePath());
-  }
-
-  private void createSubDirAndSystemProperty(
-    String propertyName, Path parent, String subDirName){
-
-    String sysValue = System.getProperty(propertyName);
-
-    if (sysValue != null) {
-      // There is already a value set. So we do nothing but hope
-      // that there will be no conflicts
-      LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
-        sysValue + " so I do NOT create it in "+dataTestDir.getAbsolutePath());
-      String confValue = conf.get(propertyName);
-      if (confValue != null && !confValue.endsWith(sysValue)){
-        LOG.warn(
-          propertyName + " property value differs in configuration and system: "+
-          "Configuration="+confValue+" while System="+sysValue+
-          " Erasing configuration value by system value."
-        );
-      }
-      conf.set(propertyName, sysValue);
-    } else {
-      // Ok, it's not set, so we create it as a subdirectory
-      createSubDir(propertyName, parent, subDirName);
-      System.setProperty(propertyName, conf.get(propertyName));
-    }
   }

   /**
@@ -1269,11 +1215,13 @@ public class HBaseTestingUtility {
   public void startMiniMapReduceCluster(final int servers) throws IOException {
     LOG.info("Starting mini mapreduce cluster...");
     // These are needed for the new and improved Map/Reduce framework
-    conf.set("mapred.output.dir", conf.get("hadoop.tmp.dir"));
+    Configuration c = getConfiguration();
+    System.setProperty("hadoop.log.dir", c.get("hadoop.log.dir"));
+    c.set("mapred.output.dir", c.get("hadoop.tmp.dir"));
     mrCluster = new MiniMRCluster(servers,
-      FileSystem.get(conf).getUri().toString(), 1);
+      FileSystem.get(c).getUri().toString(), 1);
     LOG.info("Mini mapreduce cluster started");
-    conf.set("mapred.job.tracker",
+    c.set("mapred.job.tracker",
       mrCluster.createJobConf().get("mapred.job.tracker"));
   }
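
MiniMRCluster reads the hadoop.log.dir system property when it starts its task trackers, which is why the restored code sets it explicitly before constructing the cluster. A hedged usage sketch of the post-revert call order, using the HBaseTestingUtility methods shown in this hunk; the surrounding driver class and the shutdown calls are assumptions for illustration, not part of this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    // Hypothetical driver: hadoop.log.dir must already resolve in the
    // Configuration (here via the test hbase-site.xml below) before
    // startMiniMapReduceCluster() copies it into a system property and
    // constructs MiniMRCluster.
    public class MiniMRUsageSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        Configuration c = util.getConfiguration();
        util.startMiniCluster();            // HDFS + ZooKeeper + HBase
        util.startMiniMapReduceCluster(1);  // now sets hadoop.log.dir itself
        try {
          // the diff above records the job tracker address back into c
          System.out.println("mapred.job.tracker = " + c.get("mapred.job.tracker"));
        } finally {
          util.shutdownMiniMapReduceCluster(); // assumed helpers; verify against
          util.shutdownMiniCluster();          // the HBaseTestingUtility of this era
        }
      }
    }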

@@ -80,6 +80,10 @@ public class TestTimeRangeMapRed {
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("hadoop.log.dir",
+      UTIL.getConfiguration().get("hadoop.log.dir"));
+    UTIL.getConfiguration().set("mapred.output.dir",
+      UTIL.getConfiguration().get("hadoop.tmp.dir"));
     UTIL.startMiniCluster();
   }

@@ -122,6 +122,10 @@
       Keep the maximum filesize small so we split more often in tests.
     </description>
   </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
   <property>
     <name>hbase.zookeeper.property.clientPort</name>
     <value>21818</value>
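
The ${user.dir} reference above relies on Hadoop's Configuration variable expansion, which substitutes ${...} from other config keys and JVM system properties when get() is called. A small standalone sketch of that behavior, not code from this commit:

    import org.apache.hadoop.conf.Configuration;

    public class ConfExpansionSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false); // no default resources
        conf.set("hadoop.log.dir", "${user.dir}/../logs");
        // user.dir is a standard JVM system property (the working directory),
        // so get() returns an absolute path under the working dir's parent
        System.out.println(conf.get("hadoop.log.dir"));
        // e.g. /home/dev/hbase/../logs when run from /home/dev/hbase
      }
    }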