HADOOP-13113 Enable parallel test execution for hadoop-aws. Chris Nauroth via stevel

This commit is contained in:
Steve Loughran 2016-05-13 10:46:15 +01:00
parent b5fd6dd4a8
commit a84850b4ca
7 changed files with 192 additions and 17 deletions

View File

@ -34,6 +34,7 @@
<properties> <properties>
<file.encoding>UTF-8</file.encoding> <file.encoding>UTF-8</file.encoding>
<downloadSources>true</downloadSources> <downloadSources>true</downloadSources>
<hadoop.tmp.dir>${project.build.directory}/test</hadoop.tmp.dir>
</properties> </properties>
<profiles> <profiles>
@ -59,7 +60,109 @@
<maven.test.skip>false</maven.test.skip> <maven.test.skip>false</maven.test.skip>
</properties> </properties>
</profile> </profile>
<!-- Profile that runs the test suites in parallel Surefire forks.   -->
<!-- Activate with -Pparallel-tests; tune with -DtestsThreadCount=N. -->
<profile>
  <id>parallel-tests</id>
  <build>
    <plugins>
      <!-- Pre-creates one working directory per Surefire fork so that -->
      <!-- concurrently running tests never share temp/data paths.     -->
      <plugin>
        <artifactId>maven-antrun-plugin</artifactId>
        <executions>
          <execution>
            <id>create-parallel-tests-dirs</id>
            <phase>test-compile</phase>
            <configuration>
              <target>
                <!-- JavaScript run inside Ant: for each base dir, make  -->
                <!-- numbered subdirs 1..testsThreadCount, matching the  -->
                <!-- ${surefire.forkNumber} values used below.           -->
                <script language="javascript"><![CDATA[
                  var baseDirs = [
                      "${test.build.data}",
                      "${test.build.dir}",
                      "${hadoop.tmp.dir}" ];
                  for (var i in baseDirs) {
                    for (var j = 1; j <= ${testsThreadCount}; ++j) {
                      var mkdir = project.createTask("mkdir");
                      mkdir.setDir(new java.io.File(baseDirs[i], j));
                      mkdir.perform();
                    }
                  }
                ]]></script>
              </target>
            </configuration>
            <goals>
              <goal>run</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-surefire-plugin</artifactId>
        <executions>
          <!-- First pass: run the parallel-safe tests across N forks. -->
          <execution>
            <id>default-test</id>
            <phase>test</phase>
            <goals>
              <goal>test</goal>
            </goals>
            <configuration>
              <forkCount>${testsThreadCount}</forkCount>
              <reuseForks>false</reuseForks>
              <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
              <!-- Each fork gets its own per-fork-number directory tree -->
              <!-- (created by the antrun step above).                   -->
              <systemPropertyVariables>
                <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
                <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
                <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
                <!-- Due to a Maven quirk, setting this to just -->
                <!-- surefire.forkNumber won't do the parameter -->
                <!-- substitution. Putting a prefix in front of it like -->
                <!-- "fork-" makes it work. -->
                <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
              </systemPropertyVariables>
              <!-- Some tests cannot run in parallel. Tests that cover -->
              <!-- access to the root directory must run in isolation -->
              <!-- from anything else that could modify the bucket. -->
              <!-- S3A tests that cover multi-part upload must run in -->
              <!-- isolation, because the file system is configured to -->
              <!-- purge existing multi-part upload data on -->
              <!-- initialization. MiniYARNCluster has not yet been -->
              <!-- changed to handle parallel test execution gracefully. -->
              <!-- Exclude all of these tests from parallel execution, -->
              <!-- and instead run them sequentially in a separate -->
              <!-- Surefire execution step later. -->
              <excludes>
                <exclude>**/TestJets3tNativeS3FileSystemContract.java</exclude>
                <exclude>**/TestS3ABlockingThreadPool.java</exclude>
                <exclude>**/TestS3AFastOutputStream.java</exclude>
                <exclude>**/TestS3AFileSystemContract.java</exclude>
                <exclude>**/TestS3AMiniYarnCluster.java</exclude>
                <exclude>**/Test*Root*.java</exclude>
              </excludes>
            </configuration>
          </execution>
          <!-- Second pass: single-fork run of the tests excluded above. -->
          <execution>
            <id>sequential-tests</id>
            <phase>test</phase>
            <goals>
              <goal>test</goal>
            </goals>
            <configuration>
              <!-- Do a sequential run for tests that cannot handle -->
              <!-- parallel execution. -->
              <includes>
                <include>**/TestJets3tNativeS3FileSystemContract.java</include>
                <include>**/TestS3ABlockingThreadPool.java</include>
                <include>**/TestS3AFastOutputStream.java</include>
                <include>**/TestS3AFileSystemContract.java</include>
                <include>**/TestS3AMiniYarnCluster.java</include>
                <include>**/Test*Root*.java</include>
              </includes>
            </configuration>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
</profile>
</profiles> </profiles>
<build> <build>
@ -99,30 +202,17 @@
<artifactId>hadoop-common</artifactId> <artifactId>hadoop-common</artifactId>
<scope>compile</scope> <scope>compile</scope>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId> <artifactId>hadoop-common</artifactId>
<scope>test</scope> <scope>test</scope>
<type>test-jar</type> <type>test-jar</type>
</dependency> </dependency>
<!-- see ../../hadoop-project/pom.xml for versions -->
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
</dependency>
<dependency> <dependency>
<groupId>com.amazonaws</groupId> <groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-s3</artifactId> <artifactId>aws-java-sdk-s3</artifactId>
<scope>compile</scope> <scope>compile</scope>
</dependency> </dependency>
<dependency> <dependency>
<groupId>junit</groupId> <groupId>junit</groupId>
<artifactId>junit</artifactId> <artifactId>junit</artifactId>

View File

@ -733,3 +733,25 @@ or in batch runs.
Smaller values should result in faster test runs, especially when the object Smaller values should result in faster test runs, especially when the object
store is a long way away. store is a long way away.
### Running the Tests
After completing the configuration, execute the test run through Maven.
mvn clean test
It's also possible to execute multiple test suites in parallel by enabling the
`parallel-tests` Maven profile. The tests spend most of their time blocked on
network I/O with the S3 service, so running in parallel tends to complete full
test runs faster.
mvn -Pparallel-tests clean test
Some tests must run with exclusive access to the S3 bucket, so even with the
`parallel-tests` profile enabled, several test suites will run in serial in a
separate Maven execution step after the parallel tests.
By default, the `parallel-tests` profile runs 4 test suites concurrently. This
can be tuned by passing the `testsThreadCount` property on the command line.
mvn -Pparallel-tests -DtestsThreadCount=8 clean test

View File

@ -19,8 +19,12 @@
package org.apache.hadoop.fs.contract.s3; package org.apache.hadoop.fs.contract.s3;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract; import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
/**
* The contract of S3: only enabled if the test bucket is provided.
*/
public class S3Contract extends AbstractBondedFSContract { public class S3Contract extends AbstractBondedFSContract {
public static final String CONTRACT_XML = "contract/s3.xml"; public static final String CONTRACT_XML = "contract/s3.xml";
@ -37,4 +41,10 @@ public class S3Contract extends AbstractBondedFSContract {
return "s3"; return "s3";
} }
/**
 * Returns the path used for test data. When the {@code test.unique.fork.id}
 * system property is set (by the parallel-tests Maven profile), each
 * Surefire fork gets its own isolated subtree so concurrent runs do not
 * collide in the shared bucket.
 */
@Override
public Path getTestPath() {
  String forkId = System.getProperty("test.unique.fork.id");
  if (forkId == null) {
    // Sequential run: fall back to the default contract test path.
    return super.getTestPath();
  }
  return new Path("/" + forkId, "test");
}
} }

View File

@ -19,10 +19,11 @@
package org.apache.hadoop.fs.contract.s3a; package org.apache.hadoop.fs.contract.s3a;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract; import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
/** /**
* The contract of S3A: only enabled if the test bucket is provided * The contract of S3A: only enabled if the test bucket is provided.
*/ */
public class S3AContract extends AbstractBondedFSContract { public class S3AContract extends AbstractBondedFSContract {
@ -40,4 +41,10 @@ public class S3AContract extends AbstractBondedFSContract {
return "s3a"; return "s3a";
} }
/**
 * Returns the path for test data. If the {@code test.unique.fork.id}
 * system property is present (set per Surefire fork by the
 * parallel-tests profile), the path is namespaced by the fork id so
 * parallel test forks work in disjoint parts of the bucket.
 */
@Override
public Path getTestPath() {
  String forkId = System.getProperty("test.unique.fork.id");
  if (forkId == null) {
    // No fork id: behave exactly as the base contract does.
    return super.getTestPath();
  }
  return new Path("/" + forkId, "test");
}
} }

View File

@ -18,18 +18,55 @@
package org.apache.hadoop.fs.contract.s3a; package org.apache.hadoop.fs.contract.s3a;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest; import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* root dir operations against an S3 bucket * root dir operations against an S3 bucket.
*/ */
public class TestS3AContractRootDir extends public class TestS3AContractRootDir extends
AbstractContractRootDirectoryTest { AbstractContractRootDirectoryTest {
private static final Logger LOG =
LoggerFactory.getLogger(TestS3AContractRootDir.class);
@Override @Override
protected AbstractFSContract createContract(Configuration conf) { protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf); return new S3AContract(conf);
} }
/**
 * Overridden to retry the root-directory listing check. S3 listings are
 * only eventually consistent, so a just-emptied root may still report
 * stale entries; retry up to 10 times with a 1-second pause before
 * declaring failure.
 */
@Override
public void testListEmptyRootDirectory() throws IOException {
  for (int attempt = 1, maxAttempts = 10; attempt <= maxAttempts; ++attempt) {
    try {
      super.testListEmptyRootDirectory();
      // Passed: stop retrying.
      break;
    } catch (AssertionError | FileNotFoundException e) {
      if (attempt < maxAttempts) {
        LOG.info("Attempt {} of {} for empty root directory test failed. "
            + "This is likely caused by eventual consistency of S3 "
            + "listings. Attempting retry.", attempt, maxAttempts);
        try {
          // Give the listing time to converge before the next attempt.
          Thread.sleep(1000);
        } catch (InterruptedException e2) {
          // Restore the interrupt flag and abort the retry loop.
          Thread.currentThread().interrupt();
          fail("Test interrupted.");
          break;
        }
      } else {
        // Out of retries: surface the original failure.
        LOG.error(
            "Empty root directory test failed {} attempts. Failing test.",
            maxAttempts);
        throw e;
      }
    }
  }
}
} }

View File

@ -19,10 +19,11 @@
package org.apache.hadoop.fs.contract.s3n; package org.apache.hadoop.fs.contract.s3n;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract; import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
/** /**
* The contract of S3N: only enabled if the test bucket is provided * The contract of S3N: only enabled if the test bucket is provided.
*/ */
public class NativeS3Contract extends AbstractBondedFSContract { public class NativeS3Contract extends AbstractBondedFSContract {
@ -40,4 +41,10 @@ public class NativeS3Contract extends AbstractBondedFSContract {
return "s3n"; return "s3n";
} }
/**
 * Path under which test data is created. When running under the
 * parallel-tests profile, the {@code test.unique.fork.id} system
 * property identifies the Surefire fork; the test path is prefixed
 * with it so forks never share a directory in the bucket.
 */
@Override
public Path getTestPath() {
  String forkId = System.getProperty("test.unique.fork.id");
  if (forkId == null) {
    // Not a parallel run: use the inherited default.
    return super.getTestPath();
  }
  return new Path("/" + forkId, "test");
}
} }

View File

@ -127,7 +127,9 @@ public class S3AScaleTestBase extends Assert {
} }
protected Path getTestPath() { protected Path getTestPath() {
return new Path("/tests3a"); String testUniqueForkId = System.getProperty("test.unique.fork.id");
return testUniqueForkId == null ? new Path("/tests3a") :
new Path("/" + testUniqueForkId, "tests3a");
} }
protected long getOperationCount() { protected long getOperationCount() {