HADOOP-13446. Support running isolated unit tests separate from AWS integration tests. Contributed by Chris Nauroth.

(cherry picked from commit cda68de9b9)
Chris Nauroth 2016-08-23 07:19:20 -07:00
parent d29dc39791
commit 027d76fa8c
56 changed files with 316 additions and 140 deletions

View File

@ -1048,6 +1048,11 @@
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<version>${maven-failsafe-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-install-plugin</artifactId>

View File

@ -46,7 +46,7 @@
</file>
</activation>
<properties>
<maven.test.skip>true</maven.test.skip>
<skipITs>true</skipITs>
</properties>
</profile>
<profile>
@ -57,11 +57,16 @@
</file>
</activation>
<properties>
<maven.test.skip>false</maven.test.skip>
<skipITs>false</skipITs>
</properties>
</profile>
<profile>
<id>parallel-tests</id>
<activation>
<property>
<name>parallel-tests</name>
</property>
</activation>
<build>
<plugins>
<plugin>
@ -96,12 +101,32 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>${testsThreadCount}</forkCount>
<reuseForks>false</reuseForks>
<argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
<systemPropertyVariables>
<test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
<test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
<hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
<!-- Due to a Maven quirk, setting this to just -->
<!-- surefire.forkNumber won't do the parameter -->
<!-- substitution. Putting a prefix in front of it like -->
<!-- "fork-" makes it work. -->
<test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>default-test</id>
<phase>test</phase>
<id>default-integration-test</id>
<goals>
<goal>test</goal>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
<configuration>
<forkCount>${testsThreadCount}</forkCount>
@ -129,32 +154,35 @@
<!-- Exclude all of these tests from parallel execution, -->
<!-- and instead run them sequentially in a separate -->
<!-- Surefire execution step later. -->
<includes>
<include>**/ITest*.java</include>
</includes>
<excludes>
<exclude>**/TestJets3tNativeS3FileSystemContract.java</exclude>
<exclude>**/TestS3ABlockingThreadPool.java</exclude>
<exclude>**/TestS3AFastOutputStream.java</exclude>
<exclude>**/TestS3AFileSystemContract.java</exclude>
<exclude>**/TestS3AMiniYarnCluster.java</exclude>
<exclude>**/Test*Root*.java</exclude>
<exclude>**/ITestJets3tNativeS3FileSystemContract.java</exclude>
<exclude>**/ITestS3ABlockingThreadPool.java</exclude>
<exclude>**/ITestS3AFastOutputStream.java</exclude>
<exclude>**/ITestS3AFileSystemContract.java</exclude>
<exclude>**/ITestS3AMiniYarnCluster.java</exclude>
<exclude>**/ITest*Root*.java</exclude>
</excludes>
</configuration>
</execution>
<execution>
<id>sequential-tests</id>
<phase>test</phase>
<id>sequential-integration-tests</id>
<goals>
<goal>test</goal>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
<configuration>
<!-- Do a sequential run for tests that cannot handle -->
<!-- parallel execution. -->
<includes>
<include>**/TestJets3tNativeS3FileSystemContract.java</include>
<include>**/TestS3ABlockingThreadPool.java</include>
<include>**/TestS3AFastOutputStream.java</include>
<include>**/TestS3AFileSystemContract.java</include>
<include>**/TestS3AMiniYarnCluster.java</include>
<include>**/Test*Root*.java</include>
<include>**/ITestJets3tNativeS3FileSystemContract.java</include>
<include>**/ITestS3ABlockingThreadPool.java</include>
<include>**/ITestS3AFastOutputStream.java</include>
<include>**/ITestS3AFileSystemContract.java</include>
<include>**/ITestS3AMiniYarnCluster.java</include>
<include>**/ITest*Root*.java</include>
</includes>
</configuration>
</execution>
@ -163,6 +191,33 @@
</plugins>
</build>
</profile>
<profile>
<id>sequential-tests</id>
<activation>
<property>
<name>!parallel-tests</name>
</property>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
<configuration>
<forkedProcessTimeoutInSeconds>3600</forkedProcessTimeoutInSeconds>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
<build>

View File

@ -1375,11 +1375,18 @@ works with S3 to something lower. See [AWS documentation](http://docs.aws.amazon
## Testing the S3 filesystem clients
Due to eventual consistency, tests may fail without reason. Transient
failures, which no longer occur upon rerunning the test, should thus be ignored.
This module includes both unit tests, which can run in isolation without
connecting to the S3 service, and integration tests, which require a working
connection to S3 to interact with a bucket. Unit test suites follow the naming
convention `Test*.java`. Integration tests follow the naming convention
`ITest*.java`.
To test the S3* filesystem clients, you need to provide two files
which pass in authentication details to the test runner
Due to eventual consistency, integration tests may fail without reason.
Transient failures, which no longer occur upon rerunning the test, should thus
be ignored.
To integration test the S3* filesystem clients, you need to provide two files
which pass in authentication details to the test runner.
1. `auth-keys.xml`
1. `core-site.xml`
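As an illustration only (the exact contents are site-specific, and the property names below are the standard S3A test keys rather than an excerpt from this patch), a minimal `auth-keys.xml` might look like:

```xml
<configuration>
  <!-- Illustrative values only; substitute your own test bucket and credentials. -->
  <property>
    <name>test.fs.s3a.name</name>
    <value>s3a://your-test-bucket/</value>
  </property>
  <property>
    <name>fs.s3a.access.key</name>
    <value>YOUR_AWS_ACCESS_KEY_ID</value>
  </property>
  <property>
    <name>fs.s3a.secret.key</name>
    <value>YOUR_AWS_SECRET_ACCESS_KEY</value>
  </property>
</configuration>
```

The sections below describe the full set of ID/key and filesystem URL properties required for each of the S3 filesystem clients.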
@ -1399,7 +1406,8 @@ need to apply a specific, non-default property change during the tests.
The presence of this file triggers the testing of the S3 classes.
Without this file, *none of the tests in this module will be executed*
Without this file, *none of the integration tests in this module will be
executed*.
The XML file must contain all the ID/key information needed to connect
each of the filesystem clients to the object stores, and a URL for
@ -1586,23 +1594,50 @@ source code tree, it is not going to get accidentally committed.
After completing the configuration, execute the test run through Maven.
mvn clean test
mvn clean verify
It's also possible to execute multiple test suites in parallel by enabling the
`parallel-tests` Maven profile. The tests spend most of their time blocked on
network I/O with the S3 service, so running in parallel tends to complete full
test runs faster.
It's also possible to execute multiple test suites in parallel by passing the
`parallel-tests` property on the command line. The tests spend most of their
time blocked on network I/O with the S3 service, so running in parallel tends to
complete full test runs faster.
mvn -Pparallel-tests clean test
mvn -Dparallel-tests clean verify
Some tests must run with exclusive access to the S3 bucket, so even with the
`parallel-tests` profile enabled, several test suites will run in serial in a
separate Maven execution step after the parallel tests.
`parallel-tests` property, several test suites will run in serial in a separate
Maven execution step after the parallel tests.
By default, the `parallel-tests` profile runs 4 test suites concurrently. This
can be tuned by passing the `testsThreadCount` argument.
By default, `parallel-tests` runs 4 test suites concurrently. This can be tuned
by passing the `testsThreadCount` property.
mvn -Pparallel-tests -DtestsThreadCount=8 clean test
mvn -Dparallel-tests -DtestsThreadCount=8 clean verify
To run just unit tests, which do not require S3 connectivity or AWS credentials,
use any of the above invocations, but switch the goal to `test` instead of
`verify`.
mvn clean test
mvn -Dparallel-tests clean test
mvn -Dparallel-tests -DtestsThreadCount=8 clean test
To run only a specific named subset of tests, pass the `test` property for unit
tests or the `it.test` property for integration tests.
mvn clean test -Dtest=TestS3AInputPolicies
mvn clean verify -Dit.test=ITestS3AFileContextStatistics
mvn clean verify -Dtest=TestS3A* -Dit.test=ITestS3A*
Note that when running a specific subset of tests, the patterns passed in `test`
and `it.test` override the configuration of which tests need to run in isolation
in a separate serial phase (mentioned above). This can cause unpredictable
results, so the recommendation is to avoid passing `parallel-tests` in
combination with `test` or `it.test`. If you know that you are specifying only
tests that can run safely in parallel, then it will work. For wide patterns,
like `ITestS3A*` shown above, it may cause unpredictable test failures.
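For instance (an illustrative safe invocation, using a suite that this patch leaves in the parallel phase rather than the serial phase), running a single integration test suite without the `parallel-tests` property sidesteps the issue entirely:

mvn clean verify -Dit.test=ITestS3AFileOperationCost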
### Testing against different regions

View File

@ -21,10 +21,12 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
/**
* S3 contract tests creating files.
*/
@Deprecated
public class TestS3ContractCreate extends AbstractContractCreateTest {
public class ITestS3ContractCreate extends AbstractContractCreateTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -22,8 +22,11 @@
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
* S3 contract tests covering deletes.
*/
@Deprecated
public class TestS3ContractDelete extends AbstractContractDeleteTest {
public class ITestS3ContractDelete extends AbstractContractDeleteTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -21,10 +21,12 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
/**
* Test dir operations on S3.
*/
@Deprecated
public class TestS3ContractMkdir extends AbstractContractMkdirTest {
public class ITestS3ContractMkdir extends AbstractContractMkdirTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -21,10 +21,12 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
/**
* S3 contract tests opening files.
*/
@Deprecated
public class TestS3ContractOpen extends AbstractContractOpenTest {
public class ITestS3ContractOpen extends AbstractContractOpenTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -22,8 +22,11 @@
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
* S3 contract tests covering rename.
*/
@Deprecated
public class TestS3ContractRename extends AbstractContractRenameTest {
public class ITestS3ContractRename extends AbstractContractRenameTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -26,10 +26,10 @@
import org.junit.Test;
/**
* root dir operations against an S3 bucket
* root dir operations against an S3 bucket.
*/
@Deprecated
public class TestS3ContractRootDir extends AbstractContractRootDirectoryTest {
public class ITestS3ContractRootDir extends AbstractContractRootDirectoryTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -21,13 +21,15 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.junit.Ignore;
import org.junit.Test;
/**
* S3 contract tests covering file seek.
*/
@Deprecated
public class TestS3ContractSeek extends AbstractContractSeekTest {
public class ITestS3ContractSeek extends AbstractContractSeekTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -21,9 +21,11 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
public class TestS3AContractCreate extends AbstractContractCreateTest {
/**
* S3A contract tests creating files.
*/
public class ITestS3AContractCreate extends AbstractContractCreateTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -22,7 +22,10 @@
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3AContractDelete extends AbstractContractDeleteTest {
/**
* S3A contract tests covering deletes.
*/
public class ITestS3AContractDelete extends AbstractContractDeleteTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -27,7 +27,7 @@
/**
* Contract test suite covering S3A integration with DistCp.
*/
public class TestS3AContractDistCp extends AbstractContractDistCpTest {
public class ITestS3AContractDistCp extends AbstractContractDistCpTest {
private static final long MULTIPART_SETTING = 8 * 1024 * 1024; // 8 MB

View File

@ -23,7 +23,11 @@
import org.apache.hadoop.fs.s3a.Constants;
import org.apache.hadoop.fs.s3a.S3ATestUtils;
public class TestS3AContractGetFileStatus extends AbstractContractGetFileStatusTest {
/**
* S3A contract tests covering getFileStatus.
*/
public class ITestS3AContractGetFileStatus
extends AbstractContractGetFileStatusTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -23,9 +23,9 @@
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
* Test dir operations on S3
* Test dir operations on S3A.
*/
public class TestS3AContractMkdir extends AbstractContractMkdirTest {
public class ITestS3AContractMkdir extends AbstractContractMkdirTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -22,7 +22,10 @@
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3AContractOpen extends AbstractContractOpenTest {
/**
* S3A contract tests opening files.
*/
public class ITestS3AContractOpen extends AbstractContractOpenTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -23,13 +23,14 @@
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.junit.Test;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset;
public class TestS3AContractRename extends AbstractContractRenameTest {
/**
* S3A contract tests covering rename.
*/
public class ITestS3AContractRename extends AbstractContractRenameTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -31,11 +31,11 @@
/**
* root dir operations against an S3 bucket.
*/
public class TestS3AContractRootDir extends
public class ITestS3AContractRootDir extends
AbstractContractRootDirectoryTest {
private static final Logger LOG =
LoggerFactory.getLogger(TestS3AContractRootDir.class);
LoggerFactory.getLogger(ITestS3AContractRootDir.class);
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -22,7 +22,10 @@
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3AContractSeek extends AbstractContractSeekTest {
/**
* S3A contract tests covering file seek.
*/
public class ITestS3AContractSeek extends AbstractContractSeekTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -23,7 +23,10 @@
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
public class TestS3NContractCreate extends AbstractContractCreateTest {
/**
* S3N contract tests creating files.
*/
public class ITestS3NContractCreate extends AbstractContractCreateTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -22,7 +22,10 @@
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3NContractDelete extends AbstractContractDeleteTest {
/**
* S3N contract tests covering deletes.
*/
public class ITestS3NContractDelete extends AbstractContractDeleteTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -23,9 +23,9 @@
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
* Test dir operations on S3
* Test dir operations on S3.
*/
public class TestS3NContractMkdir extends AbstractContractMkdirTest {
public class ITestS3NContractMkdir extends AbstractContractMkdirTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -22,7 +22,10 @@
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3NContractOpen extends AbstractContractOpenTest {
/**
* S3N contract tests opening files.
*/
public class ITestS3NContractOpen extends AbstractContractOpenTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -22,7 +22,10 @@
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3NContractRename extends AbstractContractRenameTest {
/**
* S3N contract tests covering rename.
*/
public class ITestS3NContractRename extends AbstractContractRenameTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -23,9 +23,9 @@
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
* root dir operations against an S3 bucket
* Root dir operations against an S3 bucket.
*/
public class TestS3NContractRootDir extends
public class ITestS3NContractRootDir extends
AbstractContractRootDirectoryTest {
@Override

View File

@ -22,7 +22,10 @@
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3NContractSeek extends AbstractContractSeekTest {
/**
* S3N contract tests covering file seek.
*/
public class ITestS3NContractSeek extends AbstractContractSeekTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -20,8 +20,11 @@
import java.io.IOException;
/**
* S3 basic contract tests through mock in-memory S3 implementation.
*/
@Deprecated
public class TestInMemoryS3FileSystemContract
public class ITestInMemoryS3FileSystemContract
extends S3FileSystemContractBaseTest {
@Override

View File

@ -33,7 +33,7 @@
/**
* Basic unit test for S3A's blocking executor service.
*/
public class TestBlockingThreadPoolExecutorService {
public class ITestBlockingThreadPoolExecutorService {
private static final Logger LOG = LoggerFactory.getLogger(
BlockingThreadPoolExecutorService.class);

View File

@ -48,9 +48,9 @@
* Tests for {@link Constants#AWS_CREDENTIALS_PROVIDER} logic.
*
*/
public class TestS3AAWSCredentialsProvider {
public class ITestS3AAWSCredentialsProvider {
private static final Logger LOG =
LoggerFactory.getLogger(TestS3AAWSCredentialsProvider.class);
LoggerFactory.getLogger(ITestS3AAWSCredentialsProvider.class);
@Rule
public Timeout testTimeout = new Timeout(1 * 60 * 1000);

View File

@ -34,7 +34,7 @@
* 4th part should not trigger an exception as it would with a
* non-blocking threadpool.
*/
public class TestS3ABlockingThreadPool {
public class ITestS3ABlockingThreadPool {
private Configuration conf;
private S3AFileSystem fs;

View File

@ -35,10 +35,13 @@
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.fileStatsToString;
public class TestS3ABlocksize extends AbstractFSContractTestBase {
/**
* S3A tests for configuring block size.
*/
public class ITestS3ABlocksize extends AbstractFSContractTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(TestS3ABlocksize.class);
LoggerFactory.getLogger(ITestS3ABlocksize.class);
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -50,7 +50,10 @@
import org.apache.http.HttpStatus;
import org.junit.rules.TemporaryFolder;
public class TestS3AConfiguration {
/**
* S3A tests for configuration.
*/
public class ITestS3AConfiguration {
private static final String EXAMPLE_ID = "AKASOMEACCESSKEY";
private static final String EXAMPLE_KEY =
"RGV0cm9pdCBSZ/WQgY2xl/YW5lZCB1cAEXAMPLE";
@ -59,7 +62,7 @@ public class TestS3AConfiguration {
private S3AFileSystem fs;
private static final Logger LOG =
LoggerFactory.getLogger(TestS3AConfiguration.class);
LoggerFactory.getLogger(ITestS3AConfiguration.class);
private static final String TEST_ENDPOINT = "test.fs.s3a.endpoint";
@ -349,7 +352,8 @@ public void testExcludingS3ACredentialProvider() throws Exception {
}
@Test
public void shouldBeAbleToSwitchOnS3PathStyleAccessViaConfigProperty() throws Exception {
public void shouldBeAbleToSwitchOnS3PathStyleAccessViaConfigProperty()
throws Exception {
conf = new Configuration();
conf.set(Constants.PATH_STYLE_ACCESS, Boolean.toString(true));
@ -365,7 +369,9 @@ public void shouldBeAbleToSwitchOnS3PathStyleAccessViaConfigProperty() throws Ex
assertTrue("Expected to find path style access to be switched on!",
clientOptions.isPathStyleAccess());
byte[] file = ContractTestUtils.toAsciiByteArray("test file");
ContractTestUtils.writeAndRead(fs, new Path("/path/style/access/testFile"), file, file.length, conf.getInt(Constants.FS_S3A_BLOCK_SIZE, file.length), false, true);
ContractTestUtils.writeAndRead(fs,
new Path("/path/style/access/testFile"), file, file.length,
conf.getInt(Constants.FS_S3A_BLOCK_SIZE, file.length), false, true);
} catch (final AWSS3IOException e) {
LOG.error("Caught exception: ", e);
// Catch/pass standard path style access behaviour when live bucket

View File

@ -43,10 +43,10 @@
* set, and a check that an invalid set do at least get stripped out
* of the final URI
*/
public class TestS3ACredentialsInURL extends Assert {
public class ITestS3ACredentialsInURL extends Assert {
private S3AFileSystem fs;
private static final Logger LOG =
LoggerFactory.getLogger(TestS3ACredentialsInURL.class);
LoggerFactory.getLogger(ITestS3ACredentialsInURL.class);
@Rule
public Timeout testTimeout = new Timeout(30 * 60 * 1000);

View File

@ -35,7 +35,7 @@
* are made for different file sizes as there have been reports that the
* file length may be rounded up to match word boundaries.
*/
public class TestS3AEncryption extends AbstractS3ATestBase {
public class ITestS3AEncryption extends AbstractS3ATestBase {
private static final String AES256 = Constants.SERVER_SIDE_ENCRYPTION_AES256;
@Override

View File

@ -31,7 +31,8 @@
* Test whether or not encryption settings propagate by choosing an invalid
* one. We expect the write to fail with a 400 bad request error
*/
public class TestS3AEncryptionAlgorithmPropagation extends AbstractS3ATestBase {
public class ITestS3AEncryptionAlgorithmPropagation
extends AbstractS3ATestBase {
@Override
protected Configuration createConfiguration() {

View File

@ -24,7 +24,7 @@
* Run the encryption tests against the Fast output stream.
* This verifies that both file writing paths can encrypt their data.
*/
public class TestS3AEncryptionFastOutputStream extends TestS3AEncryption {
public class ITestS3AEncryptionFastOutputStream extends ITestS3AEncryption {
@Override
protected Configuration createConfiguration() {

View File

@ -21,7 +21,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
@ -33,7 +32,6 @@
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
@ -43,9 +41,9 @@
* Test S3A Failure translation, including a functional test
* generating errors during stream IO.
*/
public class TestS3AFailureHandling extends AbstractFSContractTestBase {
public class ITestS3AFailureHandling extends AbstractFSContractTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(TestS3AFailureHandling.class);
LoggerFactory.getLogger(ITestS3AFailureHandling.class);
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -34,7 +34,7 @@
* Tests regular and multi-part upload functionality for S3AFastOutputStream.
* File sizes are kept small to reduce test duration on slow connections
*/
public class TestS3AFastOutputStream {
public class ITestS3AFastOutputStream {
private FileSystem fs;

View File

@ -42,13 +42,13 @@
* Use metrics to assert about the cost of file status queries.
* {@link S3AFileSystem#getFileStatus(Path)}.
*/
public class TestS3AFileOperationCost extends AbstractFSContractTestBase {
public class ITestS3AFileOperationCost extends AbstractFSContractTestBase {
private MetricDiff metadataRequests;
private MetricDiff listRequests;
private static final Logger LOG =
LoggerFactory.getLogger(TestS3AFileOperationCost.class);
LoggerFactory.getLogger(ITestS3AFileOperationCost.class);
@Override
protected AbstractFSContract createContract(Configuration conf) {

View File

@ -33,10 +33,10 @@
* properly making it impossible to skip the tests if we don't have a valid
* bucket.
**/
public class TestS3AFileSystemContract extends FileSystemContractBaseTest {
public class ITestS3AFileSystemContract extends FileSystemContractBaseTest {
protected static final Logger LOG =
LoggerFactory.getLogger(TestS3AFileSystemContract.class);
LoggerFactory.getLogger(ITestS3AFileSystemContract.class);
@Override
public void setUp() throws Exception {
@ -61,7 +61,9 @@ public void testMkdirsWithUmask() throws Exception {
@Override
public void testRenameFileAsExistingFile() throws Exception {
if (!renameSupported()) return;
if (!renameSupported()) {
return;
}
Path src = path("/test/hadoop/file");
createFile(src);

View File

@ -48,12 +48,12 @@
* should only be used against transient filesystems where you don't care about
* the data.
*/
public class TestS3ATemporaryCredentials extends AbstractFSContractTestBase {
public class ITestS3ATemporaryCredentials extends AbstractFSContractTestBase {
public static final String TEST_STS_ENABLED = "test.fs.s3a.sts.enabled";
public static final String TEST_STS_ENDPOINT = "test.fs.s3a.sts.endpoint";
private static final Logger LOG =
LoggerFactory.getLogger(TestS3ATemporaryCredentials.class);
LoggerFactory.getLogger(ITestS3ATemporaryCredentials.class);
private static final String PROVIDER_CLASS
= TemporaryAWSCredentialsProvider.NAME;

View File

@ -16,8 +16,8 @@
import org.apache.hadoop.fs.TestFileContext;
/**
* Implementation of TestFileContext for S3a
* Implementation of TestFileContext for S3a.
*/
public class TestS3AFileContext extends TestFileContext{
public class ITestS3AFileContext extends TestFileContext{
}

View File

@ -20,9 +20,9 @@
import org.junit.Before;
/**
* Extends FileContextCreateMkdirBaseTest for a S3a FileContext
* Extends FileContextCreateMkdirBaseTest for a S3a FileContext.
*/
public class TestS3AFileContextCreateMkdir
public class ITestS3AFileContextCreateMkdir
extends FileContextCreateMkdirBaseTest {
@Before

View File

@ -22,9 +22,9 @@
import org.junit.Test;
/**
* S3A implementation of FileContextMainOperationsBaseTest
* S3A implementation of FileContextMainOperationsBaseTest.
*/
public class TestS3AFileContextMainOperations
public class ITestS3AFileContextMainOperations
extends FileContextMainOperationsBaseTest {
@Before

View File

@ -24,15 +24,16 @@
import org.junit.Before;
/**
* S3a implementation of FCStatisticsBaseTest
* S3a implementation of FCStatisticsBaseTest.
*/
public class TestS3AFileContextStatistics extends FCStatisticsBaseTest {
public class ITestS3AFileContextStatistics extends FCStatisticsBaseTest {
@Before
public void setUp() throws Exception {
Configuration conf = new Configuration();
fc = S3ATestUtils.createTestFileContext(conf);
fc.mkdir(fileContextTestHelper.getTestRootPath(fc, "test"), FileContext.DEFAULT_PERM, true);
fc.mkdir(fileContextTestHelper.getTestRootPath(fc, "test"),
FileContext.DEFAULT_PERM, true);
}
@After

View File

@ -22,9 +22,9 @@
import org.junit.Test;
/**
* S3a implementation of FileContextURIBase
* S3a implementation of FileContextURIBase.
*/
public class TestS3AFileContextURI extends FileContextURIBase {
public class ITestS3AFileContextURI extends FileContextURIBase {
@Before
public void setUp() throws IOException, Exception {
@ -37,7 +37,8 @@ public void setUp() throws IOException, Exception {
@Test
@Ignore
public void testFileStatus() throws IOException {
//test disabled (the statistics tested with this method are not relevant for an S3FS)
// test disabled
// (the statistics tested with this method are not relevant for an S3FS)
}
}

View File

@ -20,9 +20,9 @@
import org.junit.Before;
/**
* S3A implementation of FileContextUtilBase
* S3A implementation of FileContextUtilBase.
*/
public class TestS3AFileContextUtil extends FileContextUtilBase {
public class ITestS3AFileContextUtil extends FileContextUtilBase {
@Before
public void setUp() throws IOException, Exception {

View File

@ -24,7 +24,10 @@
import java.io.IOException;
public class TestS3ADeleteFilesOneByOne extends TestS3ADeleteManyFiles {
/**
* Tests file deletion with multi-delete disabled.
*/
public class ITestS3ADeleteFilesOneByOne extends ITestS3ADeleteManyFiles {
@Override
protected Configuration createConfiguration() {
@ -33,6 +36,7 @@ protected Configuration createConfiguration() {
return configuration;
}
@Override
@Test
public void testOpenCreate() throws IOException {

View File

@ -35,9 +35,9 @@
/**
* Test some scalable operations related to file renaming and deletion.
*/
public class TestS3ADeleteManyFiles extends S3AScaleTestBase {
public class ITestS3ADeleteManyFiles extends S3AScaleTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(TestS3ADeleteManyFiles.class);
LoggerFactory.getLogger(ITestS3ADeleteManyFiles.class);
@Test
public void testBulkRenameAndDelete() throws Throwable {

View File

@ -33,9 +33,9 @@
/**
* Test the performance of listing files/directories.
*/
public class TestS3ADirectoryPerformance extends S3AScaleTestBase {
public class ITestS3ADirectoryPerformance extends S3AScaleTestBase {
private static final Logger LOG = LoggerFactory.getLogger(
TestS3ADirectoryPerformance.class);
ITestS3ADirectoryPerformance.class);
@Test
public void testListOperations() throws Throwable {

View File

@ -50,9 +50,9 @@
/**
* Look at the performance of S3a operations.
*/
public class TestS3AInputStreamPerformance extends S3AScaleTestBase {
public class ITestS3AInputStreamPerformance extends S3AScaleTestBase {
private static final Logger LOG = LoggerFactory.getLogger(
TestS3AInputStreamPerformance.class);
ITestS3AInputStreamPerformance.class);
private S3AFileSystem s3aFS;
private Path testData;

View File

@ -36,7 +36,10 @@
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
public class TestS3A {
/**
* S3A tests through the {@link FileContext} API.
*/
public class ITestS3A {
private FileContext fc;
@Rule

View File

@ -45,7 +45,10 @@
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class TestS3AMiniYarnCluster {
/**
* Tests that S3A is usable through a YARN application.
*/
public class ITestS3AMiniYarnCluster {
private final Configuration conf = new YarnConfiguration();
private S3AFileSystem fs;
@ -105,9 +108,10 @@ public void testWithMiniCluster() throws Exception {
}
/**
* helper method
* helper method.
*/
private Map<String, Integer> getResultAsMap(String outputAsStr) throws IOException {
private Map<String, Integer> getResultAsMap(String outputAsStr)
throws IOException {
Map<String, Integer> result = new HashMap<>();
for (String line : outputAsStr.split("\n")) {
String[] tokens = line.split("\t");
@ -117,7 +121,7 @@ private Map<String, Integer> getResultAsMap(String outputAsStr) throws IOExcepti
}
/**
* helper method
* helper method.
*/
private void writeStringToFile(Path path, String string) throws IOException {
FileContext fc = S3ATestUtils.createTestFileContext(conf);
@ -128,7 +132,7 @@ private void writeStringToFile(Path path, String string) throws IOException {
}
/**
* helper method
* helper method.
*/
private String readStringFromFile(Path path) {
try (FSDataInputStream in = fs.open(path)) {

View File

@ -20,7 +20,10 @@
import java.io.IOException;
public class TestInMemoryNativeS3FileSystemContract
/**
* S3N basic contract tests through mock in-memory S3 implementation.
*/
public class ITestInMemoryNativeS3FileSystemContract
extends NativeS3FileSystemContractBaseTest {
@Override

View File

@ -40,8 +40,10 @@
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
public class TestJets3tNativeFileSystemStore {
/**
* S3N tests through live S3 service.
*/
public class ITestJets3tNativeFileSystemStore {
private Configuration conf;
private Jets3tNativeFileSystemStore store;
private NativeS3FileSystem fs;
@ -98,7 +100,9 @@ protected void writeRenameReadCompare(Path path, long len)
InputStream in = new BufferedInputStream(
new DigestInputStream(fs.open(copyPath), digest2));
long copyLen = 0;
while (in.read() != -1) {copyLen++;}
while (in.read() != -1) {
copyLen++;
}
in.close();
assertEquals("Copy length matches original", len, copyLen);

View File

@ -20,7 +20,10 @@
import java.io.IOException;
public class TestJets3tNativeS3FileSystemContract
/**
* S3N basic contract tests through live S3 service.
*/
public class ITestJets3tNativeS3FileSystemContract
extends NativeS3FileSystemContractBaseTest {
@Override