HADOOP-13614. Purge some superfluous/obsolete S3 FS tests that are slowing test runs down. Contributed by Steve Loughran.

This commit is contained in:
Chris Nauroth 2016-10-26 08:27:26 -07:00
parent e90af4a89b
commit 9cad3e2350
26 changed files with 256 additions and 327 deletions

View File

@ -834,6 +834,7 @@ public class ContractTestUtils extends Assert {
long totalBytesRead = 0; long totalBytesRead = 0;
int nextExpectedNumber = 0; int nextExpectedNumber = 0;
NanoTimer timer = new NanoTimer();
try (InputStream inputStream = fs.open(path)) { try (InputStream inputStream = fs.open(path)) {
while (true) { while (true) {
final int bytesRead = inputStream.read(testBuffer); final int bytesRead = inputStream.read(testBuffer);
@ -862,6 +863,8 @@ public class ContractTestUtils extends Assert {
" bytes but only received " + totalBytesRead); " bytes but only received " + totalBytesRead);
} }
} }
timer.end("Time to read %d bytes", expectedSize);
bandwidth(timer, expectedSize);
} }
/** /**
@ -925,9 +928,12 @@ public class ContractTestUtils extends Assert {
final Path objectPath = new Path(parent, objectName); final Path objectPath = new Path(parent, objectName);
// Write test file in a specific pattern // Write test file in a specific pattern
NanoTimer timer = new NanoTimer();
assertEquals(fileSize, assertEquals(fileSize,
generateTestFile(fs, objectPath, fileSize, testBufferSize, modulus)); generateTestFile(fs, objectPath, fileSize, testBufferSize, modulus));
assertPathExists(fs, "not created successful", objectPath); assertPathExists(fs, "not created successful", objectPath);
timer.end("Time to write %d bytes", fileSize);
bandwidth(timer, fileSize);
// Now read the same file back and verify its content // Now read the same file back and verify its content
try { try {

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information

View File

@ -181,9 +181,6 @@
</includes> </includes>
<excludes> <excludes>
<exclude>**/ITestJets3tNativeS3FileSystemContract.java</exclude> <exclude>**/ITestJets3tNativeS3FileSystemContract.java</exclude>
<exclude>**/ITestS3ABlockingThreadPool.java</exclude>
<exclude>**/ITestS3AFileSystemContract.java</exclude>
<exclude>**/ITestS3AMiniYarnCluster.java</exclude>
<exclude>**/ITest*Root*.java</exclude> <exclude>**/ITest*Root*.java</exclude>
<exclude>**/ITestS3AFileContextStatistics.java</exclude> <exclude>**/ITestS3AFileContextStatistics.java</exclude>
<include>**/ITestS3AHuge*.java</include> <include>**/ITestS3AHuge*.java</include>
@ -211,10 +208,6 @@
<!-- parallel execution. --> <!-- parallel execution. -->
<includes> <includes>
<include>**/ITestJets3tNativeS3FileSystemContract.java</include> <include>**/ITestJets3tNativeS3FileSystemContract.java</include>
<include>**/ITestS3ABlockingThreadPool.java</include>
<include>**/ITestS3AFastOutputStream.java</include>
<include>**/ITestS3AFileSystemContract.java</include>
<include>**/ITestS3AMiniYarnCluster.java</include>
<include>**/ITest*Root*.java</include> <include>**/ITest*Root*.java</include>
<include>**/ITestS3AFileContextStatistics.java</include> <include>**/ITestS3AFileContextStatistics.java</include>
<include>**/ITestS3AHuge*.java</include> <include>**/ITestS3AHuge*.java</include>

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.fs.contract.s3a; package org.apache.hadoop.fs.contract.s3a;
import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.Constants.*;
import static org.apache.hadoop.fs.s3a.S3ATestConstants.SCALE_TEST_TIMEOUT_MILLIS;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
@ -32,6 +33,11 @@ public class ITestS3AContractDistCp extends AbstractContractDistCpTest {
private static final long MULTIPART_SETTING = MULTIPART_MIN_SIZE; private static final long MULTIPART_SETTING = MULTIPART_MIN_SIZE;
@Override
protected int getTestTimeoutMillis() {
return SCALE_TEST_TIMEOUT_MILLIS;
}
@Override @Override
protected Configuration createConfiguration() { protected Configuration createConfiguration() {
Configuration newConf = super.createConfiguration(); Configuration newConf = super.createConfiguration();

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.fs.contract.s3a;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract; import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
import org.apache.hadoop.fs.s3a.S3ATestUtils;
/** /**
* The contract of S3A: only enabled if the test bucket is provided. * The contract of S3A: only enabled if the test bucket is provided.
@ -29,7 +30,6 @@ public class S3AContract extends AbstractBondedFSContract {
public static final String CONTRACT_XML = "contract/s3a.xml"; public static final String CONTRACT_XML = "contract/s3a.xml";
public S3AContract(Configuration conf) { public S3AContract(Configuration conf) {
super(conf); super(conf);
//insert the base features //insert the base features
@ -43,8 +43,6 @@ public class S3AContract extends AbstractBondedFSContract {
@Override @Override
public Path getTestPath() { public Path getTestPath() {
String testUniqueForkId = System.getProperty("test.unique.fork.id"); return S3ATestUtils.createTestPath(super.getTestPath());
return testUniqueForkId == null ? super.getTestPath() :
new Path("/" + testUniqueForkId, "test");
} }
} }

View File

@ -26,8 +26,8 @@ import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.contract.s3a.S3AContract; import org.apache.hadoop.fs.contract.s3a.S3AContract;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.junit.Before; import org.junit.Before;
import org.junit.Rule; import org.slf4j.Logger;
import org.junit.rules.TestName; import org.slf4j.LoggerFactory;
import java.io.IOException; import java.io.IOException;
@ -40,6 +40,9 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset;
public abstract class AbstractS3ATestBase extends AbstractFSContractTestBase public abstract class AbstractS3ATestBase extends AbstractFSContractTestBase
implements S3ATestConstants { implements S3ATestConstants {
protected static final Logger LOG =
LoggerFactory.getLogger(AbstractS3ATestBase.class);
@Override @Override
protected AbstractFSContract createContract(Configuration conf) { protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf); return new S3AContract(conf);
@ -52,14 +55,16 @@ public abstract class AbstractS3ATestBase extends AbstractFSContractTestBase
IOUtils.closeStream(getFileSystem()); IOUtils.closeStream(getFileSystem());
} }
@Rule
public TestName methodName = new TestName();
@Before @Before
public void nameThread() { public void nameThread() {
Thread.currentThread().setName("JUnit-" + methodName.getMethodName()); Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
} }
@Override
protected int getTestTimeoutMillis() {
return S3A_TEST_TIMEOUT;
}
protected Configuration getConfiguration() { protected Configuration getConfiguration() {
return getContract().getConf(); return getContract().getConf();
} }
@ -73,6 +78,17 @@ public abstract class AbstractS3ATestBase extends AbstractFSContractTestBase
return (S3AFileSystem) super.getFileSystem(); return (S3AFileSystem) super.getFileSystem();
} }
/**
* Describe a test in the logs.
* @param text text to print
* @param args arguments to format in the printing
*/
protected void describe(String text, Object... args) {
LOG.info("\n\n{}: {}\n",
methodName.getMethodName(),
String.format(text, args));
}
/** /**
* Write a file, read it back, validate the dataset. Overwrites the file * Write a file, read it back, validate the dataset. Overwrites the file
* if it is present * if it is present

View File

@ -1,82 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
/**
* Demonstrate that the threadpool blocks additional client requests if
* its queue is full (rather than throwing an exception) by initiating an
* upload consisting of 4 parts with 2 threads and 1 spot in the queue. The
* 4th part should not trigger an exception as it would with a
* non-blocking threadpool.
*/
public class ITestS3ABlockingThreadPool {

  // Built in setUp(); individual tests may tweak it further before the
  // filesystem is created.
  private Configuration conf;

  // Created lazily inside each test (not in setUp()) so per-test
  // configuration changes take effect; cleaned up in tearDown().
  private S3AFileSystem fs;

  // Generous 30-minute cap: the uploads run against a real S3 endpoint.
  @Rule
  public Timeout testTimeout = new Timeout(30 * 60 * 1000);

  /**
   * Path under which all test data is written.
   * @return the fixed test path {@code /tests3a}
   */
  protected Path getTestPath() {
    return new Path("/tests3a");
  }

  /**
   * Configure a small multipart threshold/part size (5 MB) with only two
   * worker threads and a single queued task, so a 16 MB upload (4 parts)
   * must block on the thread pool rather than enqueue all parts at once.
   * @throws Exception on any failure
   */
  @Before
  public void setUp() throws Exception {
    conf = new Configuration();
    conf.setLong(Constants.MIN_MULTIPART_THRESHOLD, 5 * 1024 * 1024);
    conf.setLong(Constants.MULTIPART_SIZE, 5 * 1024 * 1024);
    conf.setInt(Constants.MAX_THREADS, 2);
    conf.setInt(Constants.MAX_TOTAL_TASKS, 1);
  }

  /**
   * Delete the test data after each case, if a filesystem was created.
   * @throws Exception on any failure
   */
  @After
  public void tearDown() throws Exception {
    if (fs != null) {
      fs.delete(getTestPath(), true);
    }
  }

  /**
   * 16 MB multipart upload through the default output path; must complete
   * without the saturated thread pool rejecting any part.
   * @throws Exception on any failure
   */
  @Test
  public void testRegularMultiPartUpload() throws Exception {
    fs = S3ATestUtils.createTestFileSystem(conf);
    ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 16 * 1024 *
        1024);
  }

  /**
   * Same 16 MB upload with the fast-upload byte-buffer output stream
   * enabled; again expects blocking rather than rejection.
   * @throws Exception on any failure
   */
  @Test
  public void testFastMultiPartUpload() throws Exception {
    conf.setBoolean(Constants.FAST_UPLOAD, true);
    conf.set(Constants.FAST_UPLOAD_BUFFER,
        Constants.FAST_UPLOAD_BYTEBUFFER);
    fs = S3ATestUtils.createTestFileSystem(conf);
    ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 16 * 1024 *
        1024);
  }
}

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -18,16 +18,11 @@
package org.apache.hadoop.fs.s3a; package org.apache.hadoop.fs.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.apache.hadoop.fs.contract.s3a.S3AContract;
import org.junit.Rule;
import org.junit.Test; import org.junit.Test;
import org.junit.rules.Timeout;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
@ -38,19 +33,11 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.fileStatsToString;
/** /**
* S3A tests for configuring block size. * S3A tests for configuring block size.
*/ */
public class ITestS3ABlocksize extends AbstractFSContractTestBase { public class ITestS3ABlocksize extends AbstractS3ATestBase {
private static final Logger LOG = private static final Logger LOG =
LoggerFactory.getLogger(ITestS3ABlocksize.class); LoggerFactory.getLogger(ITestS3ABlocksize.class);
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
@Rule
public Timeout testTimeout = new Timeout(30 * 60 * 1000);
@Test @Test
@SuppressWarnings("deprecation") @SuppressWarnings("deprecation")
public void testBlockSize() throws Exception { public void testBlockSize() throws Exception {

View File

@ -68,34 +68,37 @@ public class ITestS3AConfiguration {
private static final Logger LOG = private static final Logger LOG =
LoggerFactory.getLogger(ITestS3AConfiguration.class); LoggerFactory.getLogger(ITestS3AConfiguration.class);
private static final String TEST_ENDPOINT = "test.fs.s3a.endpoint";
@Rule @Rule
public Timeout testTimeout = new Timeout(30 * 60 * 1000); public Timeout testTimeout = new Timeout(
S3ATestConstants.S3A_TEST_TIMEOUT
);
@Rule @Rule
public final TemporaryFolder tempDir = new TemporaryFolder(); public final TemporaryFolder tempDir = new TemporaryFolder();
/** /**
* Test if custom endpoint is picked up. * Test if custom endpoint is picked up.
* <p/> * <p>
* The test expects TEST_ENDPOINT to be defined in the Configuration * The test expects {@link S3ATestConstants#CONFIGURATION_TEST_ENDPOINT}
* to be defined in the Configuration
* describing the endpoint of the bucket to which TEST_FS_S3A_NAME points * describing the endpoint of the bucket to which TEST_FS_S3A_NAME points
* (f.i. "s3-eu-west-1.amazonaws.com" if the bucket is located in Ireland). * (i.e. "s3-eu-west-1.amazonaws.com" if the bucket is located in Ireland).
* Evidently, the bucket has to be hosted in the region denoted by the * Evidently, the bucket has to be hosted in the region denoted by the
* endpoint for the test to succeed. * endpoint for the test to succeed.
* <p/> * <p>
* More info and the list of endpoint identifiers: * More info and the list of endpoint identifiers:
* http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region * @see <a href="http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">endpoint list</a>.
* *
* @throws Exception * @throws Exception
*/ */
@Test @Test
public void testEndpoint() throws Exception { public void testEndpoint() throws Exception {
conf = new Configuration(); conf = new Configuration();
String endpoint = conf.getTrimmed(TEST_ENDPOINT, ""); String endpoint = conf.getTrimmed(
S3ATestConstants.CONFIGURATION_TEST_ENDPOINT, "");
if (endpoint.isEmpty()) { if (endpoint.isEmpty()) {
LOG.warn("Custom endpoint test skipped as " + TEST_ENDPOINT + "config " + LOG.warn("Custom endpoint test skipped as " +
S3ATestConstants.CONFIGURATION_TEST_ENDPOINT + "config " +
"setting was not detected"); "setting was not detected");
} else { } else {
conf.set(Constants.ENDPOINT, endpoint); conf.set(Constants.ENDPOINT, endpoint);

View File

@ -22,7 +22,6 @@ import com.amazonaws.services.s3.model.ObjectMetadata;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test; import org.junit.Test;
import java.io.IOException; import java.io.IOException;
@ -48,15 +47,9 @@ public class ITestS3AEncryption extends AbstractS3ATestBase {
} }
private static final int[] SIZES = { private static final int[] SIZES = {
0, 1, 2, 3, 4, 5, 254, 255, 256, 257, 2 ^ 10 - 3, 2 ^ 11 - 2, 2 ^ 12 - 1 0, 1, 2, 3, 4, 5, 254, 255, 256, 257, 2 ^ 12 - 1
}; };
@Override
public void teardown() throws Exception {
super.teardown();
IOUtils.closeStream(getFileSystem());
}
@Test @Test
public void testEncryption() throws Throwable { public void testEncryption() throws Throwable {
for (int size: SIZES) { for (int size: SIZES) {

View File

@ -20,7 +20,6 @@ package org.apache.hadoop.fs.s3a;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test; import org.junit.Test;
import java.io.IOException; import java.io.IOException;
@ -43,12 +42,6 @@ public class ITestS3AEncryptionAlgorithmPropagation
return conf; return conf;
} }
@Override
public void teardown() throws Exception {
super.teardown();
IOUtils.closeStream(getFileSystem());
}
@Test @Test
public void testEncrypt0() throws Throwable { public void testEncrypt0() throws Throwable {
writeThenReadFileToFailure(0); writeThenReadFileToFailure(0);

View File

@ -18,13 +18,9 @@
package org.apache.hadoop.fs.s3a; package org.apache.hadoop.fs.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.apache.hadoop.fs.contract.s3a.S3AContract;
import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.test.LambdaTestUtils;
import org.junit.Test; import org.junit.Test;
@ -41,15 +37,10 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
* Test S3A Failure translation, including a functional test * Test S3A Failure translation, including a functional test
* generating errors during stream IO. * generating errors during stream IO.
*/ */
public class ITestS3AFailureHandling extends AbstractFSContractTestBase { public class ITestS3AFailureHandling extends AbstractS3ATestBase {
private static final Logger LOG = private static final Logger LOG =
LoggerFactory.getLogger(ITestS3AFailureHandling.class); LoggerFactory.getLogger(ITestS3AFailureHandling.class);
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
@Test @Test
public void testReadFileChanged() throws Throwable { public void testReadFileChanged() throws Throwable {
describe("overwrite a file with a shorter one during a read, seek"); describe("overwrite a file with a shorter one during a read, seek");

View File

@ -18,13 +18,9 @@
package org.apache.hadoop.fs.s3a; package org.apache.hadoop.fs.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.contract.s3a.S3AContract;
import org.junit.Test; import org.junit.Test;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
@ -43,7 +39,7 @@ import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
* Use metrics to assert about the cost of file status queries. * Use metrics to assert about the cost of file status queries.
* {@link S3AFileSystem#getFileStatus(Path)}. * {@link S3AFileSystem#getFileStatus(Path)}.
*/ */
public class ITestS3AFileOperationCost extends AbstractFSContractTestBase { public class ITestS3AFileOperationCost extends AbstractS3ATestBase {
private MetricDiff metadataRequests; private MetricDiff metadataRequests;
private MetricDiff listRequests; private MetricDiff listRequests;
@ -51,16 +47,6 @@ public class ITestS3AFileOperationCost extends AbstractFSContractTestBase {
private static final Logger LOG = private static final Logger LOG =
LoggerFactory.getLogger(ITestS3AFileOperationCost.class); LoggerFactory.getLogger(ITestS3AFileOperationCost.class);
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
@Override
public S3AFileSystem getFileSystem() {
return (S3AFileSystem) super.getFileSystem();
}
@Override @Override
public void setup() throws Exception { public void setup() throws Exception {
super.setup(); super.setup();
@ -246,7 +232,8 @@ public class ITestS3AFileOperationCost extends AbstractFSContractTestBase {
int destDirDepth = directoriesInPath(destDir); int destDirDepth = directoriesInPath(destDir);
directoriesCreated.assertDiffEquals(state, 1); directoriesCreated.assertDiffEquals(state, 1);
/* TODO: uncomment once HADOOP-13222 is in /* TODO: uncomment once HADOOP-13222 "s3a.mkdirs() to delete empty fake parent directories"
is in
deleteRequests.assertDiffEquals(state,1); deleteRequests.assertDiffEquals(state,1);
directoriesDeleted.assertDiffEquals(state,0); directoriesDeleted.assertDiffEquals(state,0);
fakeDirectoriesDeleted.assertDiffEquals(state,destDirDepth); fakeDirectoriesDeleted.assertDiffEquals(state,destDirDepth);

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -18,6 +18,9 @@
package org.apache.hadoop.fs.s3a; package org.apache.hadoop.fs.s3a;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TestName;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -38,18 +41,44 @@ public class ITestS3AFileSystemContract extends FileSystemContractBaseTest {
protected static final Logger LOG = protected static final Logger LOG =
LoggerFactory.getLogger(ITestS3AFileSystemContract.class); LoggerFactory.getLogger(ITestS3AFileSystemContract.class);
private Path basePath;
@Rule
public TestName methodName = new TestName();
@Before
public void nameThread() {
Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
}
@Override @Override
public void setUp() throws Exception { public void setUp() throws Exception {
Configuration conf = new Configuration(); Configuration conf = new Configuration();
fs = S3ATestUtils.createTestFileSystem(conf); fs = S3ATestUtils.createTestFileSystem(conf);
basePath = fs.makeQualified(
S3ATestUtils.createTestPath(new Path("/s3afilesystemcontract")));
super.setUp(); super.setUp();
} }
/**
* This path explicitly places all absolute paths under the per-test suite
* path directory; this allows the test to run in parallel.
* @param pathString path string as input
* @return a qualified path string.
*/
protected Path path(String pathString) {
if (pathString.startsWith("/")) {
return fs.makeQualified(new Path(basePath, pathString));
} else {
return super.path(pathString);
}
}
@Override @Override
protected void tearDown() throws Exception { protected void tearDown() throws Exception {
if (fs != null) { if (fs != null) {
fs.delete(path("test"), true); fs.delete(basePath, true);
} }
super.tearDown(); super.tearDown();
} }

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -28,9 +28,6 @@ import com.amazonaws.services.securitytoken.model.GetSessionTokenRequest;
import com.amazonaws.services.securitytoken.model.GetSessionTokenResult; import com.amazonaws.services.securitytoken.model.GetSessionTokenResult;
import com.amazonaws.services.securitytoken.model.Credentials; import com.amazonaws.services.securitytoken.model.Credentials;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.apache.hadoop.fs.contract.s3a.S3AContract;
import org.apache.hadoop.fs.s3native.S3xLoginHelper; import org.apache.hadoop.fs.s3native.S3xLoginHelper;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -48,9 +45,7 @@ import static org.apache.hadoop.fs.s3a.Constants.*;
* should only be used against transient filesystems where you don't care about * should only be used against transient filesystems where you don't care about
* the data. * the data.
*/ */
public class ITestS3ATemporaryCredentials extends AbstractFSContractTestBase { public class ITestS3ATemporaryCredentials extends AbstractS3ATestBase {
public static final String TEST_STS_ENABLED = "test.fs.s3a.sts.enabled";
public static final String TEST_STS_ENDPOINT = "test.fs.s3a.sts.endpoint";
private static final Logger LOG = private static final Logger LOG =
LoggerFactory.getLogger(ITestS3ATemporaryCredentials.class); LoggerFactory.getLogger(ITestS3ATemporaryCredentials.class);
@ -60,11 +55,6 @@ public class ITestS3ATemporaryCredentials extends AbstractFSContractTestBase {
private static final long TEST_FILE_SIZE = 1024; private static final long TEST_FILE_SIZE = 1024;
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
/** /**
* Test use of STS for requesting temporary credentials. * Test use of STS for requesting temporary credentials.
* *

View File

@ -134,14 +134,33 @@ public interface S3ATestConstants {
*/ */
int DEFAULT_DIRECTORY_COUNT = 2; int DEFAULT_DIRECTORY_COUNT = 2;
/**
* Default scale test timeout in seconds: {@value}.
*/
int DEFAULT_TEST_TIMEOUT = 30 * 60;
/** /**
* Default policy on scale tests: {@value}. * Default policy on scale tests: {@value}.
*/ */
boolean DEFAULT_SCALE_TESTS_ENABLED = false; boolean DEFAULT_SCALE_TESTS_ENABLED = false;
/**
* Fork ID passed down from maven if the test is running in parallel.
*/
String TEST_UNIQUE_FORK_ID = "test.unique.fork.id";
String TEST_STS_ENABLED = "test.fs.s3a.sts.enabled";
String TEST_STS_ENDPOINT = "test.fs.s3a.sts.endpoint";
/**
* Timeout in Milliseconds for standard tests: {@value}.
*/
int S3A_TEST_TIMEOUT = 10 * 60 * 1000;
/**
* Timeout in Seconds for Scale Tests: {@value}.
*/
int SCALE_TEST_TIMEOUT_SECONDS = 30 * 60;
int SCALE_TEST_TIMEOUT_MILLIS = SCALE_TEST_TIMEOUT_SECONDS * 1000;
/**
* Optional custom endpoint for S3A configuration tests.
* This does <i>not</i> set the endpoint for s3 access elsewhere.
*/
String CONFIGURATION_TEST_ENDPOINT =
"test.fs.s3a.endpoint";
} }

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.fs.s3a;
import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.scale.S3AScaleTestBase; import org.apache.hadoop.fs.s3a.scale.S3AScaleTestBase;
import org.junit.Assert; import org.junit.Assert;
import org.junit.internal.AssumptionViolatedException; import org.junit.internal.AssumptionViolatedException;
@ -59,7 +60,7 @@ public final class S3ATestUtils {
*/ */
public static S3AFileSystem createTestFileSystem(Configuration conf) public static S3AFileSystem createTestFileSystem(Configuration conf)
throws IOException { throws IOException {
return createTestFileSystem(conf, true); return createTestFileSystem(conf, false);
} }
/** /**
@ -302,6 +303,19 @@ public final class S3ATestUtils {
} }
} }
/**
* Create a test path, using the value of
* {@link S3ATestConstants#TEST_UNIQUE_FORK_ID} if it is set.
* @param defVal default value
* @return a path
*/
public static Path createTestPath(Path defVal) {
String testUniqueForkId = System.getProperty(
S3ATestConstants.TEST_UNIQUE_FORK_ID);
return testUniqueForkId == null ? defVal :
new Path("/" + testUniqueForkId, "test");
}
/** /**
* Reset all metrics in a list. * Reset all metrics in a list.
* @param metrics metrics to reset * @param metrics metrics to reset

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageStatistics; import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.s3a.S3AFileStatus; import org.apache.hadoop.fs.s3a.S3AFileStatus;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.Statistic; import org.apache.hadoop.fs.s3a.Statistic;
import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Progressable;
@ -70,27 +71,22 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
private int partitionSize; private int partitionSize;
@Override @Override
public void setUp() throws Exception { public void setup() throws Exception {
super.setUp(); super.setup();
final Path testPath = getTestPath(); final Path testPath = getTestPath();
scaleTestDir = new Path(testPath, "scale"); scaleTestDir = new Path(testPath, "scale");
hugefile = new Path(scaleTestDir, "hugefile"); hugefile = new Path(scaleTestDir, "hugefile");
hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed"); hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed");
} }
@Override
public void tearDown() throws Exception {
// do nothing. Specifically: do not delete the test dir
}
/** /**
* Note that this can get called before test setup. * Note that this can get called before test setup.
* @return the configuration to use. * @return the configuration to use.
*/ */
@Override @Override
protected Configuration createConfiguration() { protected Configuration createScaleConfiguration() {
Configuration conf = super.createConfiguration(); Configuration conf = super.createScaleConfiguration();
partitionSize = (int)getTestPropertyBytes(conf, partitionSize = (int)getTestPropertyBytes(conf,
KEY_HUGE_PARTITION_SIZE, KEY_HUGE_PARTITION_SIZE,
DEFAULT_PARTITION_SIZE); DEFAULT_PARTITION_SIZE);
@ -155,6 +151,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
// perform the upload. // perform the upload.
// there's lots of logging here, so that a tail -f on the output log // there's lots of logging here, so that a tail -f on the output log
// can give a view of what is happening. // can give a view of what is happening.
S3AFileSystem fs = getFileSystem();
StorageStatistics storageStatistics = fs.getStorageStatistics(); StorageStatistics storageStatistics = fs.getStorageStatistics();
String putRequests = Statistic.OBJECT_PUT_REQUESTS.getSymbol(); String putRequests = Statistic.OBJECT_PUT_REQUESTS.getSymbol();
String putBytes = Statistic.OBJECT_PUT_BYTES.getSymbol(); String putBytes = Statistic.OBJECT_PUT_BYTES.getSymbol();
@ -286,12 +283,13 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
} }
void assumeHugeFileExists() throws IOException { void assumeHugeFileExists() throws IOException {
S3AFileSystem fs = getFileSystem();
ContractTestUtils.assertPathExists(fs, "huge file not created", hugefile); ContractTestUtils.assertPathExists(fs, "huge file not created", hugefile);
ContractTestUtils.assertIsFile(fs, hugefile); ContractTestUtils.assertIsFile(fs, hugefile);
} }
private void logFSState() { private void logFSState() {
LOG.info("File System state after operation:\n{}", fs); LOG.info("File System state after operation:\n{}", getFileSystem());
} }
@Test @Test
@ -305,6 +303,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
} }
String filetype = encrypted ? "encrypted file" : "file"; String filetype = encrypted ? "encrypted file" : "file";
describe("Positioned reads of %s %s", filetype, hugefile); describe("Positioned reads of %s %s", filetype, hugefile);
S3AFileSystem fs = getFileSystem();
S3AFileStatus status = fs.getFileStatus(hugefile); S3AFileStatus status = fs.getFileStatus(hugefile);
long filesize = status.getLen(); long filesize = status.getLen();
int ops = 0; int ops = 0;
@ -344,6 +343,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
public void test_050_readHugeFile() throws Throwable { public void test_050_readHugeFile() throws Throwable {
assumeHugeFileExists(); assumeHugeFileExists();
describe("Reading %s", hugefile); describe("Reading %s", hugefile);
S3AFileSystem fs = getFileSystem();
S3AFileStatus status = fs.getFileStatus(hugefile); S3AFileStatus status = fs.getFileStatus(hugefile);
long filesize = status.getLen(); long filesize = status.getLen();
long blocks = filesize / uploadBlockSize; long blocks = filesize / uploadBlockSize;
@ -369,6 +369,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
public void test_100_renameHugeFile() throws Throwable { public void test_100_renameHugeFile() throws Throwable {
assumeHugeFileExists(); assumeHugeFileExists();
describe("renaming %s to %s", hugefile, hugefileRenamed); describe("renaming %s to %s", hugefile, hugefileRenamed);
S3AFileSystem fs = getFileSystem();
S3AFileStatus status = fs.getFileStatus(hugefile); S3AFileStatus status = fs.getFileStatus(hugefile);
long filesize = status.getLen(); long filesize = status.getLen();
fs.delete(hugefileRenamed, false); fs.delete(hugefileRenamed, false);
@ -396,7 +397,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
public void test_999_DeleteHugeFiles() throws IOException { public void test_999_DeleteHugeFiles() throws IOException {
deleteHugeFile(); deleteHugeFile();
ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer(); ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
S3AFileSystem fs = getFileSystem();
fs.delete(hugefileRenamed, false); fs.delete(hugefileRenamed, false);
timer2.end("time to delete %s", hugefileRenamed); timer2.end("time to delete %s", hugefileRenamed);
ContractTestUtils.rm(fs, getTestPath(), true, true); ContractTestUtils.rm(fs, getTestPath(), true, true);
@ -405,7 +406,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
protected void deleteHugeFile() throws IOException { protected void deleteHugeFile() throws IOException {
describe("Deleting %s", hugefile); describe("Deleting %s", hugefile);
NanoTimer timer = new NanoTimer(); NanoTimer timer = new NanoTimer();
fs.delete(hugefile, false); getFileSystem().delete(hugefile, false);
timer.end("time to delete %s", hugefile); timer.end("time to delete %s", hugefile);
} }

View File

@ -20,9 +20,6 @@ package org.apache.hadoop.fs.s3a.scale;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.fs.s3a.Constants;
import org.junit.Test;
import java.io.IOException;
/** /**
* Tests file deletion with multi-delete disabled. * Tests file deletion with multi-delete disabled.
@ -30,15 +27,10 @@ import java.io.IOException;
public class ITestS3ADeleteFilesOneByOne extends ITestS3ADeleteManyFiles { public class ITestS3ADeleteFilesOneByOne extends ITestS3ADeleteManyFiles {
@Override @Override
protected Configuration createConfiguration() { protected Configuration createScaleConfiguration() {
Configuration configuration = super.createConfiguration(); Configuration configuration = super.createScaleConfiguration();
configuration.setBoolean(Constants.ENABLE_MULTI_DELETE, false); configuration.setBoolean(Constants.ENABLE_MULTI_DELETE, false);
return configuration; return configuration;
} }
@Override
@Test
public void testOpenCreate() throws IOException {
}
} }

View File

@ -20,6 +20,8 @@ package org.apache.hadoop.fs.s3a.scale;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.junit.Test; import org.junit.Test;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
@ -50,12 +52,12 @@ public class ITestS3ADeleteManyFiles extends S3AScaleTestBase {
*/ */
@Test @Test
public void testBulkRenameAndDelete() throws Throwable { public void testBulkRenameAndDelete() throws Throwable {
final Path scaleTestDir = getTestPath(); final Path scaleTestDir = path("testBulkRenameAndDelete");
final Path srcDir = new Path(scaleTestDir, "src"); final Path srcDir = new Path(scaleTestDir, "src");
final Path finalDir = new Path(scaleTestDir, "final"); final Path finalDir = new Path(scaleTestDir, "final");
final long count = getOperationCount(); final long count = getOperationCount();
final S3AFileSystem fs = getFileSystem();
ContractTestUtils.rm(fs, scaleTestDir, true, false); ContractTestUtils.rm(fs, scaleTestDir, true, false);
fs.mkdirs(srcDir); fs.mkdirs(srcDir);
fs.mkdirs(finalDir); fs.mkdirs(finalDir);
@ -114,11 +116,4 @@ public class ITestS3ADeleteManyFiles extends S3AScaleTestBase {
ContractTestUtils.assertDeleted(fs, finalDir, true, false); ContractTestUtils.assertDeleted(fs, finalDir, true, false);
} }
@Test
public void testOpenCreate() throws IOException {
final Path scaleTestDir = getTestPath();
final Path srcDir = new Path(scaleTestDir, "opencreate");
ContractTestUtils.createAndVerifyFile(fs, srcDir, 1024);
ContractTestUtils.createAndVerifyFile(fs, srcDir, 50 * 1024);
}
} }

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.fs.s3a.scale; package org.apache.hadoop.fs.s3a.scale;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.Statistic; import org.apache.hadoop.fs.s3a.Statistic;
import org.junit.Test; import org.junit.Test;
import org.slf4j.Logger; import org.slf4j.Logger;
@ -40,8 +41,9 @@ public class ITestS3ADirectoryPerformance extends S3AScaleTestBase {
@Test @Test
public void testListOperations() throws Throwable { public void testListOperations() throws Throwable {
describe("Test recursive list operations"); describe("Test recursive list operations");
final Path scaleTestDir = getTestPath(); final Path scaleTestDir = path("testListOperations");
final Path listDir = new Path(scaleTestDir, "lists"); final Path listDir = new Path(scaleTestDir, "lists");
S3AFileSystem fs = getFileSystem();
// scale factor. // scale factor.
int scale = getConf().getInt(KEY_DIRECTORY_COUNT, DEFAULT_DIRECTORY_COUNT); int scale = getConf().getInt(KEY_DIRECTORY_COUNT, DEFAULT_DIRECTORY_COUNT);
@ -137,15 +139,16 @@ public class ITestS3ADirectoryPerformance extends S3AScaleTestBase {
@Test @Test
public void testTimeToStatEmptyDirectory() throws Throwable { public void testTimeToStatEmptyDirectory() throws Throwable {
describe("Time to stat an empty directory"); describe("Time to stat an empty directory");
Path path = new Path(getTestPath(), "empty"); Path path = path("empty");
fs.mkdirs(path); getFileSystem().mkdirs(path);
timeToStatPath(path); timeToStatPath(path);
} }
@Test @Test
public void testTimeToStatNonEmptyDirectory() throws Throwable { public void testTimeToStatNonEmptyDirectory() throws Throwable {
describe("Time to stat a non-empty directory"); describe("Time to stat a non-empty directory");
Path path = new Path(getTestPath(), "dir"); Path path = path("dir");
S3AFileSystem fs = getFileSystem();
fs.mkdirs(path); fs.mkdirs(path);
touch(fs, new Path(path, "file")); touch(fs, new Path(path, "file"));
timeToStatPath(path); timeToStatPath(path);
@ -154,8 +157,8 @@ public class ITestS3ADirectoryPerformance extends S3AScaleTestBase {
@Test @Test
public void testTimeToStatFile() throws Throwable { public void testTimeToStatFile() throws Throwable {
describe("Time to stat a simple file"); describe("Time to stat a simple file");
Path path = new Path(getTestPath(), "file"); Path path = path("file");
touch(fs, path); touch(getFileSystem(), path);
timeToStatPath(path); timeToStatPath(path);
} }
@ -167,6 +170,7 @@ public class ITestS3ADirectoryPerformance extends S3AScaleTestBase {
private void timeToStatPath(Path path) throws IOException { private void timeToStatPath(Path path) throws IOException {
describe("Timing getFileStatus(\"%s\")", path); describe("Timing getFileStatus(\"%s\")", path);
S3AFileSystem fs = getFileSystem();
MetricDiff metadataRequests = MetricDiff metadataRequests =
new MetricDiff(fs, Statistic.OBJECT_METADATA_REQUESTS); new MetricDiff(fs, Statistic.OBJECT_METADATA_REQUESTS);
MetricDiff listRequests = MetricDiff listRequests =

View File

@ -29,8 +29,8 @@ import org.apache.hadoop.fs.s3a.Constants;
public class ITestS3AHugeFilesClassicOutput extends AbstractSTestS3AHugeFiles { public class ITestS3AHugeFilesClassicOutput extends AbstractSTestS3AHugeFiles {
@Override @Override
protected Configuration createConfiguration() { protected Configuration createScaleConfiguration() {
final Configuration conf = super.createConfiguration(); final Configuration conf = super.createScaleConfiguration();
conf.setBoolean(Constants.FAST_UPLOAD, false); conf.setBoolean(Constants.FAST_UPLOAD, false);
return conf; return conf;
} }

View File

@ -436,7 +436,8 @@ public class ITestS3AInputStreamPerformance extends S3AScaleTestBase {
describe("read over a buffer, making sure that the requests" + describe("read over a buffer, making sure that the requests" +
" spans readahead ranges"); " spans readahead ranges");
int datasetLen = _32K; int datasetLen = _32K;
Path dataFile = new Path(getTestPath(), "testReadOverBuffer.bin"); S3AFileSystem fs = getFileSystem();
Path dataFile = path("testReadOverBuffer.bin");
byte[] sourceData = dataset(datasetLen, 0, 64); byte[] sourceData = dataset(datasetLen, 0, 64);
// relies on the field 'fs' referring to the R/W FS // relies on the field 'fs' referring to the R/W FS
writeDataset(fs, dataFile, sourceData, datasetLen, _16K, true); writeDataset(fs, dataFile, sourceData, datasetLen, _16K, true);

View File

@ -21,20 +21,15 @@ package org.apache.hadoop.fs.s3a.scale;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.S3AInputStream; import org.apache.hadoop.fs.s3a.S3AInputStream;
import org.apache.hadoop.fs.s3a.S3AInstrumentation; import org.apache.hadoop.fs.s3a.S3AInstrumentation;
import org.apache.hadoop.fs.s3a.S3ATestConstants; import org.apache.hadoop.fs.s3a.S3ATestConstants;
import org.apache.hadoop.fs.s3a.Statistic; import org.apache.hadoop.fs.s3a.Statistic;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.junit.After;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Assume; import org.junit.Assume;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TestName;
import org.junit.rules.Timeout;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
@ -45,25 +40,35 @@ import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
/** /**
* Base class for scale tests; here is where the common scale configuration * Base class for scale tests; here is where the common scale configuration
* keys are defined. * keys are defined.
* <p>
* Configuration setup is a bit more complex than in the parent classes,
* as the test timeout is desired prior to the {@link #getTestTimeoutMillis()}
* being called to set the test timeout rule; this happens before any of
* the methods tagged with {@code @Before} are invoked.
* <p>
* The algorithm is:
* <ol>
* <li>Create a configuration on demand, via
* {@link #demandCreateConfiguration()}</li>
* <li>Have that return the value of {@link #conf} or create a new one
* if that field is null (and set the field to the created value).</li>
* <li>Override the superclasses {@link #createConfiguration()}
* to return the demand created value; make that method final so that
* subclasses don't break things by overridding it.</li>
* <li>Add a new override point {@link #createScaleConfiguration()}
* to create the config, one which subclasses can (and do) override.</li>
* </ol>
* Bear in mind that this process also takes place during initialization
* of the superclass; the overridden methods are being invoked before
* their instances are fully configured. This is considered
* <i>very bad form</i> in Java code (indeed, in C++ it is actually permitted;
* the base class implementations get invoked instead).
*/ */
public class S3AScaleTestBase extends Assert implements S3ATestConstants { public class S3AScaleTestBase extends AbstractS3ATestBase {
@Rule
public final TestName methodName = new TestName();
@Rule
public Timeout testTimeout = createTestTimeout();
@Before
public void nameThread() {
Thread.currentThread().setName("JUnit");
}
public static final int _1KB = 1024; public static final int _1KB = 1024;
public static final int _1MB = _1KB * _1KB; public static final int _1MB = _1KB * _1KB;
protected S3AFileSystem fs;
protected static final Logger LOG = protected static final Logger LOG =
LoggerFactory.getLogger(S3AScaleTestBase.class); LoggerFactory.getLogger(S3AScaleTestBase.class);
@ -71,14 +76,8 @@ public class S3AScaleTestBase extends Assert implements S3ATestConstants {
private boolean enabled; private boolean enabled;
/**
* Configuration generator. May be overridden to inject private Path testPath;
* some custom options.
* @return a configuration with which to create FS instances
*/
protected Configuration createConfiguration() {
return new Configuration();
}
/** /**
* Get the configuration used to set up the FS. * Get the configuration used to set up the FS.
@ -88,79 +87,72 @@ public class S3AScaleTestBase extends Assert implements S3ATestConstants {
return conf; return conf;
} }
/** @Override
* Setup. This triggers creation of the configuration. public void setup() throws Exception {
*/ super.setup();
@Before testPath = path("/tests3ascale");
public void setUp() throws Exception {
demandCreateConfiguration();
LOG.debug("Scale test operation count = {}", getOperationCount()); LOG.debug("Scale test operation count = {}", getOperationCount());
// multipart purges are disabled on the scale tests // multipart purges are disabled on the scale tests
fs = createTestFileSystem(conf, false);
// check for the test being enabled // check for the test being enabled
enabled = getTestPropertyBool( enabled = getTestPropertyBool(
getConf(), getConf(),
KEY_SCALE_TESTS_ENABLED, KEY_SCALE_TESTS_ENABLED,
DEFAULT_SCALE_TESTS_ENABLED); DEFAULT_SCALE_TESTS_ENABLED);
Assume.assumeTrue("Scale test disabled: to enable set property " + Assume.assumeTrue("Scale test disabled: to enable set property " +
KEY_SCALE_TESTS_ENABLED, enabled); KEY_SCALE_TESTS_ENABLED, isEnabled());
} }
/** /**
* Create the configuration if it is not already set up. * Create the configuration if it is not already set up, calling
* {@link #createScaleConfiguration()} to do so.
* @return the configuration. * @return the configuration.
*/ */
private synchronized Configuration demandCreateConfiguration() { private synchronized Configuration demandCreateConfiguration() {
if (conf == null) { if (conf == null) {
conf = createConfiguration(); conf = createScaleConfiguration();
} }
return conf; return conf;
} }
@After /**
public void tearDown() throws Exception { * Returns the config created with {@link #demandCreateConfiguration()}.
ContractTestUtils.rm(fs, getTestPath(), true, true); * Subclasses must override {@link #createScaleConfiguration()}
* in order to customize their configurations.
* @return a configuration with which to create FS instances
*/
protected final Configuration createConfiguration() {
return demandCreateConfiguration();
}
/**
* Override point: create a configuration.
* @return a configuration with which to create FS instances
*/
protected Configuration createScaleConfiguration() {
return new Configuration();
} }
protected Path getTestPath() { protected Path getTestPath() {
String testUniqueForkId = System.getProperty("test.unique.fork.id"); return testPath;
return testUniqueForkId == null ? new Path("/tests3a") :
new Path("/" + testUniqueForkId, "tests3a");
} }
protected long getOperationCount() { protected long getOperationCount() {
return getConf().getLong(KEY_OPERATION_COUNT, DEFAULT_OPERATION_COUNT); return getConf().getLong(KEY_OPERATION_COUNT, DEFAULT_OPERATION_COUNT);
} }
/**
* Create the timeout for tests. Some large tests may need a larger value.
* @return the test timeout to use
*/
protected Timeout createTestTimeout() {
demandCreateConfiguration();
return new Timeout(
getTestTimeoutSeconds() * 1000);
}
/** /**
* Get the test timeout in seconds. * Get the test timeout in seconds.
* @return the test timeout as set in system properties or the default. * @return the test timeout as set in system properties or the default.
*/ */
protected static int getTestTimeoutSeconds() { protected int getTestTimeoutSeconds() {
return getTestPropertyInt(null, return getTestPropertyInt(demandCreateConfiguration(),
KEY_TEST_TIMEOUT, KEY_TEST_TIMEOUT,
DEFAULT_TEST_TIMEOUT); SCALE_TEST_TIMEOUT_SECONDS);
} }
/** @Override
* Describe a test in the logs. protected int getTestTimeoutMillis() {
* @param text text to print return getTestTimeoutSeconds() * 1000;
* @param args arguments to format in the printing
*/
protected void describe(String text, Object... args) {
LOG.info("\n\n{}: {}\n",
methodName.getMethodName(),
String.format(text, args));
} }
/** /**
@ -189,20 +181,25 @@ public class S3AScaleTestBase extends Assert implements S3ATestConstants {
* @return the value. * @return the value.
*/ */
public long gaugeValue(Statistic statistic) { public long gaugeValue(Statistic statistic) {
S3AInstrumentation instrumentation = fs.getInstrumentation(); S3AInstrumentation instrumentation = getFileSystem().getInstrumentation();
MutableGaugeLong gauge = instrumentation.lookupGauge(statistic.getSymbol()); MutableGaugeLong gauge = instrumentation.lookupGauge(statistic.getSymbol());
assertNotNull("No gauge " + statistic assertNotNull("No gauge " + statistic
+ " in " + instrumentation.dump("", " = ", "\n", true), gauge); + " in " + instrumentation.dump("", " = ", "\n", true), gauge);
return gauge.value(); return gauge.value();
} }
protected boolean isEnabled() { /**
* Is the test enabled; this is controlled by the configuration
* and the {@code -Dscale} maven option.
* @return true if the scale tests are enabled.
*/
protected final boolean isEnabled() {
return enabled; return enabled;
} }
/** /**
* Flag to indicate that this test is being used sequentially. This * Flag to indicate that this test is being executed in parallel.
* is used by some of the scale tests to validate test time expectations. * This is used by some of the scale tests to validate test time expectations.
* @return true if the build indicates this test is being run in parallel. * @return true if the build indicates this test is being run in parallel.
*/ */
protected boolean isParallelExecution() { protected boolean isParallelExecution() {

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -59,7 +59,7 @@ public class ITestS3A {
} }
protected Path getTestPath() { protected Path getTestPath() {
return new Path("/tests3afc"); return S3ATestUtils.createTestPath(new Path("/tests3afc"));
} }
@Test @Test

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one or more * Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this * contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF * work for additional information regarding copyright ownership. The ASF
@ -24,13 +24,13 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.examples.WordCount; import org.apache.hadoop.examples.WordCount;
import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.S3ATestUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Job;
@ -39,26 +39,26 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.junit.After;
import static org.junit.Assert.assertTrue;
import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import static org.junit.Assert.assertEquals;
/** /**
* Tests that S3A is usable through a YARN application. * Tests that S3A is usable through a YARN application.
*/ */
public class ITestS3AMiniYarnCluster { public class ITestS3AMiniYarnCluster extends AbstractS3ATestBase {
private final Configuration conf = new YarnConfiguration(); private final Configuration conf = new YarnConfiguration();
private S3AFileSystem fs; private S3AFileSystem fs;
private MiniYARNCluster yarnCluster; private MiniYARNCluster yarnCluster;
private final String rootPath = "/tests/MiniClusterWordCount/"; private Path rootPath;
@Before @Override
public void beforeTest() throws IOException { public void setup() throws Exception {
super.setup();
fs = S3ATestUtils.createTestFileSystem(conf); fs = S3ATestUtils.createTestFileSystem(conf);
fs.mkdirs(new Path(rootPath + "input/")); rootPath = path("MiniClusterWordCount");
Path workingDir = path("working");
fs.setWorkingDirectory(workingDir);
fs.mkdirs(new Path(rootPath, "input/"));
yarnCluster = new MiniYARNCluster("MiniClusterWordCount", // testName yarnCluster = new MiniYARNCluster("MiniClusterWordCount", // testName
1, // number of node managers 1, // number of node managers
@ -68,17 +68,19 @@ public class ITestS3AMiniYarnCluster {
yarnCluster.start(); yarnCluster.start();
} }
@After @Override
public void afterTest() throws IOException { public void teardown() throws Exception {
fs.delete(new Path(rootPath), true); if (yarnCluster != null) {
yarnCluster.stop(); yarnCluster.stop();
} }
super.teardown();
}
@Test @Test
public void testWithMiniCluster() throws Exception { public void testWithMiniCluster() throws Exception {
Path input = new Path(rootPath + "input/in.txt"); Path input = new Path(rootPath, "input/in.txt");
input = input.makeQualified(fs.getUri(), fs.getWorkingDirectory()); input = input.makeQualified(fs.getUri(), fs.getWorkingDirectory());
Path output = new Path(rootPath + "output/"); Path output = new Path(rootPath, "output/");
output = output.makeQualified(fs.getUri(), fs.getWorkingDirectory()); output = output.makeQualified(fs.getUri(), fs.getWorkingDirectory());
writeStringToFile(input, "first line\nsecond line\nthird line"); writeStringToFile(input, "first line\nsecond line\nthird line");
@ -134,15 +136,9 @@ public class ITestS3AMiniYarnCluster {
/** /**
* helper method. * helper method.
*/ */
private String readStringFromFile(Path path) { private String readStringFromFile(Path path) throws IOException {
try (FSDataInputStream in = fs.open(path)) { return ContractTestUtils.readBytesToString(fs, path,
long bytesLen = fs.getFileStatus(path).getLen(); (int) fs.getFileStatus(path).getLen());
byte[] buffer = new byte[(int) bytesLen];
IOUtils.readFully(in, buffer, 0, buffer.length);
return new String(buffer);
} catch (IOException e) {
throw new RuntimeException("Failed to read from [" + path + "]", e);
}
} }
} }