HADOOP-12696. Add tests for S3FileSystem Contract. Contributed by Matt Paduano

(cherry picked from commit 1acc509b45)
(cherry picked from commit 2cbb8bbd72)
Ravi Prakash 2016-01-19 13:57:08 -08:00
parent 5dc2e78c97
commit a359dc87d4
15 changed files with 430 additions and 34 deletions

CHANGES.txt (hadoop-common)

@@ -380,6 +380,8 @@ Release 2.8.0 - UNRELEASED
    HADOOP-12604. Exception may be swallowed in KMSClientProvider.
    (Yongjun Zhang)
+   HADOOP-12696. Add tests for S3Filesystem Contract (Matt Paduano via raviprak)
  OPTIMIZATIONS
    HADOOP-11785. Reduce the number of listStatus operation in distcp

testing.md (filesystem specification docs)

@@ -190,7 +190,7 @@ tests against remote FileSystems that require login details require usernames/ID
 All these details MUST be required to be placed in the file `src/test/resources/contract-test-options.xml`, and your SCM tools configured to never commit this file to subversion, git or
 equivalent. Furthermore, the build MUST be configured to never bundle this file in any `-test` artifacts generated. The Hadoop build does this, excluding `src/test/**/*.xml` from the JAR files.
+In addition, `src/test/resources/auth-keys.xml` will need to be created. It can be a copy of `contract-test-options.xml`.
 The `AbstractFSContract` class automatically loads this resource file if present; specific keys for specific test cases can be added.

 As an example, here are what S3N test keys look like:
@@ -214,7 +214,7 @@ As an example, here are what S3N test keys look like:
 The `AbstractBondedFSContract` automatically skips a test suite if the FileSystem URL is not defined in the property `fs.contract.test.fs.%s`, where `%s` matches the schema name of the FileSystem.
+When running the tests `maven.test.skip` will need to be turned off since it is true by default on these tests. This can be done with a command like `mvn test -Ptests-on`.
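As a sketch (the bucket name is illustrative), the S3 contract suite added by this patch is enabled by declaring its test filesystem URL, following the `fs.contract.test.fs.%s` pattern, in `auth-keys.xml`:

    <configuration>
      <property>
        <name>fs.contract.test.fs.s3</name>
        <value>s3://my-test-bucket</value>
      </property>
    </configuration>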
 ### Important: passing the tests does not guarantee compatibility

AbstractContractSeekTest.java

@@ -116,15 +116,13 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
   */
  @Test
  public void testSeekReadClosedFile() throws Throwable {
-   boolean supportsSeekOnClosedFiles = isSupported(SUPPORTS_SEEK_ON_CLOSED_FILE);
    instream = getFileSystem().open(smallSeekFile);
    getLog().debug(
      "Stream is of type " + instream.getClass().getCanonicalName());
    instream.close();
    try {
      instream.seek(0);
-     if (!supportsSeekOnClosedFiles) {
+     if (!isSupported(SUPPORTS_SEEK_ON_CLOSED_FILE)) {
        fail("seek succeeded on a closed stream");
      }
    } catch (IOException e) {
@@ -132,7 +130,9 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
    }
    try {
      int data = instream.available();
-     fail("read() succeeded on a closed stream, got " + data);
+     if (!isSupported(SUPPORTS_AVAILABLE_ON_CLOSED_FILE)) {
+       fail("available() succeeded on a closed stream, got " + data);
+     }
    } catch (IOException e) {
      //expected a closed file
    }
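Both assertions are now gated by contract options: a filesystem whose streams still answer seek() and available() after close() opts in through its contract XML, as the new s3.xml below does:

<property>
  <name>fs.contract.supports-seek-on-closed-file</name>
  <value>true</value>
</property>
<property>
  <name>fs.contract.supports-available-on-closed-file</name>
  <value>true</value>
</property>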

ContractOptions.java

@@ -147,6 +147,12 @@ public interface ContractOptions {
   */
  String SUPPORTS_SEEK_ON_CLOSED_FILE = "supports-seek-on-closed-file";
+ /**
+  * Is available() on a closed InputStream supported?
+  * {@value}
+  */
+ String SUPPORTS_AVAILABLE_ON_CLOSED_FILE = "supports-available-on-closed-file";
  /**
   * Flag to indicate that this FS expects to throw the strictest
   * exceptions it can, not generic IOEs, which, if returned,

S3FileSystem.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.s3native.NativeS3FileSystem;
@@ -62,7 +63,7 @@ public class S3FileSystem extends FileSystem {
  public S3FileSystem() {
    // set store in initialize()
  }

  public S3FileSystem(FileSystemStore store) {
    this.store = store;
  }
@@ -90,14 +91,14 @@ public class S3FileSystem extends FileSystem {
    }
    store.initialize(uri, conf);
    setConf(conf);
    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDir =
      new Path("/user", System.getProperty("user.name")).makeQualified(this);
  }

  private static FileSystemStore createDefaultStore(Configuration conf) {
    FileSystemStore store = new Jets3tFileSystemStore();
    RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        conf.getInt("fs.s3.maxRetries", 4),
        conf.getLong("fs.s3.sleepTimeSeconds", 10), TimeUnit.SECONDS);
@@ -105,13 +106,13 @@ public class S3FileSystem extends FileSystem {
        new HashMap<Class<? extends Exception>, RetryPolicy>();
    exceptionToPolicyMap.put(IOException.class, basePolicy);
    exceptionToPolicyMap.put(S3Exception.class, basePolicy);
    RetryPolicy methodPolicy = RetryPolicies.retryByException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
    Map<String,RetryPolicy> methodNameToPolicyMap = new HashMap<String,RetryPolicy>();
    methodNameToPolicyMap.put("storeBlock", methodPolicy);
    methodNameToPolicyMap.put("retrieveBlock", methodPolicy);
    return (FileSystemStore) RetryProxy.create(FileSystemStore.class,
        store, methodNameToPolicyMap);
  }
@@ -144,21 +145,29 @@ public class S3FileSystem extends FileSystem {
      paths.add(0, absolutePath);
      absolutePath = absolutePath.getParent();
    } while (absolutePath != null);
    boolean result = true;
-   for (Path p : paths) {
-     result &= mkdir(p);
+   for (int i = 0; i < paths.size(); i++) {
+     Path p = paths.get(i);
+     try {
+       result &= mkdir(p);
+     } catch(FileAlreadyExistsException e) {
+       if (i + 1 < paths.size()) {
+         throw new ParentNotDirectoryException(e.getMessage());
+       }
+       throw e;
+     }
    }
    return result;
  }

  private boolean mkdir(Path path) throws IOException {
    Path absolutePath = makeAbsolute(path);
    INode inode = store.retrieveINode(absolutePath);
    if (inode == null) {
      store.storeINode(absolutePath, INode.DIRECTORY_INODE);
    } else if (inode.isFile()) {
-     throw new IOException(String.format(
+     throw new FileAlreadyExistsException(String.format(
        "Can't make directory for path %s since it is a file.",
        absolutePath));
    }
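A sketch of the resulting mkdirs() semantics, with illustrative paths and fs assumed to be an initialized S3FileSystem:

// Hypothetical walkthrough of the new behaviour (not part of the patch).
void mkdirsSemantics(FileSystem fs) throws IOException {
  fs.create(new Path("/pre/existing")).close();    // "/pre/existing" is now a file
  try {
    fs.mkdirs(new Path("/pre/existing"));          // the final element is the file itself
  } catch (FileAlreadyExistsException expected) {
    // surfaced directly by mkdir()
  }
  try {
    fs.mkdirs(new Path("/pre/existing/child"));    // the file blocks an intermediate directory
  } catch (ParentNotDirectoryException expected) {
    // translated from FileAlreadyExistsException because i + 1 < paths.size()
  }
}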
@@ -176,11 +185,12 @@ public class S3FileSystem extends FileSystem {
  private INode checkFile(Path path) throws IOException {
    INode inode = store.retrieveINode(makeAbsolute(path));
+   String message = String.format("No such file: '%s'", path.toString());
    if (inode == null) {
-     throw new IOException("No such file.");
+     throw new FileNotFoundException(message + " does not exist");
    }
    if (inode.isDirectory()) {
-     throw new IOException("Path " + path + " is a directory.");
+     throw new FileNotFoundException(message + " is a directory");
    }
    return inode;
  }
@@ -222,10 +232,14 @@ public class S3FileSystem extends FileSystem {
    INode inode = store.retrieveINode(makeAbsolute(file));
    if (inode != null) {
-     if (overwrite) {
+     if (overwrite && !inode.isDirectory()) {
        delete(file, true);
      } else {
-       throw new FileAlreadyExistsException("File already exists: " + file);
+       String message = String.format("File already exists: '%s'", file);
+       if (inode.isDirectory()) {
+         message = message + " is a directory";
+       }
+       throw new FileAlreadyExistsException(message);
      }
    } else {
      Path parent = file.getParent();
@@ -233,7 +247,7 @@ public class S3FileSystem extends FileSystem {
      if (!mkdirs(parent)) {
        throw new IOException("Mkdirs failed to create " + parent.toString());
      }
    }
  }
  return new FSDataOutputStream
      (new S3OutputStream(getConf(), store, makeAbsolute(file),
@@ -254,7 +268,7 @@ public class S3FileSystem extends FileSystem {
    INode srcINode = store.retrieveINode(absoluteSrc);
    if (srcINode == null) {
      // src path doesn't exist
      return false;
    }
    Path absoluteDst = makeAbsolute(dst);
    INode dstINode = store.retrieveINode(absoluteDst);
@@ -311,7 +325,7 @@ public class S3FileSystem extends FileSystem {
        store.deleteBlock(block);
      }
    } else {
      FileStatus[] contents = null;
      try {
        contents = listStatus(absolutePath);
      } catch(FileNotFoundException fnfe) {
@@ -319,7 +333,7 @@ public class S3FileSystem extends FileSystem {
      }
      if ((contents.length !=0) && (!recursive)) {
        throw new IOException("Directory " + path.toString()
            + " is not empty.");
      }
      for (FileStatus p:contents) {
@@ -331,9 +345,9 @@ public class S3FileSystem extends FileSystem {
    }
    return true;
  }

  /**
   * FileStatus for S3 file systems.
   */
  @Override
  public FileStatus getFileStatus(Path f) throws IOException {
@@ -343,7 +357,7 @@ public class S3FileSystem extends FileSystem {
    }
    return new S3FileStatus(f.makeQualified(this), inode);
  }

  @Override
  public long getDefaultBlockSize() {
    return getConf().getLong("fs.s3.block.size", 64 * 1024 * 1024);

S3InputStream.java

@@ -22,6 +22,7 @@ import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
+import java.io.EOFException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -46,14 +47,14 @@ class S3InputStream extends FSInputStream {
  private long pos = 0;
  private File blockFile;
  private DataInputStream blockStream;
  private long blockEnd = -1;
  private FileSystem.Statistics stats;

  private static final Log LOG =
      LogFactory.getLog(S3InputStream.class.getName());
@@ -65,7 +66,7 @@ class S3InputStream extends FSInputStream {
  public S3InputStream(Configuration conf, FileSystemStore store,
                       INode inode, FileSystem.Statistics stats) {
    this.store = store;
    this.stats = stats;
    this.blocks = inode.getBlocks();
@@ -86,8 +87,12 @@ class S3InputStream extends FSInputStream {
  @Override
  public synchronized void seek(long targetPos) throws IOException {
+   String message = String.format("Cannot seek to %d", targetPos);
    if (targetPos > fileLength) {
-     throw new IOException("Cannot seek after EOF");
+     throw new EOFException(message + ": after EOF");
+   }
+   if (targetPos < 0) {
+     throw new EOFException(message + ": negative");
    }
    pos = targetPos;
    blockEnd = -1;
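A sketch of the caller-visible change, which is what fs.contract.rejects-seek-past-eof in the new s3.xml asserts (fs, path and len are illustrative):

// Hypothetical walkthrough of the new behaviour (not part of the patch).
void seekBounds(FileSystem fs, Path path, long len) throws IOException {
  FSDataInputStream in = fs.open(path);
  try {
    try {
      in.seek(len + 1);          // past EOF: now EOFException, not a bare IOException
    } catch (EOFException expected) { }
    try {
      in.seek(-1);               // negative offset: also EOFException
    } catch (EOFException expected) { }
    in.seek(0);                  // in-range seeks behave as before
  } finally {
    in.close();
  }
}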

S3Contract.java (new file)

@@ -0,0 +1,40 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
public class S3Contract extends AbstractBondedFSContract {
public static final String CONTRACT_XML = "contract/s3.xml";
public S3Contract(Configuration conf) {
super(conf);
//insert the base features
addConfResource(CONTRACT_XML);
}
@Override
public String getScheme() {
return "s3";
}
}

TestS3ContractCreate.java (new file)

@@ -0,0 +1,32 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
public class TestS3ContractCreate extends AbstractContractCreateTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3Contract(conf);
}
}

TestS3ContractDelete.java (new file)

@@ -0,0 +1,31 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3ContractDelete extends AbstractContractDeleteTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3Contract(conf);
}
}

TestS3ContractMkdir.java (new file)

@@ -0,0 +1,32 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
public class TestS3ContractMkdir extends AbstractContractMkdirTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3Contract(conf);
}
}

TestS3ContractOpen.java (new file)

@@ -0,0 +1,32 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
public class TestS3ContractOpen extends AbstractContractOpenTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3Contract(conf);
}
}

TestS3ContractRename.java (new file)

@@ -0,0 +1,32 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestS3ContractRename extends AbstractContractRenameTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3Contract(conf);
}
}

TestS3ContractRootDir.java (new file)

@@ -0,0 +1,34 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
* root dir operations against an S3 bucket
*/
public class TestS3ContractRootDir extends AbstractContractRootDirectoryTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3Contract(conf);
}
}

TestS3ContractSeek.java (new file)

@@ -0,0 +1,32 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.s3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;
public class TestS3ContractSeek extends AbstractContractSeekTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3Contract(conf);
}
}

contract/s3.xml (new file)

@@ -0,0 +1,104 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<!--
S3 is backed by a blobstore.
-->
<property>
<name>fs.contract.test.root-tests-enabled</name>
<value>true</value>
</property>
<property>
<name>fs.contract.test.random-seek-count</name>
<value>10</value>
</property>
<property>
<name>fs.contract.is-blobstore</name>
<value>true</value>
</property>
<property>
<name>fs.contract.is-case-sensitive</name>
<value>true</value>
</property>
<property>
<name>fs.contract.rename-returns-false-if-source-missing</name>
<value>true</value>
</property>
<property>
<name>fs.contract.supports-append</name>
<value>false</value>
</property>
<property>
<name>fs.contract.supports-atomic-directory-delete</name>
<value>false</value>
</property>
<property>
<name>fs.contract.supports-atomic-rename</name>
<value>false</value>
</property>
<property>
<name>fs.contract.supports-block-locality</name>
<value>false</value>
</property>
<property>
<name>fs.contract.supports-concat</name>
<value>false</value>
</property>
<property>
<name>fs.contract.supports-seek</name>
<value>true</value>
</property>
<property>
<name>fs.contract.supports-seek-on-closed-file</name>
<value>true</value>
</property>
<property>
<name>fs.contract.supports-available-on-closed-file</name>
<value>true</value>
</property>
<property>
<name>fs.contract.rejects-seek-past-eof</name>
<value>true</value>
</property>
<property>
<name>fs.contract.supports-strict-exceptions</name>
<value>true</value>
</property>
<property>
<name>fs.contract.supports-unix-permissions</name>
<value>false</value>
</property>
</configuration>