HADOOP-17817. HADOOP-17823. S3A to raise IOE if both S3-CSE and S3Guard enabled (#3239)
S3A S3Guard tests to skip if S3-CSE is enabled (#3263)

Follow-on to:
* HADOOP-13887. Encrypt S3A data client-side with AWS SDK (S3-CSE)

If the S3A bucket is set up to use S3-CSE encryption, all tests which
turn on S3Guard are skipped, so they don't raise any exceptions about
incompatible configurations.

Contributed by Mehakmeet Singh

Change-Id: I9f4188109b56a1f4e5a31fae265d980c5795db1e
parent aee975a136
commit abb367aec6
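For context, the fail-fast behavior this patch adds is visible from any client. Below is a minimal sketch (not part of the patch; the bucket name and KMS key are hypothetical, the property keys are the standard S3A ones) of a configuration that now raises the new exception at initialization:

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.PathIOException;

public class CseS3GuardFailFast {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Client-side encryption with AWS KMS (HADOOP-13887).
    conf.set("fs.s3a.server-side-encryption-algorithm", "CSE-KMS");
    conf.set("fs.s3a.server-side-encryption.key", "<kms-key-id>"); // hypothetical key
    // Any non-null S3Guard metadata store now conflicts with CSE.
    conf.set("fs.s3a.metadatastore.impl",
        "org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore");
    try {
      FileSystem.get(new URI("s3a://example-bucket/"), conf);
    } catch (PathIOException e) {
      // With this patch, S3AFileSystem.initialize() fails fast with
      // "S3-CSE cannot be used with S3Guard".
      System.out.println(e.getMessage());
    }
  }
}
```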
@@ -217,6 +217,7 @@ import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletionIg
 import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isObjectNotFound;
 import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isUnknownBucket;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.CSE_PADDING_LENGTH;
+import static org.apache.hadoop.fs.s3a.impl.InternalConstants.CSE_S3GUARD_INCOMPATIBLE;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DEFAULT_UPLOAD_PART_COUNT_LIMIT;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DELETE_CONSIDERED_IDEMPOTENT;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.SC_404;
@@ -545,6 +546,9 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
     if (hasMetadataStore()) {
       LOG.debug("Using metadata store {}, authoritative store={}, authoritative path={}",
           getMetadataStore(), allowAuthoritativeMetadataStore, allowAuthoritativePaths);
+      if (isCSEEnabled) {
+        throw new PathIOException(uri.toString(), CSE_S3GUARD_INCOMPATIBLE);
+      }
     }

     // LOG if S3Guard is disabled on the warn level set in config
@@ -134,4 +134,9 @@ public final class InternalConstants {
    */
   public static final int CSE_PADDING_LENGTH = 16;

+  /**
+   * Error message to indicate S3-CSE is incompatible with S3Guard.
+   */
+  public static final String CSE_S3GUARD_INCOMPATIBLE = "S3-CSE cannot be "
+      + "used with S3Guard";
 }
@@ -601,6 +601,7 @@ clients where S3-CSE has not been enabled.

 ### Limitations

+- S3Guard is not supported with S3-CSE.
 - Performance will be reduced. All encrypt/decrypt is now being done on the
   client.
 - Writing files may be slower, as only a single block can be encrypted and
@@ -1435,6 +1435,31 @@ The user trying to use the KMS Key ID should have the right permissions to acces
 If not, then add permission(or IAM role) in "Key users" section by selecting the
 AWS-KMS CMK Key on AWS console.

+### S3-CSE cannot be used with S3Guard
+
+S3-CSE is not supported for S3Guard-enabled buckets.
+
+```
+org.apache.hadoop.fs.PathIOException: `s3a://test-bucket': S3-CSE cannot be used with S3Guard
+    at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:543)
+    at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3460)
+    at org.apache.hadoop.fs.FileSystem.access$300(FileSystem.java:172)
+    at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3565)
+    at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:3512)
+    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:539)
+    at org.apache.hadoop.fs.Path.getFileSystem(Path.java:366)
+    at org.apache.hadoop.fs.shell.PathData.expandAsGlob(PathData.java:342)
+    at org.apache.hadoop.fs.shell.Command.expandArgument(Command.java:252)
+    at org.apache.hadoop.fs.shell.Command.expandArguments(Command.java:235)
+    at org.apache.hadoop.fs.shell.FsCommand.processRawArguments(FsCommand.java:105)
+    at org.apache.hadoop.fs.shell.Command.run(Command.java:179)
+    at org.apache.hadoop.fs.FsShell.run(FsShell.java:327)
+    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:81)
+    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:95)
+    at org.apache.hadoop.fs.FsShell.main(FsShell.java:390)
+```
+If you want to use S3Guard, disable S3-CSE; if you want to use S3-CSE,
+disable S3Guard.
+
 ### <a name="not_all_bytes_were_read"></a> Message appears in logs "Not all bytes were read from the S3ObjectInputStream"
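As a sketch of the remediation advice in the new troubleshooting entry above (the property keys are the real S3A ones; which branch you take depends on whether S3Guard or S3-CSE matters more to you):

```java
import org.apache.hadoop.conf.Configuration;

final class CseOrS3Guard {
  // Keep S3-CSE: disable S3Guard by selecting the null metadata store.
  static void keepCse(Configuration conf) {
    conf.set("fs.s3a.metadatastore.impl",
        "org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore");
  }

  // Keep S3Guard: remove the client-side encryption setting instead.
  static void keepS3Guard(Configuration conf) {
    conf.unset("fs.s3a.server-side-encryption-algorithm");
  }
}
```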
@@ -143,7 +143,8 @@ public class ITestS3AContractSeek extends AbstractContractSeekTest {
   public void teardown() throws Exception {
     super.teardown();
     S3AFileSystem fs = getFileSystem();
-    if (fs.getConf().getBoolean(FS_S3A_IMPL_DISABLE_CACHE, false)) {
+    if (fs != null && fs.getConf().getBoolean(FS_S3A_IMPL_DISABLE_CACHE,
+        false)) {
       fs.close();
     }
   }
@@ -18,12 +18,17 @@

 package org.apache.hadoop.fs.contract.s3a;

+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.S3ATestUtils;

+import static org.apache.hadoop.fs.s3a.S3ATestUtils.maybeSkipIfS3GuardAndS3CSEIOE;
+
 /**
  * The contract of S3A: only enabled if the test bucket is provided.
  */
@@ -63,6 +68,20 @@ public class S3AContract extends AbstractBondedFSContract {
     }
   }

+  /**
+   * Skip S3AFS initialization if S3-CSE and S3Guard are enabled.
+   *
+   */
+  @Override
+  public void init() throws IOException {
+    try {
+      super.init();
+    } catch (PathIOException ioe) {
+      // Skip the tests if S3-CSE and S3-Guard are enabled.
+      maybeSkipIfS3GuardAndS3CSEIOE(ioe);
+    }
+  }
+
   @Override
   public String getScheme() {
     return "s3a";
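With this `init()` override, every test built on `S3AContract` inherits the skip. A hypothetical sketch (the test class name is invented; `AbstractFSContractTestBase` is the real contract-test base, whose setup calls `contract.init()`):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.apache.hadoop.fs.contract.s3a.S3AContract;

// On a CSE+S3Guard bucket, the framework's setup() -> contract.init()
// path hits the override above, so this test is reported as skipped
// instead of failing with a PathIOException.
public class ITestS3AExampleContract extends AbstractFSContractTestBase {
  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new S3AContract(conf);
  }
}
```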
@@ -58,6 +58,8 @@ public abstract class AbstractS3AMockTest {
     Configuration conf = createConfiguration();
     fs = new S3AFileSystem();
     URI uri = URI.create(FS_S3A + "://" + BUCKET);
+    // unset S3CSE property from config to avoid pathIOE.
+    conf.unset(SERVER_SIDE_ENCRYPTION_ALGORITHM);
     fs.initialize(uri, conf);
     s3 = fs.getAmazonS3ClientForTesting("mocking");
   }
@@ -33,6 +33,7 @@ import static org.apache.hadoop.fs.s3a.S3ATestUtils.createTestPath;
  */
 public class ITestS3AFSMainOperations extends FSMainOperationsBaseTest {

+  private S3AContract contract;
+
   public ITestS3AFSMainOperations() {
     super(createTestPath(
@@ -41,11 +42,18 @@ public class ITestS3AFSMainOperations extends FSMainOperationsBaseTest {

   @Override
   protected FileSystem createFileSystem() throws Exception {
-    S3AContract contract = new S3AContract(new Configuration());
+    contract = new S3AContract(new Configuration());
     contract.init();
     return contract.getTestFileSystem();
   }

+  @Override
+  public void tearDown() throws Exception {
+    if (contract.getTestFileSystem() != null) {
+      super.tearDown();
+    }
+  }
+
   @Override
   @Ignore("Permissions not supported")
   public void testListStatusThrowsExceptionForUnreadableDir() {
@@ -77,7 +77,7 @@ public class ITestS3GuardListConsistency extends AbstractS3ATestBase {

   @Override
   public void teardown() throws Exception {
-    if (getFileSystem()
+    if (getFileSystem() != null && getFileSystem()
         .getAmazonS3Client() instanceof InconsistentAmazonS3Client) {
       clearInconsistency(getFileSystem());
     }
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.s3a.auth.MarshalledCredentialBinding;
@@ -37,6 +38,7 @@ import org.apache.hadoop.fs.s3a.commit.CommitConstants;

 import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy;
 import org.apache.hadoop.fs.s3a.impl.ContextAccessors;
+import org.apache.hadoop.fs.s3a.impl.InternalConstants;
 import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum;
 import org.apache.hadoop.fs.s3a.impl.StoreContext;
 import org.apache.hadoop.fs.s3a.impl.StoreContextBuilder;
@@ -186,6 +188,8 @@ public final class S3ATestUtils {
     // make this whole class not run by default
     Assume.assumeTrue("No test filesystem in " + TEST_FS_S3A_NAME,
         liveTest);
+    // Skip if S3Guard and S3-CSE are enabled.
+    skipIfS3GuardAndS3CSEEnabled(conf);
     // patch in S3Guard options
     maybeEnableS3Guard(conf);
     S3AFileSystem fs1 = new S3AFileSystem();
@@ -229,12 +233,45 @@ public final class S3ATestUtils {
     // make this whole class not run by default
     Assume.assumeTrue("No test filesystem in " + TEST_FS_S3A_NAME,
         liveTest);
+    // Skip if S3Guard and S3-CSE are enabled.
+    skipIfS3GuardAndS3CSEEnabled(conf);
     // patch in S3Guard options
     maybeEnableS3Guard(conf);
     FileContext fc = FileContext.getFileContext(testURI, conf);
     return fc;
   }

+  /**
+   * Skip if S3Guard and S3CSE are enabled together.
+   *
+   * @param conf Test Configuration.
+   */
+  private static void skipIfS3GuardAndS3CSEEnabled(Configuration conf) {
+    String encryptionMethod =
+        conf.getTrimmed(SERVER_SIDE_ENCRYPTION_ALGORITHM, "");
+    String metaStore = conf.getTrimmed(S3_METADATA_STORE_IMPL, "");
+    if (encryptionMethod.equals(S3AEncryptionMethods.CSE_KMS.getMethod()) &&
+        !metaStore.equals(S3GUARD_METASTORE_NULL)) {
+      skip("Skipped if CSE is enabled with S3Guard.");
+    }
+  }
+
+  /**
+   * Either skip if PathIOE occurred due to S3CSE and S3Guard
+   * incompatibility or throw the PathIOE.
+   *
+   * @param ioe PathIOE being parsed.
+   * @throws PathIOException Throws PathIOE if it doesn't relate to S3CSE
+   *                         and S3Guard incompatibility.
+   */
+  public static void maybeSkipIfS3GuardAndS3CSEIOE(PathIOException ioe)
+      throws PathIOException {
+    if (ioe.toString().contains(InternalConstants.CSE_S3GUARD_INCOMPATIBLE)) {
+      skip("Skipping since CSE is enabled with S3Guard.");
+    }
+    throw ioe;
+  }
+
   /**
    * Get a long test property.
    * <ol>
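One subtlety in `maybeSkipIfS3GuardAndS3CSEIOE` above: `skip(...)` aborts the test via JUnit's assumption mechanism, so the trailing `throw ioe` only runs for unrelated exceptions. A minimal sketch of that mechanism, assuming JUnit 4 (the `skip` helper below is a stand-in for the S3A test one):

```java
import org.junit.Assume;
import org.junit.AssumptionViolatedException;

final class AssumeSkipSketch {
  // Stand-in for the test-utils skip(): failing an assumption throws
  // AssumptionViolatedException, which runners report as "skipped".
  static void skip(String reason) {
    Assume.assumeTrue(reason, false);
  }

  public static void main(String[] args) {
    try {
      skip("CSE is enabled with S3Guard");
      System.out.println("never reached");
    } catch (AssumptionViolatedException e) {
      System.out.println("skipped: " + e.getMessage());
    }
  }
}
```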
@@ -32,4 +32,10 @@ public class ITestS3AFileContextCreateMkdir
     super.setUp();
   }

+  @Override
+  public void tearDown() throws Exception {
+    if (fc != null) {
+      super.tearDown();
+    }
+  }
 }