diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 5373c43ad7a..9dedc518f08 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -217,6 +217,7 @@ import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletionIg
 import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isObjectNotFound;
 import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isUnknownBucket;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.CSE_PADDING_LENGTH;
+import static org.apache.hadoop.fs.s3a.impl.InternalConstants.CSE_S3GUARD_INCOMPATIBLE;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DEFAULT_UPLOAD_PART_COUNT_LIMIT;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DELETE_CONSIDERED_IDEMPOTENT;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.SC_404;
@@ -545,6 +546,9 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
     if (hasMetadataStore()) {
       LOG.debug("Using metadata store {}, authoritative store={}, authoritative path={}",
           getMetadataStore(), allowAuthoritativeMetadataStore, allowAuthoritativePaths);
+      if (isCSEEnabled) {
+        throw new PathIOException(uri.toString(), CSE_S3GUARD_INCOMPATIBLE);
+      }
     }
 
     // LOG if S3Guard is disabled on the warn level set in config
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java
index 51b1bf60a2f..4dd52d9b88d 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java
@@ -134,4 +134,9 @@ public final class InternalConstants {
    */
   public static final int CSE_PADDING_LENGTH = 16;
 
+  /**
+   * Error message to indicate that S3-CSE is incompatible with S3Guard.
+   */
+  public static final String CSE_S3GUARD_INCOMPATIBLE = "S3-CSE cannot be "
+      + "used with S3Guard";
 }
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
index 888ed8e211a..5fa6a3096b8 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
@@ -601,6 +601,7 @@ clients where S3-CSE has not been enabled.
 
 ### Limitations
 
+- S3Guard is not supported with S3-CSE.
 - Performance will be reduced. All encrypt/decrypt is now being done on the
   client.
 - Writing files may be slower, as only a single block can be encrypted and
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
index 6cdb492d885..33dd1654992 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
@@ -1435,6 +1435,31 @@ The user trying to use the KMS Key ID should have the right permissions to access
 If not, then add permission(or IAM role) in "Key users" section by selecting
 the AWS-KMS CMK Key on AWS console.
 
+### S3-CSE cannot be used with S3Guard
+
+S3-CSE is not supported on S3Guard-enabled buckets.
+```
+org.apache.hadoop.fs.PathIOException: `s3a://test-bucket': S3-CSE cannot be used with S3Guard
+    at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:543)
+    at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3460)
+    at org.apache.hadoop.fs.FileSystem.access$300(FileSystem.java:172)
+    at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3565)
+    at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:3512)
+    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:539)
+    at org.apache.hadoop.fs.Path.getFileSystem(Path.java:366)
+    at org.apache.hadoop.fs.shell.PathData.expandAsGlob(PathData.java:342)
+    at org.apache.hadoop.fs.shell.Command.expandArgument(Command.java:252)
+    at org.apache.hadoop.fs.shell.Command.expandArguments(Command.java:235)
+    at org.apache.hadoop.fs.shell.FsCommand.processRawArguments(FsCommand.java:105)
+    at org.apache.hadoop.fs.shell.Command.run(Command.java:179)
+    at org.apache.hadoop.fs.FsShell.run(FsShell.java:327)
+    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:81)
+    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:95)
+    at org.apache.hadoop.fs.FsShell.main(FsShell.java:390)
+```
+If you want to use S3Guard, disable S3-CSE; if you want to use S3-CSE,
+disable S3Guard.
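+
+For example, S3Guard can be switched off for the bucket by resetting the
+metadata store option to the null implementation. The snippet below is an
+illustrative `core-site.xml` fragment; adjust it to a per-bucket override if
+only some buckets use S3Guard:
+
+```xml
+<property>
+  <name>fs.s3a.metadatastore.impl</name>
+  <value>org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore</value>
+</property>
+```
+
+Alternatively, unset `fs.s3a.server-side-encryption-algorithm` (or switch it
+to a server-side option such as `SSE-KMS`) so that client-side encryption is
+no longer requested.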
+
 
 ### Message appears in logs "Not all bytes were read from the S3ObjectInputStream"
 
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java
index 17136244487..329b940a66a 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java
@@ -143,7 +143,8 @@ public class ITestS3AContractSeek extends AbstractContractSeekTest {
   public void teardown() throws Exception {
     super.teardown();
     S3AFileSystem fs = getFileSystem();
-    if (fs.getConf().getBoolean(FS_S3A_IMPL_DISABLE_CACHE, false)) {
+    if (fs != null && fs.getConf().getBoolean(FS_S3A_IMPL_DISABLE_CACHE,
+        false)) {
       fs.close();
     }
   }
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/S3AContract.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/S3AContract.java
index 0d3dd4c2f66..695a4a2b682 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/S3AContract.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/S3AContract.java
@@ -18,12 +18,17 @@
 
 package org.apache.hadoop.fs.contract.s3a;
 
+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.S3ATestUtils;
 
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.maybeSkipIfS3GuardAndS3CSEIOE;
+
 /**
  * The contract of S3A: only enabled if the test bucket is provided.
  */
@@ -63,6 +68,20 @@ public class S3AContract extends AbstractBondedFSContract {
     }
   }
 
+  /**
+   * Skip S3AFS initialization if S3-CSE and S3Guard are enabled.
+   */
+  @Override
+  public void init() throws IOException {
+    try {
+      super.init();
+    } catch (PathIOException ioe) {
+      // Skip the tests if S3-CSE and S3Guard are enabled.
+      maybeSkipIfS3GuardAndS3CSEIOE(ioe);
+    }
+  }
+
   @Override
   public String getScheme() {
     return "s3a";
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3AMockTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3AMockTest.java
index 6afdd76ca44..afc444f20ae 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3AMockTest.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3AMockTest.java
@@ -58,6 +58,8 @@ public abstract class AbstractS3AMockTest {
     Configuration conf = createConfiguration();
     fs = new S3AFileSystem();
     URI uri = URI.create(FS_S3A + "://" + BUCKET);
+    // Unset the S3-CSE property from the config to avoid a PathIOException.
+    conf.unset(SERVER_SIDE_ENCRYPTION_ALGORITHM);
     fs.initialize(uri, conf);
     s3 = fs.getAmazonS3ClientForTesting("mocking");
   }
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFSMainOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFSMainOperations.java
index 511aa0fc80d..6669e8426af 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFSMainOperations.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFSMainOperations.java
@@ -33,6 +33,7 @@ import static org.apache.hadoop.fs.s3a.S3ATestUtils.createTestPath;
  */
 public class ITestS3AFSMainOperations extends FSMainOperationsBaseTest {
 
+  private S3AContract contract;
 
   public ITestS3AFSMainOperations() {
     super(createTestPath(
@@ -41,11 +42,18 @@ public class ITestS3AFSMainOperations extends FSMainOperationsBaseTest {
 
   @Override
   protected FileSystem createFileSystem() throws Exception {
-    S3AContract contract = new S3AContract(new Configuration());
+    contract = new S3AContract(new Configuration());
     contract.init();
     return contract.getTestFileSystem();
   }
 
+  @Override
+  public void tearDown() throws Exception {
+    if (contract.getTestFileSystem() != null) {
+      super.tearDown();
+    }
+  }
+
   @Override
   @Ignore("Permissions not supported")
   public void testListStatusThrowsExceptionForUnreadableDir() {
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
index 2475b544a8f..0d5471939cd 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
@@ -77,7 +77,7 @@ public class ITestS3GuardListConsistency extends AbstractS3ATestBase {
 
   @Override
   public void teardown() throws Exception {
-    if (getFileSystem()
+    if (getFileSystem() != null && getFileSystem()
         .getAmazonS3Client() instanceof InconsistentAmazonS3Client) {
       clearInconsistency(getFileSystem());
     }
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index 6c51c732e6a..70986d85168 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.s3a.auth.MarshalledCredentialBinding;
@@ -37,6 +38,7 @@ import org.apache.hadoop.fs.s3a.commit.CommitConstants;
 import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy;
 import org.apache.hadoop.fs.s3a.impl.ContextAccessors;
+import org.apache.hadoop.fs.s3a.impl.InternalConstants;
 import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum;
 import org.apache.hadoop.fs.s3a.impl.StoreContext;
 import org.apache.hadoop.fs.s3a.impl.StoreContextBuilder;
@@ -186,6 +188,8 @@ public final class S3ATestUtils {
     // make this whole class not run by default
     Assume.assumeTrue("No test filesystem in " + TEST_FS_S3A_NAME,
         liveTest);
+    // Skip if S3Guard and S3-CSE are enabled.
+    skipIfS3GuardAndS3CSEEnabled(conf);
     // patch in S3Guard options
     maybeEnableS3Guard(conf);
     S3AFileSystem fs1 = new S3AFileSystem();
@@ -229,12 +233,45 @@ public final class S3ATestUtils {
     // make this whole class not run by default
     Assume.assumeTrue("No test filesystem in " + TEST_FS_S3A_NAME,
         liveTest);
+    // Skip if S3Guard and S3-CSE are enabled.
+    skipIfS3GuardAndS3CSEEnabled(conf);
     // patch in S3Guard options
     maybeEnableS3Guard(conf);
     FileContext fc = FileContext.getFileContext(testURI, conf);
     return fc;
   }
 
+  /**
+   * Skip the test if S3Guard and S3-CSE are enabled together.
+   *
+   * @param conf Test Configuration.
+   */
+  private static void skipIfS3GuardAndS3CSEEnabled(Configuration conf) {
+    String encryptionMethod =
+        conf.getTrimmed(SERVER_SIDE_ENCRYPTION_ALGORITHM, "");
+    String metaStore = conf.getTrimmed(S3_METADATA_STORE_IMPL, "");
+    if (encryptionMethod.equals(S3AEncryptionMethods.CSE_KMS.getMethod()) &&
+        !metaStore.equals(S3GUARD_METASTORE_NULL)) {
+      skip("Skipping since CSE is enabled with S3Guard.");
+    }
+  }
+
+  /**
+   * Skip the test if the PathIOException was caused by the S3-CSE and
+   * S3Guard incompatibility; otherwise rethrow it.
+   *
+   * @param ioe PathIOException being inspected.
+   * @throws PathIOException if the exception is unrelated to the S3-CSE
+   *                         and S3Guard incompatibility.
+   */
+  public static void maybeSkipIfS3GuardAndS3CSEIOE(PathIOException ioe)
+      throws PathIOException {
+    if (ioe.toString().contains(InternalConstants.CSE_S3GUARD_INCOMPATIBLE)) {
+      skip("Skipping since CSE is enabled with S3Guard.");
+    }
+    throw ioe;
+  }
+
   /**
    * Get a long test property.
    *
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextCreateMkdir.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextCreateMkdir.java
index 4b8d4bb5b32..dcc9da93365 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextCreateMkdir.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextCreateMkdir.java
@@ -32,4 +32,10 @@ public class ITestS3AFileContextCreateMkdir
     super.setUp();
   }
 
+  @Override
+  public void tearDown() throws Exception {
+    if (fc != null) {
+      super.tearDown();
+    }
+  }
 }