diff --git a/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
index 2085d5b8d55..8e17e8981d9 100644
--- a/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
+++ b/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
@@ -314,6 +314,21 @@ KVNO Timestamp Principal
   You need to set <<<dfs.encrypt.data.transfer>>> to <<<"true">>> in the
   hdfs-site.xml in order to activate data encryption for data transfer
   protocol of DataNode.
 
+  Optionally, you may set <<<dfs.encrypt.data.transfer.algorithm>>> to either
+  "3des" or "rc4" to choose the specific encryption algorithm.  If unspecified,
+  then the configured JCE default on the system is used, which is usually 3DES.
+
+  Setting <<<dfs.encrypt.data.transfer.cipher.suites>>> to
+  <<<AES/CTR/NoPadding>>> activates AES encryption.  By default, this is
+  unspecified, so AES is not used.  When AES is used, the algorithm specified in
+  <<<dfs.encrypt.data.transfer.algorithm>>> is still used during an initial key
+  exchange.  The AES key bit length can be configured by setting
+  <<<dfs.encrypt.data.transfer.cipher.key.bitlength>>> to 128, 192 or 256.  The
+  default is 128.
+
+  AES offers the greatest cryptographic strength and the best performance.  At
+  this time, 3DES and RC4 have been used more often in Hadoop clusters.
+
 ** Data Encryption on HTTP
 
   Data transfer between Web-console and clients are protected by using SSL(HTTPS).
@@ -491,6 +506,18 @@ Configuration for <<<conf/hdfs-site.xml>>>
 *-------------------------+-------------------------+------------------------+
 | <<<dfs.encrypt.data.transfer>>> | | |
 | | | set to <<<true>>> when using data encryption |
 *-------------------------+-------------------------+------------------------+
+| <<<dfs.encrypt.data.transfer.algorithm>>> | | |
+| | | optionally set to <<<3des>>> or <<<rc4>>> when using data encryption to |
+| | | control encryption algorithm |
+*-------------------------+-------------------------+------------------------+
+| <<<dfs.encrypt.data.transfer.cipher.suites>>> | | |
+| | | optionally set to <<<AES/CTR/NoPadding>>> to activate AES encryption |
+| | | when using data encryption |
+*-------------------------+-------------------------+------------------------+
+| <<<dfs.encrypt.data.transfer.cipher.key.bitlength>>> | | |
+| | | optionally set to <<<128>>>, <<<192>>> or <<<256>>> to control key bit |
+| | | length when using AES with data encryption |
+*-------------------------+-------------------------+------------------------+
 | <<<dfs.data.transfer.protection>>> | | |
 | | | <<<authentication>>> : authentication only \
 | | | <<<integrity>>> : integrity check in addition to authentication \
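For illustration, the three properties documented above compose with the existing dfs.encrypt.data.transfer switch. This minimal sketch, not part of the patch, sets them programmatically on a Hadoop Configuration, mirroring what an operator would put in hdfs-site.xml; the chosen values (3des for the key exchange, a 256-bit AES key) are arbitrary examples:

    import org.apache.hadoop.conf.Configuration;

    public class AesDataTransferConfig {
      public static Configuration newEncryptedConf() {
        Configuration conf = new Configuration();
        // Master switch for encrypted DataTransferProtocol.
        conf.setBoolean("dfs.encrypt.data.transfer", true);
        // Algorithm still used for the initial key exchange when AES is active.
        conf.set("dfs.encrypt.data.transfer.algorithm", "3des");
        // Opt in to AES; AES/CTR/NoPadding is the only suite supported so far.
        conf.set("dfs.encrypt.data.transfer.cipher.suites", "AES/CTR/NoPadding");
        // Optional AES key bit length: 128 (default), 192 or 256.
        conf.setInt("dfs.encrypt.data.transfer.cipher.key.bitlength", 256);
        return conf;
      }
    }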
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 06376e52d77..7be71e9ef17 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -328,6 +328,12 @@ public abstract class GenericTestUtils {
     }
   }
 
+  public static void assertDoesNotMatch(String output, String pattern) {
+    Assert.assertFalse("Expected output to not match /" + pattern + "/" +
+        " but got:\n" + output,
+        Pattern.compile(pattern).matcher(output).find());
+  }
+
   public static void assertMatches(String output, String pattern) {
     Assert.assertTrue("Expected output to match /" + pattern + "/" +
         " but got:\n" + output,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 47eea752df5..7010c4a283b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -674,6 +674,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6385. Show when block deletion will start after NameNode startup in
     WebUI. (cnauroth)
 
+    HDFS-7313. Support optional configuration of AES cipher suite on
+    DataTransferProtocol. (cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 59eaa20321e..488bf0ec48a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -602,6 +602,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
   public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY = "dfs.encrypt.data.transfer.cipher.key.bitlength";
   public static final int DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT = 128;
+  public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY = "dfs.encrypt.data.transfer.cipher.suites";
   public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm";
   public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
   public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
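The new GenericTestUtils.assertDoesNotMatch above is the negative counterpart of the existing assertMatches, and the TestEncryptedTransfer changes below rely on it to prove that cipher negotiation did not happen. A minimal usage sketch, where SomeComponent and doWork() are hypothetical stand-ins for whatever is under test:

    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.test.GenericTestUtils;
    import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
    import org.junit.Test;

    public class ExampleLogAssertion {
      @Test
      public void componentStaysQuiet() throws Exception {
        // Capture a component's log output around the operation under test.
        LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
            LogFactory.getLog(SomeComponent.class));  // hypothetical component
        try {
          new SomeComponent().doWork();               // hypothetical operation
        } finally {
          logs.stopCapturing();
        }
        // Fails if any captured line matches the regex; on failure the message
        // quotes both the pattern and the captured output.
        GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
            "Server using cipher suite");
      }
    }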
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
index 2d5e13c4a12..398d44cc094 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
 import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
 
 import java.io.IOException;
@@ -249,14 +250,25 @@ public final class DataTransferSaslUtil {
   /**
    * Negotiate a cipher option which server supports.
    *
+   * @param conf the configuration
    * @param options the cipher options which client supports
    * @return CipherOption negotiated cipher option
    */
   public static CipherOption negotiateCipherOption(Configuration conf,
-      List<CipherOption> options) {
+      List<CipherOption> options) throws IOException {
+    // Negotiate cipher suites if configured.  Currently, the only supported
+    // cipher suite is AES/CTR/NoPadding, but the protocol allows multiple
+    // values for future expansion.
+    String cipherSuites = conf.get(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
+    if (cipherSuites == null || cipherSuites.isEmpty()) {
+      return null;
+    }
+    if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) {
+      throw new IOException(String.format("Invalid cipher suite, %s=%s",
+          DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites));
+    }
     if (options != null) {
       for (CipherOption option : options) {
-        // Currently we support AES/CTR/NoPadding
         CipherSuite suite = option.getCipherSuite();
         if (suite == CipherSuite.AES_CTR_NOPADDING) {
           int keyLen = conf.getInt(
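In short, the new guard gives negotiateCipherOption three outcomes: return null when the suite is unconfigured (so existing deployments are untouched), fail fast on an unrecognized value, and fall through to AES key negotiation only for AES/CTR/NoPadding. A hedged walk-through of those paths, using only the types visible in the hunk above:

    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
    import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.negotiateCipherOption;

    import com.google.common.collect.Lists;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.CipherOption;
    import org.apache.hadoop.crypto.CipherSuite;

    Configuration conf = new Configuration();
    List<CipherOption> options =
        Lists.newArrayList(new CipherOption(CipherSuite.AES_CTR_NOPADDING));

    // Path 1: property unset (or empty) -- returns null and the connection
    // proceeds with plain SASL wrapping, exactly as before this patch.
    CipherOption none = negotiateCipherOption(conf, options);  // null

    // Path 2: unrecognized value -- IOException("Invalid cipher suite, ...").
    conf.set(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, "RC4/STREAM/NoPadding");
    // negotiateCipherOption(conf, options) would throw here.

    // Path 3: AES/CTR/NoPadding -- negotiation continues and the returned
    // CipherOption carries the suite, with key material sized by
    // dfs.encrypt.data.transfer.cipher.key.bitlength.
    conf.set(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
        CipherSuite.AES_CTR_NOPADDING.getName());
    CipherOption negotiated = negotiateCipherOption(conf, options);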
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
index cfcc91fa2be..98cdcfea338 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
 import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
 
 import java.io.DataInputStream;
@@ -450,10 +451,20 @@ public class SaslDataTransferClient {
     byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
     List<CipherOption> cipherOptions = null;
     if (requestedQopContainsPrivacy(saslProps)) {
-      // Negotiation cipher options
-      CipherOption option = new CipherOption(CipherSuite.AES_CTR_NOPADDING);
-      cipherOptions = Lists.newArrayListWithCapacity(1);
-      cipherOptions.add(option);
+      // Negotiate cipher suites if configured.  Currently, the only supported
+      // cipher suite is AES/CTR/NoPadding, but the protocol allows multiple
+      // values for future expansion.
+      String cipherSuites = conf.get(
+          DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
+      if (cipherSuites != null && !cipherSuites.isEmpty()) {
+        if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) {
+          throw new IOException(String.format("Invalid cipher suite, %s=%s",
+              DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites));
+        }
+        CipherOption option = new CipherOption(CipherSuite.AES_CTR_NOPADDING);
+        cipherOptions = Lists.newArrayListWithCapacity(1);
+        cipherOptions.add(option);
+      }
     }
     sendSaslMessageAndNegotiationCipherOptions(out, localResponse,
         cipherOptions);
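A practical consequence of the client-side check above: a typo in the suite name surfaces immediately at handshake time instead of silently downgrading. A sketch of what a misconfigured client would see, with the exception text following the String.format in the hunk:

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    conf.setBoolean("dfs.encrypt.data.transfer", true);
    conf.set("dfs.encrypt.data.transfer.cipher.suites", "AES/CBC/PKCS5Padding");
    // The first read or write that triggers the SASL handshake fails with:
    //   java.io.IOException: Invalid cipher suite,
    //       dfs.encrypt.data.transfer.cipher.suites=AES/CBC/PKCS5Padding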
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 9dfd33bc25a..2a45a425173 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.InvalidMagicNumberException;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
@@ -179,11 +180,19 @@ class DataXceiver extends Receiver implements Runnable {
       dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
       peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
       InputStream input = socketIn;
-      IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
-        socketIn, datanode.getDatanodeId());
-      input = new BufferedInputStream(saslStreams.in,
-        HdfsConstants.SMALL_BUFFER_SIZE);
-      socketOut = saslStreams.out;
+      try {
+        IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
+          socketIn, datanode.getDatanodeId());
+        input = new BufferedInputStream(saslStreams.in,
+          HdfsConstants.SMALL_BUFFER_SIZE);
+        socketOut = saslStreams.out;
+      } catch (InvalidMagicNumberException imne) {
+        LOG.info("Failed to read expected encryption handshake from client " +
+            "at " + peer.getRemoteAddressString() + ". Perhaps the client " +
+            "is running an older version of Hadoop which does not support " +
+            "encryption");
+        return;
+      }
 
       super.initialize(new DataInputStream(input));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 38d3c50c7e5..31145ba3993 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1517,6 +1517,19 @@
     Note that if AES is supported by both the client and server then this
     encryption algorithm will only be used to initially transfer keys for
     AES.
+    (See dfs.encrypt.data.transfer.cipher.suites.)
   </description>
 </property>
 
+<property>
+  <name>dfs.encrypt.data.transfer.cipher.suites</name>
+  <value></value>
+  <description>
+    This value may be either undefined or AES/CTR/NoPadding.  If defined, then
+    dfs.encrypt.data.transfer uses the specified cipher suite for data
+    encryption.  If not defined, then only the algorithm specified in
+    dfs.encrypt.data.transfer.algorithm is used.  By default, the property is
+    not defined.
+  </description>
+</property>
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
index 7f6ad1a38ca..30484d1b50f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -133,12 +134,12 @@ public class TestEncryptedTransfer {
 
       fs.close();
 
-      if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
+      if (resolverClazz == null) {
         // Test client and server negotiate cipher option
-        GenericTestUtils.assertMatches(logs.getOutput(),
+        GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
             "Server using cipher suite");
         // Check the IOStreamPair
-        GenericTestUtils.assertMatches(logs1.getOutput(),
+        GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
             "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
       }
     } finally {
@@ -174,9 +175,28 @@ public class TestEncryptedTransfer {
           .build();
 
       fs = getFileSystem(conf);
-      assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
-      assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
+      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+          LogFactory.getLog(SaslDataTransferServer.class));
+      LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
+          LogFactory.getLog(DataTransferSaslUtil.class));
+      try {
+        assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
+        assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
+      } finally {
+        logs.stopCapturing();
+        logs1.stopCapturing();
+      }
       fs.close();
+
+      if (resolverClazz == null) {
+        // Test client and server negotiate cipher option
+        GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
+            "Server using cipher suite");
+        // Check the IOStreamPair
+        GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
+            "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
+      }
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -184,6 +204,61 @@ public class TestEncryptedTransfer {
     }
   }
 
+  @Test
+  public void testEncryptedReadWithAES() throws IOException {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
+          "AES/CTR/NoPadding");
+      cluster = new MiniDFSCluster.Builder(conf).build();
+
+      FileSystem fs = getFileSystem(conf);
+      writeTestDataToFile(fs);
+      assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
+      FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
+      fs.close();
+      cluster.shutdown();
+
+      setEncryptionConfigKeys(conf);
+
+      cluster = new MiniDFSCluster.Builder(conf)
+          .manageDataDfsDirs(false)
+          .manageNameDfsDirs(false)
+          .format(false)
+          .startupOption(StartupOption.REGULAR)
+          .build();
+
+      fs = getFileSystem(conf);
+      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+          LogFactory.getLog(SaslDataTransferServer.class));
+      LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
+          LogFactory.getLog(DataTransferSaslUtil.class));
+      try {
+        assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
+        assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
+      } finally {
+        logs.stopCapturing();
+        logs1.stopCapturing();
+      }
+
+      fs.close();
+
+      if (resolverClazz == null) {
+        // Test client and server negotiate cipher option
+        GenericTestUtils.assertMatches(logs.getOutput(),
+            "Server using cipher suite");
+        // Check the IOStreamPair
+        GenericTestUtils.assertMatches(logs1.getOutput(),
+            "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testEncryptedReadAfterNameNodeRestart() throws IOException {
     MiniDFSCluster cluster = null;
@@ -270,7 +345,7 @@ public class TestEncryptedTransfer {
       }
       fs.close();
 
-      if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
+      if (resolverClazz == null) {
         GenericTestUtils.assertMatches(logs.getOutput(),
             "Failed to read expected encryption handshake from client at");
       }
@@ -444,12 +519,12 @@ public class TestEncryptedTransfer {
       assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
       fs.close();
 
-      if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
+      if (resolverClazz == null) {
         // Test client and server negotiate cipher option
-        GenericTestUtils.assertMatches(logs.getOutput(),
+        GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
             "Server using cipher suite");
         // Check the IOStreamPair
-        GenericTestUtils.assertMatches(logs1.getOutput(),
+        GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
             "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
       }
     } finally {
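Finally, a condensed, hedged variant of testEncryptedReadWithAES as user-level code: bring up a MiniDFSCluster with encrypted transfer and the AES suite enabled, then round-trip a file. Enabling block access tokens alongside encryption is an assumption carried over from the test's setEncryptionConfigKeys helper; the path and payload are arbitrary:

    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    Configuration conf = new Configuration();
    conf.setBoolean("dfs.encrypt.data.transfer", true);
    conf.setBoolean("dfs.block.access.token.enable", true);  // assumed prerequisite
    conf.set("dfs.encrypt.data.transfer.cipher.suites", "AES/CTR/NoPadding");

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      Path path = new Path("/aes-demo.txt");
      DFSTestUtil.writeFile(fs, path, "secret bytes");
      // With AES negotiated, the DataNode logs "Server using cipher suite" and
      // the peers wrap the connection in CryptoInputStream/CryptoOutputStream.
      assertEquals("secret bytes", DFSTestUtil.readFile(fs, path));
    } finally {
      cluster.shutdown();
    }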