HDFS-7313. Support optional configuration of AES cipher suite on DataTransferProtocol. Contributed by Chris Nauroth.

(cherry picked from commit 5573b3476a)
cnauroth 2014-10-30 22:48:25 -07:00
parent caded2545c
commit 57fa43ab0a
9 changed files with 177 additions and 20 deletions

View File

@ -314,6 +314,21 @@ KVNO Timestamp Principal
You need to set <<<dfs.encrypt.data.transfer>>> to <<<"true">>> in the hdfs-site.xml
in order to activate data encryption for data transfer protocol of DataNode.
Optionally, you may set <<<dfs.encrypt.data.transfer.algorithm>>> to either
"3des" or "rc4" to choose the specific encryption algorithm. If unspecified,
then the configured JCE default on the system is used, which is usually 3DES.
Setting <<<dfs.encrypt.data.transfer.cipher.suites>>> to
<<<AES/CTR/NoPadding>>> activates AES encryption. By default, this is
unspecified, so AES is not used. When AES is used, the algorithm specified in
<<<dfs.encrypt.data.transfer.algorithm>>> is still used during an initial key
exchange. The AES key bit length can be configured by setting
<<<dfs.encrypt.data.transfer.cipher.key.bitlength>>> to 128, 192 or 256. The
default is 128.
AES offers the greatest cryptographic strength and the best performance. At
this time, 3DES and RC4 have been used more often in Hadoop clusters.
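For reference, a minimal sketch of setting these properties programmatically on an org.apache.hadoop.conf.Configuration; the property names and values are the ones documented above, while wiring them through a Configuration object mirrors the new test added later in this commit and is illustrative only:

Configuration conf = new Configuration();
// Required switch for encrypted DataTransferProtocol.
conf.setBoolean("dfs.encrypt.data.transfer", true);
// Optional key-exchange algorithm: "3des" or "rc4" (JCE default if unset).
conf.set("dfs.encrypt.data.transfer.algorithm", "rc4");
// Activates AES for the bulk data transfer; unset means AES is not used.
conf.set("dfs.encrypt.data.transfer.cipher.suites", "AES/CTR/NoPadding");
// AES key length in bits: 128 (default), 192 or 256.
conf.setInt("dfs.encrypt.data.transfer.cipher.key.bitlength", 256);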
** Data Encryption on HTTP
Data transfer between Web-console and clients are protected by using SSL(HTTPS).
@ -491,6 +506,18 @@ Configuration for <<<conf/hdfs-site.xml>>>
| <<<dfs.encrypt.data.transfer>>> | <false> | |
| | | set to <<<true>>> when using data encryption |
*-------------------------+-------------------------+------------------------+
| <<<dfs.encrypt.data.transfer.algorithm>>> | | |
| | | optionally set to <<<3des>>> or <<<rc4>>> when using data encryption to |
| | | control encryption algorithm |
*-------------------------+-------------------------+------------------------+
| <<<dfs.encrypt.data.transfer.cipher.suites>>> | | |
| | | optionally set to <<<AES/CTR/NoPadding>>> to activate AES encryption |
| | | when using data encryption |
*-------------------------+-------------------------+------------------------+
| <<<dfs.encrypt.data.transfer.cipher.key.bitlength>>> | | |
| | | optionally set to <<<128>>>, <<<192>>> or <<<256>>> to control key bit |
| | | length when using AES with data encryption |
*-------------------------+-------------------------+------------------------+
| <<<dfs.data.transfer.protection>>> | | |
| | | <authentication> : authentication only \
| | | <integrity> : integrity check in addition to authentication \

View File

@ -328,6 +328,12 @@ public abstract class GenericTestUtils {
}
}
public static void assertDoesNotMatch(String output, String pattern) {
Assert.assertFalse("Expected output to match /" + pattern + "/" +
" but got:\n" + output,
Pattern.compile(pattern).matcher(output).find());
}
public static void assertMatches(String output, String pattern) {
Assert.assertTrue("Expected output to match /" + pattern + "/" +
" but got:\n" + output,

View File

@ -426,6 +426,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6385. Show when block deletion will start after NameNode startup in
WebUI. (cnauroth)
HDFS-7313. Support optional configuration of AES cipher suite on
DataTransferProtocol. (cnauroth)
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)

View File

@ -608,6 +608,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY = "dfs.encrypt.data.transfer.cipher.key.bitlength";
public static final int DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT = 128;
public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY = "dfs.encrypt.data.transfer.cipher.suites";
public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm";
public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
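As an illustration, callers read the new key directly; note that no *_DEFAULT constant is added for it, so an unset property yields null and AES stays disabled (the null/empty check below mirrors the one in the SASL utility code further down in this commit):

String cipherSuites = conf.get(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
boolean aesConfigured = cipherSuites != null && !cipherSuites.isEmpty();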

View File

@ -23,6 +23,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
import java.io.IOException;
@ -249,14 +250,25 @@ public final class DataTransferSaslUtil {
/**
* Negotiate a cipher option which server supports.
*
* @param conf the configuration
* @param options the cipher options which client supports
* @return CipherOption negotiated cipher option
*/
public static CipherOption negotiateCipherOption(Configuration conf,
List<CipherOption> options) {
List<CipherOption> options) throws IOException {
// Negotiate cipher suites if configured. Currently, the only supported
// cipher suite is AES/CTR/NoPadding, but the protocol allows multiple
// values for future expansion.
String cipherSuites = conf.get(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
if (cipherSuites == null || cipherSuites.isEmpty()) {
return null;
}
if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) {
throw new IOException(String.format("Invalid cipher suite, %s=%s",
DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites));
}
if (options != null) {
for (CipherOption option : options) {
// Currently we support AES/CTR/NoPadding
CipherSuite suite = option.getCipherSuite();
if (suite == CipherSuite.AES_CTR_NOPADDING) {
int keyLen = conf.getInt(
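A hedged sketch of calling the negotiation helper above from a server-side caller; the configuration key, the AES/CTR/NoPadding validation, and the CipherOption constructor are taken from this diff, while the surrounding setup (and Guava's Lists.newArrayList) is illustrative:

Configuration conf = new Configuration();
conf.set(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, "AES/CTR/NoPadding");
// Cipher options proposed by the client during the SASL handshake.
List<CipherOption> clientOptions =
    Lists.newArrayList(new CipherOption(CipherSuite.AES_CTR_NOPADDING));
// Returns null if the suite key is unset, throws IOException for an
// unsupported value, and otherwise selects a matching client option.
CipherOption negotiated =
    DataTransferSaslUtil.negotiateCipherOption(conf, clientOptions);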

View File

@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
import java.io.DataInputStream;
@ -450,11 +451,21 @@ public class SaslDataTransferClient {
byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
List<CipherOption> cipherOptions = null;
if (requestedQopContainsPrivacy(saslProps)) {
// Negotiation cipher options
// Negotiate cipher suites if configured. Currently, the only supported
// cipher suite is AES/CTR/NoPadding, but the protocol allows multiple
// values for future expansion.
String cipherSuites = conf.get(
DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
if (cipherSuites != null && !cipherSuites.isEmpty()) {
if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) {
throw new IOException(String.format("Invalid cipher suite, %s=%s",
DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites));
}
CipherOption option = new CipherOption(CipherSuite.AES_CTR_NOPADDING);
cipherOptions = Lists.newArrayListWithCapacity(1);
cipherOptions.add(option);
}
}
sendSaslMessageAndNegotiationCipherOptions(out, localResponse,
cipherOptions);

View File

@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.InvalidMagicNumberException;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
@ -179,11 +180,19 @@ class DataXceiver extends Receiver implements Runnable {
dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
InputStream input = socketIn;
try {
IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
socketIn, datanode.getDatanodeId());
input = new BufferedInputStream(saslStreams.in,
HdfsConstants.SMALL_BUFFER_SIZE);
socketOut = saslStreams.out;
} catch (InvalidMagicNumberException imne) {
LOG.info("Failed to read expected encryption handshake from client " +
"at " + peer.getRemoteAddressString() + ". Perhaps the client " +
"is running an older version of Hadoop which does not support " +
"encryption");
return;
}
super.initialize(new DataInputStream(input));

View File

@ -1517,6 +1517,19 @@
Note that if AES is supported by both the client and server then this
encryption algorithm will only be used to initially transfer keys for AES.
(See dfs.encrypt.data.transfer.cipher.suites.)
</description>
</property>
<property>
<name>dfs.encrypt.data.transfer.cipher.suites</name>
<value></value>
<description>
This value may be either undefined or AES/CTR/NoPadding. If defined, then
dfs.encrypt.data.transfer uses the specified cipher suite for data
encryption. If not defined, then only the algorithm specified in
dfs.encrypt.data.transfer.algorithm is used. By default, the property is
not defined.
</description>
</property>

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@ -133,12 +134,12 @@ public class TestEncryptedTransfer {
fs.close();
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
if (resolverClazz == null) {
// Test client and server negotiate cipher option
GenericTestUtils.assertMatches(logs.getOutput(),
GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
GenericTestUtils.assertMatches(logs1.getOutput(),
GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {
@ -174,9 +175,83 @@ public class TestEncryptedTransfer {
.build();
fs = getFileSystem(conf);
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(SaslDataTransferServer.class));
LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(DataTransferSaslUtil.class));
try {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
} finally {
logs.stopCapturing();
logs1.stopCapturing();
}
fs.close();
if (resolverClazz == null) {
// Test client and server negotiate cipher option
GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testEncryptedReadWithAES() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
"AES/CTR/NoPadding");
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
fs.close();
cluster.shutdown();
setEncryptionConfigKeys(conf);
cluster = new MiniDFSCluster.Builder(conf)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.format(false)
.startupOption(StartupOption.REGULAR)
.build();
fs = getFileSystem(conf);
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(SaslDataTransferServer.class));
LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(DataTransferSaslUtil.class));
try {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
} finally {
logs.stopCapturing();
logs1.stopCapturing();
}
fs.close();
if (resolverClazz == null) {
// Test client and server negotiate cipher option
GenericTestUtils.assertMatches(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
GenericTestUtils.assertMatches(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {
if (cluster != null) {
cluster.shutdown();
@ -270,7 +345,7 @@ public class TestEncryptedTransfer {
}
fs.close();
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
if (resolverClazz == null) {
GenericTestUtils.assertMatches(logs.getOutput(),
"Failed to read expected encryption handshake from client at");
}
@ -444,12 +519,12 @@ public class TestEncryptedTransfer {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
fs.close();
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
if (resolverClazz == null) {
// Test client and server negotiate cipher option
GenericTestUtils.assertMatches(logs.getOutput(),
GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
GenericTestUtils.assertMatches(logs1.getOutput(),
GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {