HDFS-7313. Support optional configuration of AES cipher suite on DataTransferProtocol. Contributed by Chris Nauroth.

cnauroth 2014-10-30 22:48:25 -07:00
parent c637d6d9d5
commit 5573b3476a
9 changed files with 177 additions and 20 deletions


@@ -314,6 +314,21 @@ KVNO Timestamp Principal
You need to set <<<dfs.encrypt.data.transfer>>> to <<<true>>> in
hdfs-site.xml in order to activate data encryption for the DataNode's data
transfer protocol.

Optionally, you may set <<<dfs.encrypt.data.transfer.algorithm>>> to either
<<<3des>>> or <<<rc4>>> to choose the specific encryption algorithm. If
unspecified, then the configured JCE default on the system is used, which is
usually 3DES.

Setting <<<dfs.encrypt.data.transfer.cipher.suites>>> to
<<<AES/CTR/NoPadding>>> activates AES encryption. By default, this is
unspecified, so AES is not used. When AES is used, the algorithm specified in
<<<dfs.encrypt.data.transfer.algorithm>>> is still used during the initial
key exchange. The AES key bit length can be configured by setting
<<<dfs.encrypt.data.transfer.cipher.key.bitlength>>> to 128, 192 or 256. The
default is 128.

AES offers the greatest cryptographic strength and the best performance. To
date, however, 3DES and RC4 have been used more often in Hadoop clusters.
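
For example, a minimal <<<hdfs-site.xml>>> sketch (values illustrative) that
activates AES with a 256-bit key and uses 3DES for the initial key exchange:

----
<property>
  <name>dfs.encrypt.data.transfer</name>
  <value>true</value>
</property>
<property>
  <name>dfs.encrypt.data.transfer.algorithm</name>
  <value>3des</value>
</property>
<property>
  <name>dfs.encrypt.data.transfer.cipher.suites</name>
  <value>AES/CTR/NoPadding</value>
</property>
<property>
  <name>dfs.encrypt.data.transfer.cipher.key.bitlength</name>
  <value>256</value>
</property>
----
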
** Data Encryption on HTTP
Data transfer between the web console and clients is protected by using SSL (HTTPS).
@@ -491,6 +506,18 @@ Configuration for <<<conf/hdfs-site.xml>>>
| <<<dfs.encrypt.data.transfer>>> | <false> | |
| | | set to <<<true>>> when using data encryption |
*-------------------------+-------------------------+------------------------+
| <<<dfs.encrypt.data.transfer.algorithm>>> | | |
| | | optionally set to <<<3des>>> or <<<rc4>>> when using data encryption to |
| | | control encryption algorithm |
*-------------------------+-------------------------+------------------------+
| <<<dfs.encrypt.data.transfer.cipher.suites>>> | | |
| | | optionally set to <<<AES/CTR/NoPadding>>> to activate AES encryption |
| | | when using data encryption |
*-------------------------+-------------------------+------------------------+
| <<<dfs.encrypt.data.transfer.cipher.key.bitlength>>> | | |
| | | optionally set to <<<128>>>, <<<192>>> or <<<256>>> to control key bit |
| | | length when using AES with data encryption |
*-------------------------+-------------------------+------------------------+
| <<<dfs.data.transfer.protection>>> | | |
| | | <authentication> : authentication only \
| | | <integrity> : integrity check in addition to authentication \


@@ -328,6 +328,12 @@ public Object answer(InvocationOnMock invocation) throws Throwable {
}
}
public static void assertDoesNotMatch(String output, String pattern) {
Assert.assertFalse("Expected output to match /" + pattern + "/" +
" but got:\n" + output,
Pattern.compile(pattern).matcher(output).find());
}
public static void assertMatches(String output, String pattern) {
Assert.assertTrue("Expected output to match /" + pattern + "/" +
" but got:\n" + output,


@@ -674,6 +674,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6385. Show when block deletion will start after NameNode startup in
WebUI. (cnauroth)

HDFS-7313. Support optional configuration of AES cipher suite on
DataTransferProtocol. (cnauroth)

OPTIMIZATIONS

HDFS-6690. Deduplicate xattr names in memory. (wang)


@@ -602,6 +602,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY = "dfs.encrypt.data.transfer.cipher.key.bitlength";
public static final int DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT = 128;
public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY = "dfs.encrypt.data.transfer.cipher.suites";
public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm";
public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
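
A self-contained sketch of reading these keys (it mirrors the conf.get and
conf.getInt calls elsewhere in this commit; the class wrapper and main are
illustrative scaffolding, the key constants are real):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class ReadCipherConf {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Unset by default: null means AES negotiation is skipped.
        String suites = conf.get(
            DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
        // Falls back to the declared default of 128 bits when unset.
        int keyLen = conf.getInt(
            DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY,
            DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT);
        System.out.println("suites=" + suites + ", keyLen=" + keyLen);
      }
    }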


@@ -23,6 +23,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
import java.io.IOException;
@@ -249,14 +250,25 @@ public static byte[] readSaslMessageAndNegotiationCipherOptions(
/**
 * Negotiate a cipher option which the server supports.
 *
 * @param conf the configuration
 * @param options the cipher options which the client supports
 * @return CipherOption negotiated cipher option
 * @throws IOException if the configured cipher suite is invalid
 */
public static CipherOption negotiateCipherOption(Configuration conf,
List<CipherOption> options) {
List<CipherOption> options) throws IOException {
// Negotiate cipher suites if configured. Currently, the only supported
// cipher suite is AES/CTR/NoPadding, but the protocol allows multiple
// values for future expansion.
String cipherSuites = conf.get(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
if (cipherSuites == null || cipherSuites.isEmpty()) {
return null;
}
if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) {
throw new IOException(String.format("Invalid cipher suite, %s=%s",
DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites));
}
if (options != null) {
for (CipherOption option : options) {
// Currently we support AES/CTR/NoPadding
CipherSuite suite = option.getCipherSuite();
if (suite == CipherSuite.AES_CTR_NOPADDING) {
int keyLen = conf.getInt(
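
Distilled from the check above (and the matching client-side check below), a
standalone sketch of the suite validation; the class name and main are
illustrative, while the key name and suite string match the diff:

    import java.io.IOException;

    public class CipherSuiteCheck {
      static final String KEY = "dfs.encrypt.data.transfer.cipher.suites";
      static final String AES = "AES/CTR/NoPadding";

      // Returns true when AES negotiation should proceed, false when the
      // property is unset or empty, and throws for any other value.
      static boolean shouldNegotiateAes(String cipherSuites) throws IOException {
        if (cipherSuites == null || cipherSuites.isEmpty()) {
          return false;
        }
        if (!cipherSuites.equals(AES)) {
          throw new IOException(String.format("Invalid cipher suite, %s=%s",
              KEY, cipherSuites));
        }
        return true;
      }

      public static void main(String[] args) throws IOException {
        System.out.println(shouldNegotiateAes(null));                // false
        System.out.println(shouldNegotiateAes("AES/CTR/NoPadding")); // true
      }
    }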


@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
import java.io.DataInputStream;
@@ -450,10 +451,20 @@ private IOStreamPair doSaslHandshake(OutputStream underlyingOut,
byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
List<CipherOption> cipherOptions = null;
if (requestedQopContainsPrivacy(saslProps)) {
// Negotiate cipher options
CipherOption option = new CipherOption(CipherSuite.AES_CTR_NOPADDING);
cipherOptions = Lists.newArrayListWithCapacity(1);
cipherOptions.add(option);
// Negotiate cipher suites if configured. Currently, the only supported
// cipher suite is AES/CTR/NoPadding, but the protocol allows multiple
// values for future expansion.
String cipherSuites = conf.get(
DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
if (cipherSuites != null && !cipherSuites.isEmpty()) {
if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) {
throw new IOException(String.format("Invalid cipher suite, %s=%s",
DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites));
}
CipherOption option = new CipherOption(CipherSuite.AES_CTR_NOPADDING);
cipherOptions = Lists.newArrayListWithCapacity(1);
cipherOptions.add(option);
}
}
sendSaslMessageAndNegotiationCipherOptions(out, localResponse,
cipherOptions);


@@ -58,6 +58,7 @@
import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.InvalidMagicNumberException;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
@@ -179,11 +180,19 @@ public void run() {
dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
InputStream input = socketIn;
IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
socketIn, datanode.getDatanodeId());
input = new BufferedInputStream(saslStreams.in,
HdfsConstants.SMALL_BUFFER_SIZE);
socketOut = saslStreams.out;
try {
IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
socketIn, datanode.getDatanodeId());
input = new BufferedInputStream(saslStreams.in,
HdfsConstants.SMALL_BUFFER_SIZE);
socketOut = saslStreams.out;
} catch (InvalidMagicNumberException imne) {
LOG.info("Failed to read expected encryption handshake from client " +
"at " + peer.getRemoteAddressString() + ". Perhaps the client " +
"is running an older version of Hadoop which does not support " +
"encryption");
return;
}
super.initialize(new DataInputStream(input));
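
The shape of this change, as a runnable toy (all names are hypothetical
stand-ins for the HDFS types): a magic-number mismatch from a legacy client
is logged and the connection dropped, instead of the exception propagating
and killing the xceiver thread.

    import java.io.IOException;

    public class FailSoftHandshake {
      // Stand-in for InvalidMagicNumberException.
      static class BadMagicException extends IOException {}

      // Stand-in for saslServer.receive(): rejects legacy peers.
      static void receiveHandshake(boolean legacy) throws BadMagicException {
        if (legacy) {
          throw new BadMagicException();
        }
      }

      static void serve(boolean legacy) {
        try {
          receiveHandshake(legacy);
        } catch (BadMagicException e) {
          // Log and return: the peer is probably an older, non-encrypting client.
          System.out.println("legacy client: log and close, do not crash");
          return;
        }
        System.out.println("handshake ok: proceed to read the op");
      }

      public static void main(String[] args) {
        serve(false); // handshake ok
        serve(true);  // legacy client
      }
    }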


@@ -1517,6 +1517,19 @@
Note that if AES is supported by both the client and server then this
encryption algorithm will only be used to initially transfer keys for AES.
(See dfs.encrypt.data.transfer.cipher.suites.)
</description>
</property>
<property>
<name>dfs.encrypt.data.transfer.cipher.suites</name>
<value></value>
<description>
This value may be either undefined or AES/CTR/NoPadding. If defined, then
dfs.encrypt.data.transfer uses the specified cipher suite for data
encryption. If not defined, then only the algorithm specified in
dfs.encrypt.data.transfer.algorithm is used. By default, the property is
not defined.
</description>
</property>


@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -133,12 +134,12 @@ public void testEncryptedRead() throws IOException {
fs.close();
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
if (resolverClazz == null) {
// Test client and server do not negotiate cipher option
GenericTestUtils.assertMatches(logs.getOutput(),
GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
GenericTestUtils.assertMatches(logs1.getOutput(),
GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {
@@ -174,9 +175,28 @@ public void testEncryptedReadWithRC4() throws IOException {
.build();
fs = getFileSystem(conf);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(SaslDataTransferServer.class));
LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(DataTransferSaslUtil.class));
try {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
} finally {
logs.stopCapturing();
logs1.stopCapturing();
}
fs.close();
if (resolverClazz == null) {
// Test client and server do not negotiate cipher option
GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {
if (cluster != null) {
cluster.shutdown();
@@ -184,6 +204,61 @@ public void testEncryptedReadWithRC4() throws IOException {
}
}
@Test
public void testEncryptedReadWithAES() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
"AES/CTR/NoPadding");
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
fs.close();
cluster.shutdown();
setEncryptionConfigKeys(conf);
cluster = new MiniDFSCluster.Builder(conf)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.format(false)
.startupOption(StartupOption.REGULAR)
.build();
fs = getFileSystem(conf);
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(SaslDataTransferServer.class));
LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(DataTransferSaslUtil.class));
try {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
} finally {
logs.stopCapturing();
logs1.stopCapturing();
}
fs.close();
if (resolverClazz == null) {
// Test client and server negotiate cipher option
GenericTestUtils.assertMatches(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
GenericTestUtils.assertMatches(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testEncryptedReadAfterNameNodeRestart() throws IOException {
MiniDFSCluster cluster = null;
@@ -270,7 +345,7 @@ public void testClientThatDoesNotSupportEncryption() throws IOException {
}
fs.close();
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
if (resolverClazz == null) {
GenericTestUtils.assertMatches(logs.getOutput(),
"Failed to read expected encryption handshake from client at");
}
@@ -444,12 +519,12 @@ private void testEncryptedWrite(int numDns) throws IOException {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
fs.close();
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
if (resolverClazz == null) {
// Test client and server do not negotiate cipher option
GenericTestUtils.assertMatches(logs.getOutput(),
GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
GenericTestUtils.assertMatches(logs1.getOutput(),
GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {