From 1f30a8a105d178e0e554e85d713320bf5356e035 Mon Sep 17 00:00:00 2001 From: Lei Xu Date: Thu, 4 May 2017 12:06:50 -0700 Subject: [PATCH] HDFS-11687. Add new public encryption APIs required by Hive. (lei) Change-Id: I4a23a00de63ad18022312ceb1f306a87d032d07c (cherry picked from commit 25f5d9ad5ee5ead349d259a99b49541a70b1604d) --- .../org/apache/hadoop/hdfs/DFSClient.java | 18 ++-------- .../hadoop/hdfs/DistributedFileSystem.java | 9 +++-- .../apache/hadoop/hdfs/client/HdfsAdmin.java | 12 +++++++ .../hadoop/hdfs/TestEncryptionZones.java | 1 + .../org/apache/hadoop/hdfs/TestHdfsAdmin.java | 33 +++++++++++++++++++ 5 files changed, 55 insertions(+), 18 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 07f5e78f806..02b595d7331 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -3067,24 +3067,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, /** * Probe for encryption enabled on this filesystem. - * Note (see HDFS-11689): - * Not to throw exception in this method since it would break hive. - * Hive accesses this method and assumes no exception would be thrown. - * Hive should not access DFSClient since it is InterfaceAudience.Private. - * Deprecated annotation is added to trigger build warning at hive side. - * Request has been made to Hive to remove access to DFSClient. 
* @return true if encryption is enabled */ - @Deprecated - public boolean isHDFSEncryptionEnabled() { - boolean result = false; - try { - result = (getKeyProviderUri() != null); - } catch (IOException ioe) { - DFSClient.LOG.warn("Exception while checking whether encryption zone " - + "is supported, assumes it is not supported", ioe); - } - return result; + boolean isHDFSEncryptionEnabled() throws IOException { + return getKeyProviderUri() != null; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index fd45406400c..a09c1b576e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -2520,8 +2520,13 @@ public class DistributedFileSystem extends FileSystem { */ @Override public Path getTrashRoot(Path path) { - if ((path == null) || !dfs.isHDFSEncryptionEnabled()) { - return super.getTrashRoot(path); + try { + if ((path == null) || !dfs.isHDFSEncryptionEnabled()) { + return super.getTrashRoot(path); + } + } catch (IOException ioe) { + DFSClient.LOG.warn("Exception while checking whether encryption zone is " + + "supported", ioe); } String parentSrc = path.isRoot()? 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java index 64f0b866073..ca95d4c8c67 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java @@ -27,6 +27,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.BlockStoragePolicySpi; import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -266,6 +267,17 @@ public class HdfsAdmin { return dfs.listCachePools(); } + /** + * Get KeyProvider if present. + * + * @return the key provider if encryption is enabled on HDFS. + * Otherwise, it returns null. + * @throws IOException on RPC exception to the NN. + */ + public KeyProvider getKeyProvider() throws IOException { + return dfs.getClient().getKeyProvider(); + } + /** * Create an encryption zone rooted at an empty existing directory, using the * specified encryption key. 
An encryption zone has an associated encryption diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index e61cc8848a1..b4c6de36195 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -351,6 +351,7 @@ public class TestEncryptionZones { @Test public void testBasicOperations() throws Exception { + assertNotNull("key provider is not present", dfsAdmin.getKeyProvider()); int numZones = 0; /* Number of EZs should be 0 if no EZ is created */ assertEquals("Unexpected number of encryption zones!", numZones, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java index 717d79e3ada..fe20c68e06b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import java.io.File; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -27,8 +28,11 @@ import java.util.HashSet; import java.util.Set; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.fs.BlockStoragePolicySpi; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; @@ -172,4 
+176,33 @@ public class TestHdfsAdmin { Assert.assertTrue( Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty()); } + + private static String getKeyProviderURI() { + FileSystemTestHelper helper = new FileSystemTestHelper(); + // Set up java key store + String testRoot = helper.getTestRootDir(); + File testRootDir = new File(testRoot).getAbsoluteFile(); + return JavaKeyStoreProvider.SCHEME_NAME + "://file" + + new Path(testRootDir.toString(), "test.jks").toUri(); + } + + @Test + public void testGetKeyProvider() throws IOException { + HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + Assert.assertNull("should return null for a non-encrypted cluster", + hdfsAdmin.getKeyProvider()); + + shutDownCluster(); + + Configuration conf = new Configuration(); + conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, + getKeyProviderURI()); + + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); + cluster.waitActive(); + hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + + Assert.assertNotNull("should not return null for an encrypted cluster", + hdfsAdmin.getKeyProvider()); + } }