HDFS-11687. Add new public encryption APIs required by Hive. (lei)

Change-Id: I4a23a00de63ad18022312ceb1f306a87d032d07c
Author: Lei Xu
Date:   2017-05-04 12:06:50 -07:00
parent c2a52ef9c2
commit 25f5d9ad5e
5 changed files with 55 additions and 18 deletions

DFSClient.java

@@ -2951,24 +2951,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
   /**
    * Probe for encryption enabled on this filesystem.
-   * Note (see HDFS-11689):
-   * Not to throw exception in this method since it would break hive.
-   * Hive accesses this method and assumes no exception would be thrown.
-   * Hive should not access DFSClient since it is InterfaceAudience.Private.
-   * Deprecated annotation is added to trigger build warning at hive side.
-   * Request has been made to Hive to remove access to DFSClient.
    * @return true if encryption is enabled
    */
-  @Deprecated
-  public boolean isHDFSEncryptionEnabled() {
-    boolean result = false;
-    try {
-      result = (getKeyProviderUri() != null);
-    } catch (IOException ioe) {
-      DFSClient.LOG.warn("Exception while checking whether encryption zone "
-          + "is supported, assumes it is not supported", ioe);
-    }
-    return result;
+  boolean isHDFSEncryptionEnabled() throws IOException {
+    return getKeyProviderUri() != null;
   }
 
   /**
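The rewrite above makes the probe package-private and lets the underlying RPC failure surface to the caller instead of silently mapping it to false. A minimal sketch of how an in-package caller might adapt; the helper class and its fall-back policy are illustrative assumptions, not part of this patch:

package org.apache.hadoop.hdfs;

import java.io.IOException;

// Hypothetical helper (not in this patch): one way an in-package caller
// can consume the now-throwing, package-private probe.
final class EncryptionProbeExample {
  static boolean encryptionEnabledOrFalse(DFSClient client) {
    try {
      return client.isHDFSEncryptionEnabled(); // may now throw IOException
    } catch (IOException ioe) {
      // Each call site now picks its own failure policy; the old method
      // hid this choice by always swallowing the exception.
      return false;
    }
  }
}

The getTrashRoot() hunk below takes exactly this shape: it logs the failure and falls through to the non-encrypted path.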

DistributedFileSystem.java

@@ -2587,8 +2587,13 @@ public class DistributedFileSystem extends FileSystem {
    */
   @Override
   public Path getTrashRoot(Path path) {
-    if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
-      return super.getTrashRoot(path);
+    try {
+      if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
+        return super.getTrashRoot(path);
+      }
+    } catch (IOException ioe) {
+      DFSClient.LOG.warn("Exception while checking whether encryption zone is "
+          + "supported", ioe);
     }
 
     String parentSrc = path.isRoot()?
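getTrashRoot(Path) is encryption-zone aware: for a file inside an encryption zone, the trash root is kept inside the zone so that a move to trash never crosses the zone boundary. A hedged usage sketch; the /ez zone and all paths are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch, assuming a cluster where /ez is an encryption zone.
public class TrashRootExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Inside an encryption zone the trash root stays in the zone,
    // e.g. /ez/.Trash/<user>.
    Path inZone = fs.getTrashRoot(new Path("/ez/data.txt"));
    // Outside any zone it falls back to the home-directory trash,
    // e.g. /user/<user>/.Trash.
    Path outside = fs.getTrashRoot(new Path("/plain/data.txt"));
    System.out.println(inZone + " vs " + outside);
  }
}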

HdfsAdmin.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;

@@ -267,6 +268,17 @@ public class HdfsAdmin {
     return dfs.listCachePools();
   }
 
+  /**
+   * Get KeyProvider if present.
+   *
+   * @return the key provider if encryption is enabled on HDFS.
+   *         Otherwise, it returns null.
+   * @throws IOException on RPC exception to the NN.
+   */
+  public KeyProvider getKeyProvider() throws IOException {
+    return dfs.getClient().getKeyProvider();
+  }
+
   /**
    * Create an encryption zone rooted at an empty existing directory, using the
    * specified encryption key. An encryption zone has an associated encryption
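This getKeyProvider() method is the public surface that replaces direct DFSClient access: HdfsAdmin is a public-audience API, so downstream projects such as Hive can probe for encryption support without touching private classes. A hedged sketch of the intended call pattern; everything except the HdfsAdmin API itself is an illustrative assumption:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

// Hypothetical downstream usage (not in this patch).
public class DownstreamEncryptionCheck {
  public static boolean isEncryptionEnabled(Configuration conf)
      throws IOException {
    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    // A null provider means no key provider is configured, so encryption
    // zones cannot be used on this cluster.
    return admin.getKeyProvider() != null;
  }
}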

TestEncryptionZones.java

@@ -349,6 +349,7 @@ public class TestEncryptionZones {
   @Test
   public void testBasicOperations() throws Exception {
+    assertNotNull("key provider is not present", dfsAdmin.getKeyProvider());
     int numZones = 0;
 
     /* Number of EZs should be 0 if no EZ is created */
     assertEquals("Unexpected number of encryption zones!", numZones,

TestHdfsAdmin.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;

@@ -27,8 +28,11 @@ import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -172,4 +176,33 @@ public class TestHdfsAdmin {
     Assert.assertTrue(
         Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
   }
+
+  private static String getKeyProviderURI() {
+    FileSystemTestHelper helper = new FileSystemTestHelper();
+    // Set up java key store
+    String testRoot = helper.getTestRootDir();
+    File testRootDir = new File(testRoot).getAbsoluteFile();
+    return JavaKeyStoreProvider.SCHEME_NAME + "://file" +
+        new Path(testRootDir.toString(), "test.jks").toUri();
+  }
+
+  @Test
+  public void testGetKeyProvider() throws IOException {
+    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    Assert.assertNull("should return null for a non-encrypted cluster",
+        hdfsAdmin.getKeyProvider());
+    shutDownCluster();
+
+    Configuration conf = new Configuration();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        getKeyProviderURI());
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    cluster.waitActive();
+    hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    Assert.assertNotNull("should not return null for an encrypted cluster",
+        hdfsAdmin.getKeyProvider());
+  }
 }
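For reference, the provider URI assembled by getKeyProviderURI() above has the JCEKS-on-local-file form (jceks://file/<absolute path>/test.jks), wired in through the standard key-provider configuration key. A hedged standalone sketch; the /tmp path is an illustrative assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;

// Illustrative config wiring that mirrors the test above; the resulting
// value looks like jceks://file/tmp/hdfs-test/test.jks.
public class KeyProviderConfigExample {
  public static Configuration withLocalJceksProvider() {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
        "jceks://file" + new Path("/tmp/hdfs-test", "test.jks").toUri());
    return conf;
  }
}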