HDFS-11687. Add new public encryption APIs required by Hive. (lei)
Change-Id: I4a23a00de63ad18022312ceb1f306a87d032d07c
(cherry picked from commit 25f5d9ad5e)
parent 89baea96d5
commit 1f30a8a105
@@ -3067,24 +3067,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
  /**
   * Probe for encryption enabled on this filesystem.
   * Note (see HDFS-11689):
   * Not to throw exception in this method since it would break hive.
   * Hive accesses this method and assumes no exception would be thrown.
   * Hive should not access DFSClient since it is InterfaceAudience.Private.
   * Deprecated annotation is added to trigger build warning at hive side.
   * Request has been made to Hive to remove access to DFSClient.
   * @return true if encryption is enabled
   */
  @Deprecated
  public boolean isHDFSEncryptionEnabled() {
    boolean result = false;
    try {
      result = (getKeyProviderUri() != null);
    } catch (IOException ioe) {
      DFSClient.LOG.warn("Exception while checking whether encryption zone "
          + "is supported, assumes it is not supported", ioe);
    }
    return result;
  }

  boolean isHDFSEncryptionEnabled() throws IOException {
    return getKeyProviderUri() != null;
  }

  /**
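The hunk above shows two variants of isHDFSEncryptionEnabled(): one that swallows the IOException so Hive's existing calls keep working, and a package-private one that propagates the exception to callers inside the client. Since the javadoc asks Hive to stop reaching into the InterfaceAudience.Private DFSClient, the sketch below (not part of this patch; the class name EncryptionProbe and the cluster URI argument are illustrative) shows how an external application could probe for encryption through the public HdfsAdmin API that this commit adds instead.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class EncryptionProbe {
  // Returns true when the cluster has a key provider configured, i.e. HDFS
  // encryption zones can be in use. Mirrors what isHDFSEncryptionEnabled()
  // reports, but through a public, stable API instead of DFSClient.
  public static boolean isEncryptionEnabled(URI clusterUri, Configuration conf)
      throws IOException {
    HdfsAdmin admin = new HdfsAdmin(clusterUri, conf);
    KeyProvider provider = admin.getKeyProvider();
    return provider != null;
  }
}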
@@ -2520,9 +2520,14 @@ public class DistributedFileSystem extends FileSystem {
   */
  @Override
  public Path getTrashRoot(Path path) {
    try {
      if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
        return super.getTrashRoot(path);
      }
    } catch (IOException ioe) {
      DFSClient.LOG.warn("Exception while checking whether encryption zone is "
          + "supported", ioe);
    }

    String parentSrc = path.isRoot()?
        path.toUri().getPath():path.getParent().toUri().getPath();
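The getTrashRoot() hunk above guards the encryption probe with a try/catch so that a failed check falls back to the default trash root. For context, a minimal, hypothetical sketch of how this is visible through the public FileSystem API; the paths /tmp/plain-file and /ez/secret-file are placeholders, and the expected trash locations follow the encryption-zone trash layout (a .Trash directory inside the zone), not anything introduced by this hunk.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TrashRootExample {
  public static void printTrashRoots(Configuration conf) throws Exception {
    FileSystem fs = FileSystem.get(conf);
    // Outside an encryption zone the trash root is the usual per-user
    // location, e.g. /user/<name>/.Trash.
    System.out.println(fs.getTrashRoot(new Path("/tmp/plain-file")));
    // For a path inside an encryption zone rooted at /ez, the trash root is
    // expected to resolve inside the zone (e.g. /ez/.Trash/<name>) so that
    // deleted files never leave the zone.
    System.out.println(fs.getTrashRoot(new Path("/ez/secret-file")));
  }
}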
@@ -27,6 +27,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -266,6 +267,17 @@ public class HdfsAdmin {
    return dfs.listCachePools();
  }

  /**
   * Get KeyProvider if present.
   *
   * @return the key provider if encryption is enabled on HDFS.
   *         Otherwise, it returns null.
   * @throws IOException on RPC exception to the NN.
   */
  public KeyProvider getKeyProvider() throws IOException {
    return dfs.getClient().getKeyProvider();
  }

  /**
   * Create an encryption zone rooted at an empty existing directory, using the
   * specified encryption key. An encryption zone has an associated encryption
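getKeyProvider() is the new public entry point this commit adds to HdfsAdmin. A minimal usage sketch follows, assuming an already configured client; the key name "demoKey" and the /ez path are placeholders, and the key and zone setup calls are standard KeyProvider and HdfsAdmin methods rather than part of this patch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class CreateZoneExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);

    // Check that the cluster is wired to a key provider before trying to
    // create any encryption zone.
    KeyProvider provider = admin.getKeyProvider();
    if (provider == null) {
      throw new IOException("No KeyProvider configured; encryption is disabled");
    }

    // Create an encryption key and a zone rooted at an empty directory.
    provider.createKey("demoKey", new KeyProvider.Options(conf));
    provider.flush();
    FileSystem.get(conf).mkdirs(new Path("/ez"));
    admin.createEncryptionZone(new Path("/ez"), "demoKey");
  }
}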
@@ -351,6 +351,7 @@ public class TestEncryptionZones {
  @Test
  public void testBasicOperations() throws Exception {

    assertNotNull("key provider is not present", dfsAdmin.getKeyProvider());
    int numZones = 0;
    /* Number of EZs should be 0 if no EZ is created */
    assertEquals("Unexpected number of encryption zones!", numZones,
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
@@ -27,8 +28,11 @@ import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -172,4 +176,33 @@ public class TestHdfsAdmin {
    Assert.assertTrue(
        Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
  }

  private static String getKeyProviderURI() {
    FileSystemTestHelper helper = new FileSystemTestHelper();
    // Set up java key store
    String testRoot = helper.getTestRootDir();
    File testRootDir = new File(testRoot).getAbsoluteFile();
    return JavaKeyStoreProvider.SCHEME_NAME + "://file" +
        new Path(testRootDir.toString(), "test.jks").toUri();
  }

  @Test
  public void testGetKeyProvider() throws IOException {
    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    Assert.assertNull("should return null for a non-encrypted cluster",
        hdfsAdmin.getKeyProvider());

    shutDownCluster();

    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
        getKeyProviderURI());

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);

    Assert.assertNotNull("should not return null for an encrypted cluster",
        hdfsAdmin.getKeyProvider());
  }
}
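The test above wires a JavaKeyStoreProvider into the MiniDFSCluster through hadoop.security.key.provider.path. As a hedged sketch of the equivalent setup on a real cluster (the KMS address below is a placeholder), the same configuration key is what a deployment would point at its KMS so that HdfsAdmin.getKeyProvider() returns a non-null provider.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

public class KeyProviderConf {
  // Builds a Configuration that names a KMS as the cluster key provider;
  // the host, port, and path are placeholders for a real deployment.
  public static Configuration withKms() {
    Configuration conf = new Configuration();
    // Same key the test sets: hadoop.security.key.provider.path
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
        "kms://http@kms.example.com:9600/kms");
    return conf;
  }
}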