HDFS-7032. Add WebHDFS support for reading and writing to encryption zones. Contributed by Charles Lamb.
parent 24d920b80e
commit 43b0303008
@@ -647,6 +647,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7045. Fix NameNode deadlock when opening file under /.reserved path.
     (Yi Liu via wang)
 
+    HDFS-7032. Add WebHDFS support for reading and writing to encryption zones.
+    (clamb via wang)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HDFS-6387. HDFS CLI admin tool for creating & deleting an
@@ -231,11 +231,13 @@ public class DatanodeWebHdfsMethods {
       DFSClient dfsclient = newDfsClient(nnId, conf);
       FSDataOutputStream out = null;
       try {
-        out = new FSDataOutputStream(dfsclient.create(
+        out = dfsclient.createWrappedOutputStream(dfsclient.create(
             fullpath, permission.getFsPermission(),
-            overwrite.getValue() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
-                : EnumSet.of(CreateFlag.CREATE),
-            replication.getValue(conf), blockSize.getValue(conf), null, b, null), null);
+            overwrite.getValue() ?
+                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) :
+                EnumSet.of(CreateFlag.CREATE),
+            replication.getValue(conf), blockSize.getValue(conf), null,
+            b, null), null);
         IOUtils.copyBytes(in, out, b);
         out.close();
         out = null;
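Note on the write path: the CREATE handler now obtains its stream from DFSClient.createWrappedOutputStream(), which layers encryption over the raw stream when the target file lies in an encryption zone, so WebHDFS writers need no client-side crypto. A minimal client-side sketch of what this enables, using only public FileSystem APIs; the endpoint nn.example.com:50070 and the zone path /zone are illustrative assumptions, not part of this commit:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsEzWriteExample {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    // Assumed WebHDFS endpoint; /zone is assumed to be an existing
    // encryption zone created by the cluster admin.
    final FileSystem webHdfs =
        FileSystem.get(URI.create("webhdfs://nn.example.com:50070"), conf);
    final FSDataOutputStream out = webHdfs.create(new Path("/zone/hello.txt"));
    // With this change, the datanode wraps the underlying stream in an
    // encrypting stream, so the bytes land encrypted at rest.
    out.write("hello, encrypted world".getBytes("UTF-8"));
    out.close();
  }
}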
@@ -418,7 +420,8 @@ public class DatanodeWebHdfsMethods {
       final DFSClient dfsclient = newDfsClient(nnId, conf);
       HdfsDataInputStream in = null;
       try {
-        in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
+        in = dfsclient.createWrappedInputStream(
+            dfsclient.open(fullpath, b, true));
         in.seek(offset.getValue());
       } catch(IOException ioe) {
         IOUtils.cleanup(LOG, in);
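Symmetrically, the OPEN handler now wraps the DFSInputStream via DFSClient.createWrappedInputStream(), so reads from an encryption zone return plaintext. A matching read sketch under the same assumed endpoint and path as above:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class WebHdfsEzReadExample {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    final FileSystem webHdfs =
        FileSystem.get(URI.create("webhdfs://nn.example.com:50070"), conf);
    final FSDataInputStream in = webHdfs.open(new Path("/zone/hello.txt"));
    try {
      // The datanode decrypts before streaming the response, so this
      // prints the original plaintext; seek offsets are plaintext offsets.
      IOUtils.copyBytes(in, System.out, conf, false);
    } finally {
      IOUtils.closeStream(in);
    }
  }
}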
@@ -42,6 +42,7 @@ import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSTestWrapper;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestWrapper;
@@ -62,6 +63,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -570,6 +573,55 @@ public class TestEncryptionZones {
     verifyFilesEqual(fs, encFile1, encFile2, len);
   }
 
+  @Test(timeout = 120000)
+  public void testReadWriteUsingWebHdfs() throws Exception {
+    final HdfsAdmin dfsAdmin =
+        new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+        WebHdfsFileSystem.SCHEME);
+
+    final Path zone = new Path("/zone");
+    fs.mkdirs(zone);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY);
+
+    /* Create an unencrypted file for comparison purposes. */
+    final Path unencFile = new Path("/unenc");
+    final int len = 8192;
+    DFSTestUtil.createFile(webHdfsFs, unencFile, len, (short) 1, 0xFEED);
+
+    /*
+     * Create the same file via webhdfs, but this time encrypted. Compare it
+     * using both webhdfs and DFS.
+     */
+    final Path encFile1 = new Path(zone, "myfile");
+    DFSTestUtil.createFile(webHdfsFs, encFile1, len, (short) 1, 0xFEED);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
+    verifyFilesEqual(fs, unencFile, encFile1, len);
+
+    /*
+     * Same thing except this time create the encrypted file using DFS.
+     */
+    final Path encFile2 = new Path(zone, "myfile2");
+    DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
+    verifyFilesEqual(fs, unencFile, encFile2, len);
+
+    /* Verify appending to files works correctly. */
+    appendOneByte(fs, unencFile);
+    appendOneByte(webHdfsFs, encFile1);
+    appendOneByte(fs, encFile2);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
+    verifyFilesEqual(fs, unencFile, encFile1, len);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
+    verifyFilesEqual(fs, unencFile, encFile2, len);
+  }
+
+  private void appendOneByte(FileSystem fs, Path p) throws IOException {
+    final FSDataOutputStream out = fs.append(p);
+    out.write((byte) 0x123);
+    out.close();
+  }
+
   @Test(timeout = 60000)
   public void testCipherSuiteNegotiation() throws Exception {
     final HdfsAdmin dfsAdmin =
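The new test also appends one byte through both DFS and WebHDFS and re-verifies file equality. A standalone sketch of that append step against a running cluster, under the same assumed endpoint and path as the earlier sketches; the datanode's APPEND handler itself is not shown in this excerpt:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsEzAppendExample {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    final FileSystem webHdfs =
        FileSystem.get(URI.create("webhdfs://nn.example.com:50070"), conf);
    // Append to a file inside an encryption zone; the test exercises this
    // via appendOneByte(webHdfsFs, encFile1).
    final FSDataOutputStream out = webHdfs.append(new Path("/zone/hello.txt"));
    // One byte, mirroring the test's helper: its (byte) 0x123 cast keeps
    // only the low 8 bits, i.e. 0x23.
    out.write(0x23);
    out.close();
  }
}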