From 43b03030084839db041d0337013806aaeef12aaa Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Mon, 15 Sep 2014 10:23:57 -0700
Subject: [PATCH] HDFS-7032. Add WebHDFS support for reading and writing to
 encryption zones. Contributed by Charles Lamb.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt       |  3 ++
 .../web/resources/DatanodeWebHdfsMethods.java     | 13 +++--
 .../hadoop/hdfs/TestEncryptionZones.java          | 52 +++++++++++++++++++
 3 files changed, 63 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 57a4a0f9bd8..209c7c1ed3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -647,6 +647,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7045. Fix NameNode deadlock when opening file under /.reserved path.
     (Yi Liu via wang)
 
+    HDFS-7032. Add WebHDFS support for reading and writing to encryption zones.
+    (clamb via wang)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HDFS-6387. HDFS CLI admin tool for creating & deleting an
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
index 51731c8d013..0f0f3be2520 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
@@ -231,11 +231,13 @@ private Response put(
       DFSClient dfsclient = newDfsClient(nnId, conf);
       FSDataOutputStream out = null;
       try {
-        out = new FSDataOutputStream(dfsclient.create(
+        out = dfsclient.createWrappedOutputStream(dfsclient.create(
             fullpath, permission.getFsPermission(),
-            overwrite.getValue() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
-                : EnumSet.of(CreateFlag.CREATE),
-            replication.getValue(conf), blockSize.getValue(conf), null, b, null), null);
+            overwrite.getValue() ?
+              EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) :
+              EnumSet.of(CreateFlag.CREATE),
+            replication.getValue(conf), blockSize.getValue(conf), null,
+            b, null), null);
         IOUtils.copyBytes(in, out, b);
         out.close();
         out = null;
@@ -418,7 +420,8 @@ private Response get(
       final DFSClient dfsclient = newDfsClient(nnId, conf);
       HdfsDataInputStream in = null;
       try {
-        in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
+        in = dfsclient.createWrappedInputStream(
+            dfsclient.open(fullpath, b, true));
         in.seek(offset.getValue());
       } catch(IOException ioe) {
         IOUtils.cleanup(LOG, in);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index b3bf5d9a37b..68fc8506526 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -42,6 +42,7 @@
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSTestWrapper;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestWrapper;
@@ -62,6 +63,8 @@
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -570,6 +573,55 @@ public void testReadWrite() throws Exception {
     verifyFilesEqual(fs, encFile1, encFile2, len);
   }
 
+  @Test(timeout = 120000)
+  public void testReadWriteUsingWebHdfs() throws Exception {
+    final HdfsAdmin dfsAdmin =
+        new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+        WebHdfsFileSystem.SCHEME);
+
+    final Path zone = new Path("/zone");
+    fs.mkdirs(zone);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY);
+
+    /* Create an unencrypted file for comparison purposes. */
+    final Path unencFile = new Path("/unenc");
+    final int len = 8192;
+    DFSTestUtil.createFile(webHdfsFs, unencFile, len, (short) 1, 0xFEED);
+
+    /*
+     * Create the same file via webhdfs, but this time encrypted. Compare it
+     * using both webhdfs and DFS.
+     */
+    final Path encFile1 = new Path(zone, "myfile");
+    DFSTestUtil.createFile(webHdfsFs, encFile1, len, (short) 1, 0xFEED);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
+    verifyFilesEqual(fs, unencFile, encFile1, len);
+
+    /*
+     * Same thing except this time create the encrypted file using DFS.
+     */
+    final Path encFile2 = new Path(zone, "myfile2");
+    DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
+    verifyFilesEqual(fs, unencFile, encFile2, len);
+
+    /* Verify appending to files works correctly. */
+    appendOneByte(fs, unencFile);
+    appendOneByte(webHdfsFs, encFile1);
+    appendOneByte(fs, encFile2);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
+    verifyFilesEqual(fs, unencFile, encFile1, len);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
+    verifyFilesEqual(fs, unencFile, encFile2, len);
+  }
+
+  private void appendOneByte(FileSystem fs, Path p) throws IOException {
+    final FSDataOutputStream out = fs.append(p);
+    out.write((byte) 0x123);
+    out.close();
+  }
+
   @Test(timeout = 60000)
   public void testCipherSuiteNegotiation() throws Exception {
     final HdfsAdmin dfsAdmin =
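
With this change in place, a WebHDFS client needs no special handling for encryption zones: ordinary FileSystem create/open calls over a webhdfs:// URI go through the datanode, which now wraps its DFSClient streams so data is encrypted on write and decrypted on read. The following is a minimal client-side sketch of that behavior, separate from the patch itself; the NameNode host and port, the zone path, and the file name are illustrative assumptions, and the cluster is assumed to already have an encryption zone at /zone backed by a provisioned key.

import java.net.URI;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class WebHdfsEncryptionZoneExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // webhdfs:// URI of the NameNode; host and port are placeholders.
    FileSystem webHdfs =
        FileSystem.get(URI.create("webhdfs://namenode.example.com:50070"), conf);

    Path encFile = new Path("/zone/hello.txt");
    byte[] data = "hello, encrypted world".getBytes(StandardCharsets.UTF_8);

    // Write through WebHDFS; the datanode wraps the underlying DFSClient
    // output stream, so the bytes are encrypted before they reach the blocks.
    try (FSDataOutputStream out = webHdfs.create(encFile)) {
      out.write(data);
    }

    // Read back through WebHDFS; the wrapped input stream decrypts transparently.
    byte[] readBack = new byte[data.length];
    try (FSDataInputStream in = webHdfs.open(encFile)) {
      IOUtils.readFully(in, readBack, 0, readBack.length);
    }
    System.out.println(new String(readBack, StandardCharsets.UTF_8));
  }
}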