HDFS-7032. Add WebHDFS support for reading and writing to encryption zones. Contributed by Charles Lamb.

(cherry picked from commit 43b0303008)
commit d595557859 (parent a0c54aeb00)
Andrew Wang, 2014-09-15 10:23:57 -07:00
3 changed files with 63 additions and 5 deletions
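
The change is transparent to WebHDFS clients: once the server-side streams are wrapped, a file under an encryption zone is written and read as plaintext over webhdfs:// just as over hdfs://, with encryption and decryption happening inside the cluster. Below is a minimal client-side sketch of the behavior this commit enables; the NameNode address, the /zone encryption zone, and its key are assumptions for illustration, not part of the commit.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsEncryptionZoneExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed NameNode HTTP address; adjust for your cluster.
    FileSystem webhdfs =
        FileSystem.get(URI.create("webhdfs://nn.example.com:50070"), conf);

    // /zone is assumed to already be an encryption zone, e.g. created with:
    //   hdfs crypto -createZone -keyName mykey -path /zone
    Path file = new Path("/zone/hello.txt");

    // Bytes are encrypted at rest; this writer only ever sees plaintext.
    FSDataOutputStream out = webhdfs.create(file);
    out.writeUTF("hello, encrypted world");
    out.close();

    // Reads come back decrypted, byte-for-byte what was written.
    FSDataInputStream in = webhdfs.open(file);
    System.out.println(in.readUTF());
    in.close();
  }
}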


@@ -389,6 +389,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7045. Fix NameNode deadlock when opening file under /.reserved path.
     (Yi Liu via wang)

+    HDFS-7032. Add WebHDFS support for reading and writing to encryption zones.
+    (clamb via wang)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS

     HDFS-6387. HDFS CLI admin tool for creating & deleting an


@@ -231,11 +231,13 @@ private Response put(
       DFSClient dfsclient = newDfsClient(nnId, conf);
       FSDataOutputStream out = null;
       try {
-        out = new FSDataOutputStream(dfsclient.create(
+        out = dfsclient.createWrappedOutputStream(dfsclient.create(
             fullpath, permission.getFsPermission(),
-            overwrite.getValue() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
-                : EnumSet.of(CreateFlag.CREATE),
-            replication.getValue(conf), blockSize.getValue(conf), null, b, null), null);
+            overwrite.getValue() ?
+                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) :
+                EnumSet.of(CreateFlag.CREATE),
+            replication.getValue(conf), blockSize.getValue(conf), null,
+            b, null), null);
         IOUtils.copyBytes(in, out, b);
         out.close();
         out = null;
@@ -418,7 +420,8 @@ private Response get(
       final DFSClient dfsclient = newDfsClient(nnId, conf);
       HdfsDataInputStream in = null;
       try {
-        in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
+        in = dfsclient.createWrappedInputStream(
+            dfsclient.open(fullpath, b, true));
         in.seek(offset.getValue());
       } catch(IOException ioe) {
         IOUtils.cleanup(LOG, in);
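
Both hunks above follow one pattern: instead of wrapping the raw DFSOutputStream/DFSInputStream directly in FSDataOutputStream/HdfsDataInputStream, the WebHDFS handler asks DFSClient for a wrapped stream, which layers encryption or decryption on top when the target file is inside an encryption zone and passes bytes through untouched otherwise. Here is a self-contained sketch of that pattern using only the DFSClient calls visible in the diff; the path, permission, replication, and sizes are illustrative. Note DFSClient is a private HDFS API, so this mirrors the server-side usage rather than something applications should call directly.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

public class WrappedStreamSketch {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS in the Configuration points at a running NameNode.
    Configuration conf = new Configuration();
    DFSClient client = new DFSClient(conf);
    String path = "/zone/example";  // assumed to sit inside an encryption zone

    // Write side: create() returns the raw stream; createWrappedOutputStream
    // adds a crypto layer only if the file carries encryption info.
    HdfsDataOutputStream out = client.createWrappedOutputStream(
        client.create(path, FsPermission.getFileDefault(),
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            (short) 1, 128 * 1024 * 1024L, null, 4096, null),
        null);
    out.write(42);
    out.close();

    // Read side: the same wrapping decrypts transparently on open.
    HdfsDataInputStream in = client.createWrappedInputStream(
        client.open(path, 4096, true));
    System.out.println(in.read());
    in.close();
    client.close();
  }
}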


@@ -42,6 +42,7 @@
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSTestWrapper;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestWrapper;
@@ -62,6 +63,8 @@
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -570,6 +573,55 @@ public void testReadWrite() throws Exception {
     verifyFilesEqual(fs, encFile1, encFile2, len);
   }

+  @Test(timeout = 120000)
+  public void testReadWriteUsingWebHdfs() throws Exception {
+    final HdfsAdmin dfsAdmin =
+        new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+        WebHdfsFileSystem.SCHEME);
+
+    final Path zone = new Path("/zone");
+    fs.mkdirs(zone);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY);
+
+    /* Create an unencrypted file for comparison purposes. */
+    final Path unencFile = new Path("/unenc");
+    final int len = 8192;
+    DFSTestUtil.createFile(webHdfsFs, unencFile, len, (short) 1, 0xFEED);
+
+    /*
+     * Create the same file via webhdfs, but this time encrypted. Compare it
+     * using both webhdfs and DFS.
+     */
+    final Path encFile1 = new Path(zone, "myfile");
+    DFSTestUtil.createFile(webHdfsFs, encFile1, len, (short) 1, 0xFEED);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
+    verifyFilesEqual(fs, unencFile, encFile1, len);
+
+    /*
+     * Same thing except this time create the encrypted file using DFS.
+     */
+    final Path encFile2 = new Path(zone, "myfile2");
+    DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
+    verifyFilesEqual(fs, unencFile, encFile2, len);
+
+    /* Verify appending to files works correctly. */
+    appendOneByte(fs, unencFile);
+    appendOneByte(webHdfsFs, encFile1);
+    appendOneByte(fs, encFile2);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
+    verifyFilesEqual(fs, unencFile, encFile1, len);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
+    verifyFilesEqual(fs, unencFile, encFile2, len);
+  }
+
+  private void appendOneByte(FileSystem fs, Path p) throws IOException {
+    final FSDataOutputStream out = fs.append(p);
+    out.write((byte) 0x123);
+    out.close();
+  }
+
   @Test(timeout = 60000)
   public void testCipherSuiteNegotiation() throws Exception {
     final HdfsAdmin dfsAdmin =
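
One subtlety in the new appendOneByte helper: out.write((byte) 0x123) narrows the int literal, so the byte actually appended is 0x23; since every file receives the same byte, the test's equality checks still hold. An equivalent stand-alone form with the narrowing made explicit (illustrative, not part of the commit):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class AppendOneByte {
  // Appends a single byte to p; equivalent to the commit's helper.
  static void appendOneByte(FileSystem fs, Path p) throws IOException {
    FSDataOutputStream out = fs.append(p);
    try {
      out.write(0x23);  // (byte) 0x123 narrows to 0x23
    } finally {
      out.close();
    }
  }
}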