From c760eec0541a8fc564da997014baddfc6ea6baed Mon Sep 17 00:00:00 2001
From: James Baiera
Date: Thu, 21 Sep 2017 11:55:07 -0400
Subject: [PATCH] Add permission checks before reading from HDFS stream (#26716)

Add checks for special permissions before reading hdfs stream data. Also adds
a test for the readonly repository fix. MiniHDFS will now start with an
existing repository containing a single snapshot. A readonly repository is
created in the tests, which then attempt to list the snapshots within it.
---
 .../repositories/hdfs/HdfsBlobContainer.java  | 19 ++++++---
 .../repositories/hdfs/HdfsBlobStore.java      |  2 +-
 .../repositories/hdfs/HdfsRepository.java     |  2 +-
 .../hdfs/HdfsSecurityContext.java             |  4 +-
 .../hdfs_repository/30_snapshot_readonly.yml  | 29 +++++++++++++
 .../30_snapshot_readonly.yml                  | 31 ++++++++++++++
 .../src/main/java/hdfs/MiniHDFS.java          | 40 ++++++++++++++----
 .../main/resources/readonly-repository.tar.gz | Bin 0 -> 1314 bytes
 8 files changed, 111 insertions(+), 16 deletions(-)
 create mode 100644 plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml
 create mode 100644 plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml
 create mode 100644 test/fixtures/hdfs-fixture/src/main/resources/readonly-repository.tar.gz

diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
index 4649cf858d2..f160f4c4ead 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Path;
+import org.elasticsearch.SpecialPermission;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
@@ -45,12 +46,14 @@ import java.util.Map;
 
 final class HdfsBlobContainer extends AbstractBlobContainer {
     private final HdfsBlobStore store;
+    private final HdfsSecurityContext securityContext;
     private final Path path;
     private final int bufferSize;
 
-    HdfsBlobContainer(BlobPath blobPath, HdfsBlobStore store, Path path, int bufferSize) {
+    HdfsBlobContainer(BlobPath blobPath, HdfsBlobStore store, Path path, int bufferSize, HdfsSecurityContext hdfsSecurityContext) {
         super(blobPath);
         this.store = store;
+        this.securityContext = hdfsSecurityContext;
         this.path = path;
         this.bufferSize = bufferSize;
     }
@@ -90,7 +93,9 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
         // FSDataInputStream can open connections on read() or skip() so we wrap in
         // HDFSPrivilegedInputSteam which will ensure that underlying methods will
         // be called with the proper privileges.
-        return store.execute(fileContext -> new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize)));
+        return store.execute(fileContext ->
+            new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize), securityContext)
+        );
     }
 
     @Override
@@ -144,8 +149,11 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
      */
     private static class HDFSPrivilegedInputSteam extends FilterInputStream {
 
-        HDFSPrivilegedInputSteam(InputStream in) {
+        private final HdfsSecurityContext securityContext;
+
+        HDFSPrivilegedInputSteam(InputStream in, HdfsSecurityContext hdfsSecurityContext) {
             super(in);
+            this.securityContext = hdfsSecurityContext;
         }
 
         public int read() throws IOException {
@@ -175,9 +183,10 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
             });
         }
 
-        private static <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
+        private <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
+            SpecialPermission.check();
             try {
-                return AccessController.doPrivileged(action);
+                return AccessController.doPrivileged(action, null, securityContext.getRestrictedExecutionPermissions());
             } catch (PrivilegedActionException e) {
                 throw (IOException) e.getCause();
             }
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java
index 8d88b7fd074..fb26bd46754 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java
@@ -75,7 +75,7 @@ final class HdfsBlobStore implements BlobStore {
 
     @Override
     public BlobContainer blobContainer(BlobPath path) {
-        return new HdfsBlobContainer(path, this, buildHdfsPath(path), bufferSize);
+        return new HdfsBlobContainer(path, this, buildHdfsPath(path), bufferSize, securityContext);
     }
 
     private Path buildHdfsPath(BlobPath blobPath) {
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java
index 16ed9d06a5e..66975e4dcc6 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java
@@ -132,7 +132,7 @@ public final class HdfsRepository extends BlobStoreRepository {
         hadoopConfiguration.setBoolean("fs.hdfs.impl.disable.cache", true);
 
         // Create the filecontext with our user information
-        // This will correctly configure the filecontext to have our UGI as it's internal user.
+        // This will correctly configure the filecontext to have our UGI as its internal user.
         return ugi.doAs((PrivilegedAction<FileContext>) () -> {
             try {
                 AbstractFileSystem fs = AbstractFileSystem.get(uri, hadoopConfiguration);
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java
index 3cd1a5a40fd..bd16d87d879 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java
@@ -56,7 +56,9 @@ class HdfsSecurityContext {
             // 1) hadoop dynamic proxy is messy with access rules
             new ReflectPermission("suppressAccessChecks"),
             // 2) allow hadoop to add credentials to our Subject
-            new AuthPermission("modifyPrivateCredentials")
+            new AuthPermission("modifyPrivateCredentials"),
+            // 3) RPC Engine requires this for re-establishing pooled connections over the lifetime of the client
+            new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read")
         };
 
         // If Security is enabled, we need all the following elevated permissions:
diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml
new file mode 100644
index 00000000000..c2a37964e70
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml
@@ -0,0 +1,29 @@
+# Integration tests for HDFS Repository plugin
+#
+# Tests retrieving information about snapshot
+#
+---
+"Get a snapshot - readonly":
+  # Create repository
+  - do:
+      snapshot.create_repository:
+        repository: test_snapshot_repository_ro
+        body:
+          type: hdfs
+          settings:
+            uri: "hdfs://localhost:9999"
+            path: "/user/elasticsearch/existing/readonly-repository"
+            readonly: true
+
+  # List snapshot info
+  - do:
+      snapshot.get:
+        repository: test_snapshot_repository_ro
+        snapshot: "_all"
+
+  - length: { snapshots: 1 }
+
+  # Remove our repository
+  - do:
+      snapshot.delete_repository:
+        repository: test_snapshot_repository_ro
diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml
new file mode 100644
index 00000000000..8c4c0347a15
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml
@@ -0,0 +1,31 @@
+# Integration tests for HDFS Repository plugin
+#
+# Tests retrieving information about snapshot
+#
+---
+"Get a snapshot - readonly":
+  # Create repository
+  - do:
+      snapshot.create_repository:
+        repository: test_snapshot_repository_ro
+        body:
+          type: hdfs
+          settings:
+            uri: "hdfs://localhost:9998"
+            path: "/user/elasticsearch/existing/readonly-repository"
+            security:
+              principal: "elasticsearch@BUILD.ELASTIC.CO"
+            readonly: true
+
+  # List snapshot info
+  - do:
+      snapshot.get:
+        repository: test_snapshot_repository_ro
+        snapshot: "_all"
+
+  - length: { snapshots: 1 }
+
+  # Remove our repository
+  - do:
+      snapshot.delete_repository:
+        repository: test_snapshot_repository_ro
diff --git a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java
index 7d41d94e99a..73f4e443b07 100644
--- a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java
+++ b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java
@@ -19,7 +19,9 @@
 
 package hdfs;
 
+import java.io.File;
 import java.lang.management.ManagementFactory;
+import java.net.URL;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -29,9 +31,11 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -100,15 +104,35 @@ public class MiniHDFS {
         }
         MiniDFSCluster dfs = builder.build();
 
-        // Set the elasticsearch user directory up
-        if (UserGroupInformation.isSecurityEnabled()) {
-            FileSystem fs = dfs.getFileSystem();
-            org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
+        // Configure contents of the filesystem
+        org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
+        try (FileSystem fs = dfs.getFileSystem()) {
+
+            // Set the elasticsearch user directory up
             fs.mkdirs(esUserPath);
-            List<AclEntry> acls = new ArrayList<>();
-            acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch").setPermission(FsAction.ALL).build());
-            fs.modifyAclEntries(esUserPath, acls);
-            fs.close();
+            if (UserGroupInformation.isSecurityEnabled()) {
+                List<AclEntry> acls = new ArrayList<>();
+                acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch").setPermission(FsAction.ALL).build());
+                fs.modifyAclEntries(esUserPath, acls);
+            }
+
+            // Install a pre-existing repository into HDFS
+            String directoryName = "readonly-repository";
+            String archiveName = directoryName + ".tar.gz";
+            URL readOnlyRepositoryArchiveURL = MiniHDFS.class.getClassLoader().getResource(archiveName);
+            if (readOnlyRepositoryArchiveURL != null) {
+                Path tempDirectory = Files.createTempDirectory(MiniHDFS.class.getName());
+                File readOnlyRepositoryArchive = tempDirectory.resolve(archiveName).toFile();
+                FileUtils.copyURLToFile(readOnlyRepositoryArchiveURL, readOnlyRepositoryArchive);
+                FileUtil.unTar(readOnlyRepositoryArchive, tempDirectory.toFile());
+
+                fs.copyFromLocalFile(true, true,
+                    new org.apache.hadoop.fs.Path(tempDirectory.resolve(directoryName).toAbsolutePath().toUri()),
+                    esUserPath.suffix("/existing/" + directoryName)
+                );
+
+                FileUtils.deleteDirectory(tempDirectory.toFile());
+            }
         }
 
         // write our PID file
diff --git a/test/fixtures/hdfs-fixture/src/main/resources/readonly-repository.tar.gz b/test/fixtures/hdfs-fixture/src/main/resources/readonly-repository.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..2cdb6d77c07d0a06029521d60fdc966e92248d72
GIT binary patch
literal 1314
[base85-encoded binary data for readonly-repository.tar.gz omitted; the data was truncated in this copy of the patch]
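
Note (not part of the patch above): the core change wraps the HDFS input stream so that read() and skip() run inside AccessController.doPrivileged() with an explicitly restricted permission set, because FSDataInputStream may open connections lazily on the first read. The stand-alone Java sketch below illustrates that pattern under stated assumptions: the class name PrivilegedReadStream, the example SocketPermission, and the main() usage are hypothetical and are not the Elasticsearch or Hadoop types from the patch, and the Elasticsearch-specific SpecialPermission.check() call is omitted.

import java.io.ByteArrayInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.SocketPermission;
import java.security.AccessController;
import java.security.Permission;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

// Hypothetical wrapper: every read is executed under doPrivileged, limited to
// the permissions passed in (analogous to getRestrictedExecutionPermissions()).
class PrivilegedReadStream extends FilterInputStream {

    private final Permission[] restrictedPermissions;

    PrivilegedReadStream(InputStream in, Permission... restrictedPermissions) {
        super(in);
        this.restrictedPermissions = restrictedPermissions;
    }

    @Override
    public int read() throws IOException {
        return doPrivilegedOrThrow(() -> in.read());
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        return doPrivilegedOrThrow(() -> in.read(b, off, len));
    }

    private <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
        try {
            // null context plus an explicit permission array: the action runs
            // with only these permissions, regardless of the caller's stack.
            return AccessController.doPrivileged(action, null, restrictedPermissions);
        } catch (PrivilegedActionException e) {
            throw (IOException) e.getCause();
        }
    }

    // Hypothetical usage: permit only a socket connection while reading.
    public static void main(String[] args) throws IOException {
        InputStream raw = new ByteArrayInputStream(new byte[] {42});
        try (InputStream s = new PrivilegedReadStream(raw,
                new SocketPermission("localhost:9999", "connect,resolve"))) {
            System.out.println(s.read()); // prints 42; no SecurityManager is required to run the sketch
        }
    }
}

As in the patch, the doPrivileged overload that takes an AccessControlContext and a Permission varargs list is what narrows the effective permissions for the duration of each wrapped read.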