diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 42a8da1e8d4..7ebc8daef3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7301,7 +7301,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       Metadata metadata = FSDirEncryptionZoneOp.ensureKeyIsInitialized(dir,
           keyName, src);
       final FSPermissionChecker pc = getPermissionChecker();
-      checkSuperuserPrivilege(pc);
       checkOperation(OperationCategory.WRITE);
       final FileStatus resultingStat;
       writeLock();
@@ -7363,7 +7362,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     boolean success = false;
     checkOperation(OperationCategory.READ);
     final FSPermissionChecker pc = getPermissionChecker();
-    checkSuperuserPrivilege(pc);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
@@ -7401,7 +7399,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     boolean success = false;
     checkOperation(OperationCategory.READ);
     final FSPermissionChecker pc = getPermissionChecker();
-    checkSuperuserPrivilege(pc);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 3b5e2bc6119..19870383ea7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1307,7 +1307,6 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   @Override // NamenodeProtocol
   public CheckpointSignature rollEditLog() throws IOException {
     checkNNStartup();
-    namesystem.checkSuperuserPrivilege();
     return namesystem.rollEditLog();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 9b977be33da..e189589955b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -47,14 +47,18 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
 import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -94,6 +98,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.ScriptBasedMapping;
@@ -101,6 +106,7 @@ import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
@@ -1663,6 +1669,59 @@ public class TestDistributedFileSystem {
     }
   }
 
+  @Test
+  public void testSuperUserPrivilege() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    File tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString());
+    final Path jksPath = new Path(tmpDir.toString(), "test.jks");
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
+
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      Path dir = new Path("/testPrivilege");
+      dfs.mkdirs(dir);
+
+      final KeyProvider provider =
+          cluster.getNameNode().getNamesystem().getProvider();
+      final KeyProvider.Options options = KeyProvider.options(conf);
+      provider.createKey("key", options);
+      provider.flush();
+
+      // Create a non-super user.
+      UserGroupInformation user = UserGroupInformation.createUserForTesting(
+          "Non_SuperUser", new String[] {"Non_SuperGroup"});
+
+      DistributedFileSystem userfs = (DistributedFileSystem) user.doAs(
+          (PrivilegedExceptionAction<FileSystem>) () -> FileSystem.get(conf));
+
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "Superuser privilege is required",
+          () -> userfs.createEncryptionZone(dir, "key"));
+
+      RemoteException re = LambdaTestUtils.intercept(RemoteException.class,
+          "Superuser privilege is required",
+          () -> userfs.listEncryptionZones().hasNext());
+      assertTrue(re.unwrapRemoteException() instanceof AccessControlException);
+
+      re = LambdaTestUtils.intercept(RemoteException.class,
+          "Superuser privilege is required",
+          () -> userfs.listReencryptionStatus().hasNext());
+      assertTrue(re.unwrapRemoteException() instanceof AccessControlException);
+
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "Superuser privilege is required",
+          () -> user.doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+              cluster.getNameNode().getRpcServer().rollEditLog();
+              return null;
+            }
+          }));
+    }
+  }
+
   @Test
   public void testRemoveErasureCodingPolicy() throws Exception {
     Configuration conf = getTestConfiguration();