HDFS-14418. Remove redundant super user privilege checks from namenode. Contributed by Ayush Saxena.
(cherry picked from commit be6c8014e6)
(cherry picked from commit 06b2eceb76)
parent 57ed0ff225
commit 7a915a0ba3
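For orientation: each line deleted below is a superuser check that ran twice on the same RPC path, once at the entry point (NameNodeRpcServer, or the top of the FSNamesystem method) and once again inside the operation it delegates to, so removing the outer call changes no observable behavior. Here is a minimal sketch of that shape; RpcServer and Namesystem are simplified stand-ins, not the real NameNodeRpcServer and FSNamesystem, and the method bodies are illustrative, not Hadoop's actual code.

// RedundantCheckSketch.java -- hedged illustration of the double-check
// pattern this commit cleans up; none of this is Hadoop source.
import java.io.IOException;

public class RedundantCheckSketch {

  static class Namesystem {
    private final boolean callerIsSuperuser;

    Namesystem(boolean callerIsSuperuser) {
      this.callerIsSuperuser = callerIsSuperuser;
    }

    void checkSuperuserPrivilege() throws IOException {
      if (!callerIsSuperuser) {
        throw new IOException("Superuser privilege is required");
      }
    }

    long rollEditLog() throws IOException {
      // The operation guards itself; this is the check that remains.
      checkSuperuserPrivilege();
      return 42L; // stand-in for the new checkpoint signature
    }
  }

  static class RpcServer {
    private final Namesystem namesystem;

    RpcServer(Namesystem namesystem) {
      this.namesystem = namesystem;
    }

    long rollEditLog() throws IOException {
      // Before this patch the RPC layer also called
      // namesystem.checkSuperuserPrivilege() here, a second check on the
      // exact same code path; that duplicate call is what gets deleted.
      return namesystem.rollEditLog();
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(new RpcServer(new Namesystem(true)).rollEditLog());
    try {
      new RpcServer(new Namesystem(false)).rollEditLog();
    } catch (IOException e) {
      System.out.println("rejected: " + e.getMessage()); // still enforced
    }
  }
}

The new test at the bottom of the diff pins down exactly this contract: with only the inner check left, a non-superuser caller must still be rejected.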
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -7301,7 +7301,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       Metadata metadata = FSDirEncryptionZoneOp.ensureKeyIsInitialized(dir,
           keyName, src);
       final FSPermissionChecker pc = getPermissionChecker();
-      checkSuperuserPrivilege(pc);
       checkOperation(OperationCategory.WRITE);
       final FileStatus resultingStat;
       writeLock();
@@ -7363,7 +7362,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     boolean success = false;
     checkOperation(OperationCategory.READ);
     final FSPermissionChecker pc = getPermissionChecker();
-    checkSuperuserPrivilege(pc);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
@@ -7401,7 +7399,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     boolean success = false;
     checkOperation(OperationCategory.READ);
     final FSPermissionChecker pc = getPermissionChecker();
-    checkSuperuserPrivilege(pc);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -1307,7 +1307,6 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   @Override // NamenodeProtocol
   public CheckpointSignature rollEditLog() throws IOException {
     checkNNStartup();
-    namesystem.checkSuperuserPrivilege();
     return namesystem.rollEditLog();
   }
 
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -47,14 +47,18 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
 import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -94,6 +98,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.ScriptBasedMapping;
@@ -101,6 +106,7 @@ import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
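A note on LambdaTestUtils.intercept, newly imported above and used throughout the test below: it evaluates the supplied lambda, fails unless the lambda throws the given exception type with the given text in its message, and returns the caught exception, which is why the test can chain unwrapRemoteException() on the result. A tiny hedged usage sketch follows; InterceptSketch and failingOperation are illustrative names (it assumes the hadoop-common test-jar on the classpath), but the three-argument intercept call shape matches the test itself.

import java.io.IOException;
import org.apache.hadoop.test.LambdaTestUtils;

public class InterceptSketch {
  // Illustrative helper that always fails, standing in for an RPC call
  // made without superuser privilege.
  static String failingOperation() throws IOException {
    throw new IOException("Superuser privilege is required");
  }

  public static void main(String[] args) throws Exception {
    // intercept() runs the lambda, asserts it throws IOException whose
    // message contains the given text, and hands back the caught exception.
    IOException e = LambdaTestUtils.intercept(IOException.class,
        "Superuser privilege is required",
        () -> failingOperation());
    System.out.println("caught as expected: " + e.getMessage());
  }
}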
@@ -1663,6 +1669,59 @@ public class TestDistributedFileSystem {
     }
   }
 
+  @Test
+  public void testSuperUserPrivilege() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    File tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString());
+    final Path jksPath = new Path(tmpDir.toString(), "test.jks");
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
+
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      Path dir = new Path("/testPrivilege");
+      dfs.mkdirs(dir);
+
+      final KeyProvider provider =
+          cluster.getNameNode().getNamesystem().getProvider();
+      final KeyProvider.Options options = KeyProvider.options(conf);
+      provider.createKey("key", options);
+      provider.flush();
+
+      // Create a non-super user.
+      UserGroupInformation user = UserGroupInformation.createUserForTesting(
+          "Non_SuperUser", new String[] {"Non_SuperGroup"});
+
+      DistributedFileSystem userfs = (DistributedFileSystem) user.doAs(
+          (PrivilegedExceptionAction<FileSystem>) () -> FileSystem.get(conf));
+
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "Superuser privilege is required",
+          () -> userfs.createEncryptionZone(dir, "key"));
+
+      RemoteException re = LambdaTestUtils.intercept(RemoteException.class,
+          "Superuser privilege is required",
+          () -> userfs.listEncryptionZones().hasNext());
+      assertTrue(re.unwrapRemoteException() instanceof AccessControlException);
+
+      re = LambdaTestUtils.intercept(RemoteException.class,
+          "Superuser privilege is required",
+          () -> userfs.listReencryptionStatus().hasNext());
+      assertTrue(re.unwrapRemoteException() instanceof AccessControlException);
+
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "Superuser privilege is required",
+          () -> user.doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+              cluster.getNameNode().getRpcServer().rollEditLog();
+              return null;
+            }
+          }));
+    }
+  }
+
   @Test
   public void testRemoveErasureCodingPolicy() throws Exception {
     Configuration conf = getTestConfiguration();
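Taken together, the test covers each call site the patch touches: createEncryptionZone and the two listing operations exercise the FSNamesystem changes, while the rollEditLog call through getRpcServer() confirms that the check dropped from NameNodeRpcServer is still enforced one layer down. To run it in isolation, something like the following Surefire invocation should work from the source root (module path assumed):

mvn -pl hadoop-hdfs-project/hadoop-hdfs test -Dtest=TestDistributedFileSystem#testSuperUserPrivilege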