HDFS-14418. Remove redundant super user privilege checks from namenode. Contributed by Ayush Saxena.

Inigo Goiri 2019-04-16 10:34:31 -07:00
parent 2364c7d0bf
commit be6c8014e6
3 changed files with 55 additions and 4 deletions
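
For orientation, the redundancy this change removes looks roughly like the sketch below: an outer superuser check sits directly in front of a call that enforces the same privilege itself, so the outer check adds no protection. The classes and bodies here are simplified stand-ins, not the actual Hadoop implementations; the privilege requirement itself is unchanged, as the new test added below verifies.

// Simplified illustration only; the real FSNamesystem and NameNodeRpcServer
// methods are more involved than this sketch.
import java.io.IOException;
import org.apache.hadoop.security.AccessControlException;

class RpcLayerSketch {
  private final NamesystemSketch namesystem = new NamesystemSketch();

  void rollEditLog() throws IOException {
    // Before this commit an extra namesystem.checkSuperuserPrivilege() call
    // sat here; it duplicated the check made inside the delegate below.
    namesystem.rollEditLog();
  }
}

class NamesystemSketch {
  // Placeholder for the real permission evaluation.
  private boolean callerIsSuperUser = false;

  void rollEditLog() throws IOException {
    checkSuperuserPrivilege();   // the check that is kept
    // ... roll the edit log under the namesystem write lock ...
  }

  void checkSuperuserPrivilege() throws AccessControlException {
    if (!callerIsSuperUser) {
      throw new AccessControlException(
          "Access denied. Superuser privilege is required");
    }
  }
}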

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -7397,7 +7397,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       Metadata metadata = FSDirEncryptionZoneOp.ensureKeyIsInitialized(dir,
           keyName, src);
       final FSPermissionChecker pc = getPermissionChecker();
-      checkSuperuserPrivilege(pc);
       checkOperation(OperationCategory.WRITE);
       final FileStatus resultingStat;
       writeLock();
@@ -7459,7 +7458,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     boolean success = false;
     checkOperation(OperationCategory.READ);
     final FSPermissionChecker pc = getPermissionChecker();
-    checkSuperuserPrivilege(pc);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
@@ -7497,7 +7495,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     boolean success = false;
     checkOperation(OperationCategory.READ);
     final FSPermissionChecker pc = getPermissionChecker();
-    checkSuperuserPrivilege(pc);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
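
The three hunks above drop the same pre-lock call from createEncryptionZone and from the two listing methods (listEncryptionZones and listReencryptionStatus, going by the operations the new test covers); superuser enforcement for these operations remains elsewhere on the call path. As a reminder of how the affected operations are normally used, here is a small admin-side sketch; it assumes "dfs" was obtained by an HDFS superuser and that the key "key" already exists in the configured KeyProvider.

// Usage sketch only; assumes a superuser DistributedFileSystem handle and an
// existing encryption key named "key".
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

class EncryptionZoneAdminSketch {
  static void createAndList(DistributedFileSystem dfs) throws IOException {
    // Both calls below are superuser-only, before and after this commit.
    dfs.createEncryptionZone(new Path("/secure"), "key");
    RemoteIterator<EncryptionZone> zones = dfs.listEncryptionZones();
    while (zones.hasNext()) {
      System.out.println(zones.next().getPath());
    }
  }
}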

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -1331,7 +1331,6 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   @Override // NamenodeProtocol
   public CheckpointSignature rollEditLog() throws IOException {
     checkNNStartup();
-    namesystem.checkSuperuserPrivilege();
     return namesystem.rollEditLog();
   }
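
This rollEditLog is the NamenodeProtocol variant used by checkpointing nodes such as the SecondaryNameNode, not the ClientProtocol rollEdits call; only the duplicate check at the RPC entry point is removed, and namesystem.rollEditLog() still rejects non-superusers. The new test below exercises exactly this entry point as a non-privileged user, using a doAs pattern along these lines (the names here are placeholders for objects the test sets up):

// Fragment with placeholder names: "nonSuperUser" is a test UGI created via
// UserGroupInformation.createUserForTesting(...), "cluster" a running
// MiniDFSCluster; both come from the test fixture.
nonSuperUser.doAs((PrivilegedExceptionAction<Void>) () -> {
  cluster.getNameNode().getRpcServer().rollEditLog();  // expected to be rejected
  return null;
});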

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -97,6 +97,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.ScriptBasedMapping;
@@ -104,6 +105,7 @@ import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
@@ -1804,6 +1806,59 @@ public class TestDistributedFileSystem {
     }
   }
 
+  @Test
+  public void testSuperUserPrivilege() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    File tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString());
+    final Path jksPath = new Path(tmpDir.toString(), "test.jks");
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
+
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      Path dir = new Path("/testPrivilege");
+      dfs.mkdirs(dir);
+
+      final KeyProvider provider =
+          cluster.getNameNode().getNamesystem().getProvider();
+      final KeyProvider.Options options = KeyProvider.options(conf);
+      provider.createKey("key", options);
+      provider.flush();
+
+      // Create a non-super user.
+      UserGroupInformation user = UserGroupInformation.createUserForTesting(
+          "Non_SuperUser", new String[] {"Non_SuperGroup"});
+
+      DistributedFileSystem userfs = (DistributedFileSystem) user.doAs(
+          (PrivilegedExceptionAction<FileSystem>) () -> FileSystem.get(conf));
+
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "Superuser privilege is required",
+          () -> userfs.createEncryptionZone(dir, "key"));
+
+      RemoteException re = LambdaTestUtils.intercept(RemoteException.class,
+          "Superuser privilege is required",
+          () -> userfs.listEncryptionZones().hasNext());
+      assertTrue(re.unwrapRemoteException() instanceof AccessControlException);
+
+      re = LambdaTestUtils.intercept(RemoteException.class,
+          "Superuser privilege is required",
+          () -> userfs.listReencryptionStatus().hasNext());
+      assertTrue(re.unwrapRemoteException() instanceof AccessControlException);
+
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "Superuser privilege is required",
+          () -> user.doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+              cluster.getNameNode().getRpcServer().rollEditLog();
+              return null;
+            }
+          }));
+    }
+  }
+
   @Test
   public void testRemoveErasureCodingPolicy() throws Exception {
     Configuration conf = getTestConfiguration();
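
One detail worth noting in the new test: createEncryptionZone and rollEditLog surface AccessControlException directly, while the two listing calls surface a RemoteException (presumably because their client paths do not unwrap server-side exceptions), so the test unwraps it before asserting the type. A minimal standalone illustration of that unwrap step, with the exception built by hand rather than produced by a cluster:

// Hand-built RemoteException standing in for what the RPC layer returns to a
// non-superuser; unwrapRemoteException() re-instantiates the server-side
// exception from the class name carried inside the RemoteException.
import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;

class UnwrapSketch {
  public static void main(String[] args) {
    RemoteException re = new RemoteException(
        AccessControlException.class.getName(),
        "Access denied. Superuser privilege is required");
    IOException unwrapped = re.unwrapRemoteException();
    System.out.println(unwrapped instanceof AccessControlException);  // true
  }
}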