HDDS-1038. Support Service Level Authorization for Ozone. Contributed by Xiaoyu Yao and Ajay Kumar.
Parent: 59ba3552d3
Commit: 9e0f3d1c52
@@ -183,4 +183,34 @@ public final class HddsConfigKeys {
  public static final String HDDS_GRPC_TLS_TEST_CERT = "hdds.grpc.tls" +
      ".test_cert";
  public static final boolean HDDS_GRPC_TLS_TEST_CERT_DEFAULT = false;

  // Comma separated acls (users, groups) allowing clients accessing
  // datanode container protocol
  // when hadoop.security.authorization is true, this needs to be set in
  // hadoop-policy.xml, "*" allows all users/groups to access.
  public static final String
      HDDS_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL =
      "hdds.security.client.datanode.container.protocol.acl";

  // Comma separated acls (users, groups) allowing clients accessing
  // scm container protocol
  // when hadoop.security.authorization is true, this needs to be set in
  // hadoop-policy.xml, "*" allows all users/groups to access.
  public static final String HDDS_SECURITY_CLIENT_SCM_CONTAINER_PROTOCOL_ACL =
      "hdds.security.client.scm.container.protocol.acl";

  // Comma separated acls (users, groups) allowing clients accessing
  // scm block protocol
  // when hadoop.security.authorization is true, this needs to be set in
  // hadoop-policy.xml, "*" allows all users/groups to access.
  public static final String HDDS_SECURITY_CLIENT_SCM_BLOCK_PROTOCOL_ACL =
      "hdds.security.client.scm.block.protocol.acl";

  // Comma separated acls (users, groups) allowing clients accessing
  // scm certificate protocol
  // when hadoop.security.authorization is true, this needs to be set in
  // hadoop-policy.xml, "*" allows all users/groups to access.
  public static final String HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL =
      "hdds.security.client.scm.certificate.protocol.acl";

}
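The values behind these keys use Hadoop's standard service-level authorization ACL format: a comma-separated list of users, a space, then a comma-separated list of groups, with "*" meaning everyone. A minimal sketch of how such a value is interpreted, using Hadoop's AccessControlList directly (the principal and group names are hypothetical and not part of this patch):

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class AclValueSketch {
  public static void main(String[] args) {
    // "scm,ozone" are users and "ozoneadmins" is a group; all three names are made up.
    AccessControlList acl = new AccessControlList("scm,ozone ozoneadmins");
    UserGroupInformation caller = UserGroupInformation.createRemoteUser("ozone");
    // Prints true: "ozone" is listed explicitly in the user part of the ACL.
    System.out.println(acl.isUserAllowed(caller));
  }
}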
@@ -31,6 +31,12 @@ import org.apache.hadoop.security.KerberosInfo;
@InterfaceAudience.Private
public interface SCMSecurityProtocol {

  @SuppressWarnings("checkstyle:ConstantName")
  /**
   * Version 1: Initial version.
   */
  long versionID = 1L;

  /**
   * Get SCM signed certificate for DataNode.
   *
@@ -26,7 +26,7 @@ import org.apache.hadoop.security.KerberosInfo;
 */

@ProtocolInfo(protocolName =
    "org.apache.hadoop.ozone.protocol.SCMSecurityProtocol",
    "org.apache.hadoop.hdds.protocol.SCMSecurityProtocol",
    protocolVersion = 1)
@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
public interface SCMSecurityProtocolPB extends
@@ -37,6 +37,12 @@ import java.util.List;
@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
public interface ScmBlockLocationProtocol extends Closeable {

  @SuppressWarnings("checkstyle:ConstantName")
  /**
   * Version 1: Initial version.
   */
  long versionID = 1L;

  /**
   * Asks SCM where a block should be allocated. SCM responds with the
   * set of datanodes that should be used creating this block.
@@ -37,6 +37,13 @@ import org.apache.hadoop.security.KerberosInfo;
 */
@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
public interface StorageContainerLocationProtocol extends Closeable {

  @SuppressWarnings("checkstyle:ConstantName")
  /**
   * Version 1: Initial version.
   */
  long versionID = 1L;

  /**
   * Asks SCM where a container should be allocated. SCM responds with the
   * set of datanodes that should be used creating this container.
@@ -30,7 +30,7 @@ import org.apache.hadoop.security.KerberosInfo;
 * Protocol Buffers service interface to add Hadoop-specific annotations.
 */
@ProtocolInfo(protocolName =
    "org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol",
    "org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol",
    protocolVersion = 1)
@KerberosInfo(
    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
@@ -1963,5 +1963,50 @@
        Keytab used by Freon.
      </description>
  </property>
  <property>
    <name>hdds.security.client.datanode.container.protocol.acl</name>
    <value>*</value>
    <tag>SECURITY</tag>
    <description>
      Comma separated list of users and groups allowed to access
      client datanode container protocol.
    </description>
  </property>
  <property>
    <name>hdds.security.client.scm.block.protocol.acl</name>
    <value>*</value>
    <tag>SECURITY</tag>
    <description>
      Comma separated list of users and groups allowed to access
      client scm block protocol.
    </description>
  </property>
  <property>
    <name>hdds.security.client.scm.certificate.protocol.acl</name>
    <value>*</value>
    <tag>SECURITY</tag>
    <description>
      Comma separated list of users and groups allowed to access
      client scm certificate protocol.
    </description>
  </property>
  <property>
    <name>hdds.security.client.scm.container.protocol.acl</name>
    <value>*</value>
    <tag>SECURITY</tag>
    <description>
      Comma separated list of users and groups allowed to access
      client scm container protocol.
    </description>
  </property>
  <property>
    <name>ozone.om.security.client.protocol.acl</name>
    <value>*</value>
    <tag>SECURITY</tag>
    <description>
      Comma separated list of users and groups allowed to access
      client ozone manager protocol.
    </description>
  </property>

</configuration>
@@ -47,6 +47,13 @@ import org.apache.hadoop.security.KerberosInfo;
    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
@InterfaceAudience.Private
public interface StorageContainerDatanodeProtocol {

  @SuppressWarnings("checkstyle:ConstantName")
  /**
   * Version 1: Initial version.
   */
  long versionID = 1L;

  /**
   * Returns SCM version.
   * @return Version info.
@@ -23,6 +23,7 @@ package org.apache.hadoop.hdds.scm.server;

import com.google.common.collect.Maps;
import com.google.protobuf.BlockingService;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;

@@ -117,7 +118,10 @@ public class SCMBlockProtocolServer implements
    updateRPCListenAddress(
        conf, OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, scmBlockAddress,
        blockRpcServer);

    if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
        false)) {
      blockRpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance());
    }
  }

  public RPC.Server getBlockRpcServer() {
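The guard shown here is repeated in each server touched by the patch: service ACLs are only installed when hadoop.security.authorization is enabled, so clusters that leave authorization off see no behavior change. A hypothetical helper that captures the shared shape (not part of the patch, shown only for clarity):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.authorize.PolicyProvider;

final class ServiceAclUtil {
  private ServiceAclUtil() {
  }

  // Load service-level ACLs into the server only when authorization is turned on.
  static void refreshAclIfEnabled(Configuration conf, RPC.Server server,
      PolicyProvider provider) {
    if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
        false)) {
      server.refreshServiceAcl(conf, provider);
    }
  }
}

The patch keeps the check inline in each server instead of introducing such a helper.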
@@ -25,6 +25,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.protobuf.BlockingService;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

@@ -133,7 +134,10 @@ public class SCMClientProtocolServer implements
    clientRpcAddress =
        updateRPCListenAddress(conf, OZONE_SCM_CLIENT_ADDRESS_KEY,
            scmAddress, clientRpcServer);

    if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
        false)) {
      clientRpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance());
    }
  }

  public RPC.Server getClientRpcServer() {
@@ -25,6 +25,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.protobuf.BlockingService;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

@@ -179,6 +180,11 @@ public class SCMDatanodeProtocolServer implements
        conf, OZONE_SCM_DATANODE_ADDRESS_KEY, datanodeRpcAddr,
        datanodeRpcServer);

    if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
        false)) {
      datanodeRpcServer.refreshServiceAcl(conf,
          SCMPolicyProvider.getInstance());
    }
  }

  public void start() {
@@ -0,0 +1,80 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.apache.hadoop.hdds.scm.server;


import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;

import java.util.concurrent.atomic.AtomicReference;

import static org.apache.hadoop.hdds.HddsConfigKeys.*;

/**
 * {@link PolicyProvider} for SCM protocols.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class SCMPolicyProvider extends PolicyProvider {

  private static AtomicReference<SCMPolicyProvider> atomicReference =
      new AtomicReference<>();

  private SCMPolicyProvider() {
  }

  @Private
  @Unstable
  public static SCMPolicyProvider getInstance() {
    if (atomicReference.get() == null) {
      atomicReference.compareAndSet(null, new SCMPolicyProvider());
    }
    return atomicReference.get();
  }

  private static final Service[] SCM_SERVICES =
      new Service[]{
          new Service(
              HDDS_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL,
              StorageContainerDatanodeProtocol.class),
          new Service(
              HDDS_SECURITY_CLIENT_SCM_CONTAINER_PROTOCOL_ACL,
              StorageContainerLocationProtocol.class),
          new Service(
              HDDS_SECURITY_CLIENT_SCM_BLOCK_PROTOCOL_ACL,
              ScmBlockLocationProtocol.class),
          new Service(
              HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL,
              SCMSecurityProtocol.class),
      };

  @SuppressFBWarnings("EI_EXPOSE_REP")
  @Override
  public Service[] getServices() {
    return SCM_SERVICES;
  }

}
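A PolicyProvider is just a mapping from ACL configuration keys to the protocol interfaces they protect; Hadoop's ServiceAuthorizationManager consults that mapping when it authorizes an incoming RPC connection. A small sketch of how the new provider can be inspected, for example from a test (hypothetical code, not part of the patch):

import org.apache.hadoop.hdds.scm.server.SCMPolicyProvider;
import org.apache.hadoop.security.authorize.Service;

public class SCMPolicyProviderSketch {
  public static void main(String[] args) {
    SCMPolicyProvider provider = SCMPolicyProvider.getInstance();
    for (Service service : provider.getServices()) {
      // Each entry pairs an ACL key (resolved from hadoop-policy.xml) with a protocol class.
      System.out.println(service.getServiceKey() + " -> "
          + service.getProtocol().getName());
    }
  }
}

The AtomicReference-based singleton tolerates a race on first use: several threads may attempt the compareAndSet, but only one instance wins and every later call returns it.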
@@ -26,6 +26,7 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;

@@ -86,6 +87,10 @@ public class SCMSecurityProtocolServer implements SCMSecurityProtocol {
        SCMSecurityProtocolPB.class,
        secureProtoPbService,
        handlerCount);
    if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
        false)) {
      rpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance());
    }
  }

  /**
@@ -210,4 +210,10 @@ public final class OMConfigKeys {
  public static final long OZONE_DB_CHECKPOINT_TRANSFER_RATE_DEFAULT =
      0; //no throttling

  // Comma separated acls (users, groups) allowing clients accessing
  // OM client protocol
  // when hadoop.security.authorization is true, this needs to be set in
  // hadoop-policy.xml, "*" allows all users/groups to access.
  public static final String OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL =
      "ozone.om.security.client.protocol.acl";
}
@@ -47,6 +47,12 @@ import org.apache.hadoop.security.KerberosInfo;
public interface OzoneManagerProtocol
    extends OzoneManagerSecurityProtocol, Closeable {

  @SuppressWarnings("checkstyle:ConstantName")
  /**
   * Version 1: Initial version.
   */
  long versionID = 1L;

  /**
   * Creates a volume.
   * @param args - Arguments to create Volume.
@@ -30,7 +30,7 @@ import org.apache.hadoop.ozone.security.OzoneDelegationTokenSelector;
 * Protocol used to communicate with OM.
 */
@ProtocolInfo(protocolName =
    "org.apache.hadoop.ozone.protocol.OzoneManagerProtocol",
    "org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol",
    protocolVersion = 1)
@KerberosInfo(
    serverPrincipal = OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY)
@@ -45,6 +45,13 @@ CORE-SITE.XML_hadoop.security.authentication=kerberos
CORE-SITE.XML_hadoop.security.auth_to_local=RULE:[2:$1@$0](.*)s/.*/root/
CORE-SITE.XML_hadoop.security.key.provider.path=kms://http@kms:9600/kms

CORE-SITE.XML_hadoop.security.authorization=true
HADOOP-POLICY.XML_ozone.om.security.client.protocol.acl=*
HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=*
HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=*
HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=*
HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=*

HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
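The HADOOP-POLICY.XML_ entries above follow the same convention as the other prefixed keys in this file: the image's startup scripts render each PREFIX_name=value pair into the matching configuration file inside the container (an assumption based on how the surrounding entries are used, not something this patch defines). With hadoop.security.authorization turned on in core-site.xml, each server then resolves its protocol ACL from hadoop-policy.xml, falling back to "*". A rough sketch of the effective lookup, reusing a key from the entries above:

import org.apache.hadoop.conf.Configuration;

public class PolicyLookupSketch {
  public static void main(String[] args) {
    // hadoop-policy.xml has to be on the classpath for addResource(String) to find it.
    Configuration policy = new Configuration(false);
    policy.addResource("hadoop-policy.xml");
    // "*" (allow all users and groups) is the effective default when the key is absent.
    String acl = policy.get("hdds.security.client.scm.block.protocol.acl", "*");
    System.out.println("scm block protocol acl = " + acl);
  }
}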
@@ -55,8 +55,8 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.
 */
public class TestBCSID {

  private static OzoneConfiguration conf = new OzoneConfiguration();
  private static MiniOzoneCluster cluster;
  private static OzoneConfiguration conf;
  private static OzoneClient client;
  private static ObjectStore objectStore;
  private static String volumeName;

@@ -69,7 +69,6 @@ public class TestBCSID {
   */
  @BeforeClass
  public static void init() throws Exception {
    conf = new OzoneConfiguration();
    String path = GenericTestUtils
        .getTempPath(TestBCSID.class.getSimpleName());
    File baseDir = new File(path);
@@ -70,7 +70,7 @@ import static java.nio.charset.StandardCharsets.UTF_8;
public class TestCloseContainerHandlingByClient {

  private static MiniOzoneCluster cluster;
  private static OzoneConfiguration conf;
  private static OzoneConfiguration conf = new OzoneConfiguration();
  private static OzoneClient client;
  private static ObjectStore objectStore;
  private static int chunkSize;

@@ -88,7 +88,6 @@ public class TestCloseContainerHandlingByClient {
   */
  @BeforeClass
  public static void init() throws Exception {
    conf = new OzoneConfiguration();
    chunkSize = (int) OzoneConsts.MB;
    blockSize = 4 * chunkSize;
    conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms");
@@ -56,7 +56,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTER
public class TestContainerStateMachine {

  private static MiniOzoneCluster cluster;
  private static OzoneConfiguration conf;
  private static OzoneConfiguration conf = new OzoneConfiguration();
  private static OzoneClient client;
  private static ObjectStore objectStore;
  private static String volumeName;

@@ -70,7 +70,6 @@ public class TestContainerStateMachine {
   */
  @BeforeClass
  public static void init() throws Exception {
    conf = new OzoneConfiguration();
    path = GenericTestUtils
        .getTempPath(TestContainerStateMachine.class.getSimpleName());
    File baseDir = new File(path);
@@ -706,6 +706,7 @@ public abstract class TestOzoneRpcClientAbstract {
  }


  @Ignore("Debug Jenkins Timeout")
  @Test
  public void testPutKeyRatisThreeNodesParallel() throws IOException,
      InterruptedException {
@@ -56,6 +56,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
      <version>2.2.0</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>com.google.code.findbugs</groupId>
      <artifactId>findbugs</artifactId>
      <version>3.0.1</version>
      <scope>compile</scope>
    </dependency>

  </dependencies>
  <build>
@@ -0,0 +1,67 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.apache.hadoop.ozone.om;

import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;

import java.util.concurrent.atomic.AtomicReference;

import static org.apache.hadoop.ozone.om.OMConfigKeys
    .OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL;

/**
 * {@link PolicyProvider} for OM protocols.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class OMPolicyProvider extends PolicyProvider {

  private static AtomicReference<OMPolicyProvider> atomicReference =
      new AtomicReference<>();

  private OMPolicyProvider() {
  }

  @Private
  @Unstable
  public static OMPolicyProvider getInstance() {
    if (atomicReference.get() == null) {
      atomicReference.compareAndSet(null, new OMPolicyProvider());
    }
    return atomicReference.get();
  }

  private static final Service[] OM_SERVICES =
      new Service[]{
          new Service(OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL,
              OzoneManagerProtocol.class),
      };

  @SuppressFBWarnings("EI_EXPOSE_REP")
  @Override
  public Service[] getServices() {
    return OM_SERVICES;
  }

}
@@ -30,6 +30,7 @@ import java.util.Objects;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsConfigKeys;

@@ -263,20 +264,19 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    // Load HA related configurations
    loadOMHAConfigs(configuration);

    if (!testSecureOmFlag || !isOzoneSecurityEnabled()) {
      scmContainerClient = getScmContainerClient(configuration);
      // verifies that the SCM info in the OM Version file is correct.
      scmBlockClient = getScmBlockClient(configuration);
    scmContainerClient = getScmContainerClient(configuration);
    // verifies that the SCM info in the OM Version file is correct.
    scmBlockClient = getScmBlockClient(configuration);

    // For testing purpose only, not hit scm from om as Hadoop UGI can't login
    // two principals in the same JVM.
    if (!testSecureOmFlag) {
      ScmInfo scmInfo = scmBlockClient.getScmInfo();
      if (!(scmInfo.getClusterId().equals(omStorage.getClusterID()) && scmInfo
          .getScmId().equals(omStorage.getScmId()))) {
        throw new OMException("SCM version info mismatch.",
            ResultCodes.SCM_VERSION_MISMATCH_ERROR);
      }
    } else {
      // For testing purpose only
      scmContainerClient = null;
      scmBlockClient = null;
    }

    RPC.setProtocolEngine(configuration, OzoneManagerProtocolPB.class,
@@ -778,6 +778,11 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
        .build();

    DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);

    if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
        false)) {
      rpcServer.refreshServiceAcl(conf, OMPolicyProvider.getInstance());
    }
    return rpcServer;
  }