HDFS-10424. DatanodeLifelineProtocol not usable in a secured cluster. Contributed by Chris Nauroth.

Author: Chris Nauroth 2016-05-20 12:47:41 -07:00
parent d364ceac85
commit bcde1562d2
5 changed files with 143 additions and 3 deletions

CommonConfigurationKeys.java

@@ -204,6 +204,9 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String
       HADOOP_SECURITY_SERVICE_AUTHORIZATION_TRACING =
       "security.trace.protocol.acl";
+  public static final String
+      HADOOP_SECURITY_SERVICE_AUTHORIZATION_DATANODE_LIFELINE =
+      "security.datanode.lifeline.protocol.acl";
   public static final String
       SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
   public static final String

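For context on how the new key is consumed: once service-level authorization is switched on, the RPC layer checks the caller against the ACL registered under security.datanode.lifeline.protocol.acl before dispatching any lifeline call. Below is a minimal sketch exercising the key, assuming hadoop.security.authorization is enabled; the class name is hypothetical, and in practice the entry would live in hadoop-policy.xml rather than be set in code.

import org.apache.hadoop.conf.Configuration;

public class LifelineAclExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Service-level authorization must be enabled for any *.acl key to be consulted.
    conf.setBoolean("hadoop.security.authorization", true);
    // Restrict the lifeline protocol to the HDFS service user; the default of
    // "*" would allow any authenticated caller.
    conf.set("security.datanode.lifeline.protocol.acl", "hdfs");
    System.out.println(conf.get("security.datanode.lifeline.protocol.acl"));
  }
}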
HDFSPolicyProvider.java

@@ -24,6 +24,7 @@ import org.apache.hadoop.ha.ZKFCProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeLifelineProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
@@ -76,7 +77,10 @@ public class HDFSPolicyProvider extends PolicyProvider {
           GenericRefreshProtocol.class),
       new Service(
           CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_TRACING,
-          TraceAdminProtocol.class)
+          TraceAdminProtocol.class),
+      new Service(
+          CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DATANODE_LIFELINE,
+          DatanodeLifelineProtocol.class)
   };
 
   @Override

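With the Service entry above in place, the lifeline protocol is enumerable alongside every other HDFS service ACL, which is exactly what the new test at the end of this commit relies on. A short sketch, again with a hypothetical class name, that prints each registered ACL key next to its protocol interface:

import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.security.authorize.Service;

public class PolicyProviderDump {
  public static void main(String[] args) {
    // Walk the provider the same way the authorization layer does on refresh;
    // the new lifeline entry should appear next to the existing ones.
    for (Service service : new HDFSPolicyProvider().getServices()) {
      System.out.println(
          service.getServiceKey() + " -> " + service.getProtocol().getName());
    }
  }
}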
JournalNodeRpcServer.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URL;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -45,9 +46,12 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC.Server;
 import org.apache.hadoop.net.NetUtils;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
 
-class JournalNodeRpcServer implements QJournalProtocol {
+@InterfaceAudience.Private
+@VisibleForTesting
+public class JournalNodeRpcServer implements QJournalProtocol {
   private static final int HANDLER_COUNT = 5;
   private final JournalNode jn;

NameNodeRpcServer.java

@@ -44,6 +44,7 @@ import java.util.concurrent.Callable;
 import com.google.common.collect.Lists;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationTaskStatus;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
@@ -208,7 +209,9 @@ import com.google.protobuf.BlockingService;
  * This class is responsible for handling all of the RPC calls to the NameNode.
  * It is created, started, and stopped by {@link NameNode}.
  */
-class NameNodeRpcServer implements NamenodeProtocols {
+@InterfaceAudience.Private
+@VisibleForTesting
+public class NameNodeRpcServer implements NamenodeProtocols {
   private static final Logger LOG = NameNode.LOG;
 
   private static final Logger stateChangeLog = NameNode.stateChangeLog;

TestHDFSPolicyProvider.java (new file)

@@ -0,0 +1,126 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs;

import static org.junit.Assert.*;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

import org.apache.commons.lang.ClassUtils;
import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol;
import org.apache.hadoop.hdfs.qjournal.server.JournalNodeRpcServer;
import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.security.authorize.Service;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Test suite covering HDFSPolicyProvider. We expect that it contains a
 * security policy definition for every RPC protocol used in HDFS. The test
 * suite works by scanning an RPC server's class to find the protocol interfaces
 * it implements, and then comparing that to the protocol interfaces covered in
 * HDFSPolicyProvider. This is a parameterized test repeated for multiple HDFS
 * RPC server classes.
 */
@RunWith(Parameterized.class)
public class TestHDFSPolicyProvider {

  private static final Logger LOG =
      LoggerFactory.getLogger(TestHDFSPolicyProvider.class);

  private static List<Class<?>> policyProviderProtocols;

  private static final Comparator<Class<?>> CLASS_NAME_COMPARATOR =
      new Comparator<Class<?>>() {
        @Override
        public int compare(Class<?> lhs, Class<?> rhs) {
          return lhs.getName().compareTo(rhs.getName());
        }
      };

  @Rule
  public TestName testName = new TestName();

  private final Class<?> rpcServerClass;

  @BeforeClass
  public static void initialize() {
    Service[] services = new HDFSPolicyProvider().getServices();
    policyProviderProtocols = new ArrayList<>(services.length);
    for (Service service : services) {
      policyProviderProtocols.add(service.getProtocol());
    }
    Collections.sort(policyProviderProtocols, CLASS_NAME_COMPARATOR);
  }

  public TestHDFSPolicyProvider(Class<?> rpcServerClass) {
    this.rpcServerClass = rpcServerClass;
  }

  @Parameters(name = "protocolsForServer-{0}")
  public static List<Class<?>[]> data() {
    return Arrays.asList(new Class<?>[][]{
        {NameNodeRpcServer.class},
        {DataNode.class},
        {JournalNodeRpcServer.class}
    });
  }

  @Test
  public void testPolicyProviderForServer() {
    List<?> ifaces = ClassUtils.getAllInterfaces(rpcServerClass);
    List<Class<?>> serverProtocols = new ArrayList<>(ifaces.size());
    for (Object obj : ifaces) {
      Class<?> iface = (Class<?>)obj;
      // ReconfigurationProtocol is not covered in HDFSPolicyProvider
      // currently, so we have a special case to skip it. This needs follow-up
      // investigation.
      if (iface.getSimpleName().endsWith("Protocol") &&
          iface != ReconfigurationProtocol.class) {
        serverProtocols.add(iface);
      }
    }
    Collections.sort(serverProtocols, CLASS_NAME_COMPARATOR);
    LOG.info("Running test {} for RPC server {}. Found server protocols {} "
        + "and policy provider protocols {}.", testName.getMethodName(),
        rpcServerClass.getName(), serverProtocols, policyProviderProtocols);
    assertFalse("Expected to find at least one protocol in server.",
        serverProtocols.isEmpty());
    assertTrue(
        String.format("Expected all protocols for server %s to be defined in "
            + "%s. Server contains protocols %s. Policy provider contains "
            + "protocols %s.", rpcServerClass.getName(),
            HDFSPolicyProvider.class.getName(), serverProtocols,
            policyProviderProtocols),
        policyProviderProtocols.containsAll(serverProtocols));
  }
}
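Since the suite is parameterized by RPC server class, each server's protocol list is verified independently; running it alone with Maven Surefire should look roughly like mvn test -Dtest=TestHDFSPolicyProvider from the hadoop-hdfs module (module path assumed).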