HBASE-6104. Require EXEC permission to call coprocessor endpoints

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1556098 13f79535-47bb-0310-9956-ffa450edef68
Andrew Kyle Purtell 2014-01-07 01:14:57 +00:00
parent b7427d664b
commit b6a875e83a
7 changed files with 311 additions and 2 deletions

hbase-default.xml

@@ -1112,4 +1112,20 @@ possible configurations would overwhelm and obscure the important.
as the SimpleLoadBalancer).
</description>
</property>
<property>
<name>hbase.security.exec.permission.checks</name>
<value>false</value>
<description>
If this setting is enabled and ACL-based access control is active (the
AccessController coprocessor is installed either as a system coprocessor
or on a table as a table coprocessor), then you must grant all relevant
users EXEC privilege if they require the ability to execute coprocessor
endpoint calls. EXEC privilege, like any other permission, can be
granted globally to a user, or to a user on a per table or per namespace
basis. For more information on coprocessor endpoints, see the coprocessor
section of the HBase online manual. For more information on granting or
revoking permissions using the AccessController, see the security
section of the HBase online manual.
</description>
</property>
</configuration>
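To illustrate the new setting: once it is enabled, EXEC can be granted through the AccessControlService endpoint hosted on the ACL table, following the same pattern this commit itself uses in TestVisibilityLabelsWithACL below. A minimal sketch; the table name "mytable" and user "someuser" are illustrative, not part of this change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.hbase.security.access.Permission;
import com.google.protobuf.BlockingRpcChannel;

public class GrantExecExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The ACL table hosts the AccessControlService endpoint
    HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME);
    try {
      TableName table = TableName.valueOf("mytable"); // illustrative table name
      BlockingRpcChannel service = acl.coprocessorService(table.getName());
      AccessControlService.BlockingInterface protocol =
          AccessControlService.newBlockingStub(service);
      // Grant EXEC on the table to "someuser" (illustrative user name)
      ProtobufUtil.grant(protocol, "someuser", table, null, null,
          Permission.Action.EXEC);
    } finally {
      acl.close();
    }
  }
}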

EndpointObserver.java (new file)

@@ -0,0 +1,65 @@
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.coprocessor;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
/**
* Coprocessors implement this interface to observe and mediate endpoint invocations
* on a region.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface EndpointObserver extends Coprocessor {
/**
* Called before an Endpoint service method is invoked.
* The request message can be altered by returning a new instance. Throwing an
* exception will abort the invocation.
* Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
* effect in this hook.
* @param ctx the environment provided by the region server
* @param service the endpoint service
* @param methodName the invoked service method
* @param request the request message
* @return the possibly modified message
* @throws IOException
*/
Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx, Service service,
String methodName, Message request) throws IOException;
/**
* Called after an Endpoint service method is invoked. The response message can be
* altered using the builder.
* @param ctx the environment provided by the region server
* @param service the endpoint service
* @param methodName the invoked service method
* @param request the request message
* @param responseBuilder the response message builder
* @throws IOException
*/
void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx, Service service,
String methodName, Message request, Message.Builder responseBuilder) throws IOException;
}
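As a usage sketch of the new interface, an observer might audit endpoint invocations by logging the service and method name in the pre hook while passing the request and response through untouched. The package, class name, and logging here are illustrative, not part of this commit:

package com.example.hbase; // illustrative package

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

import com.google.protobuf.Message;
import com.google.protobuf.Service;

public class AuditingEndpointObserver implements EndpointObserver {
  private static final Log LOG = LogFactory.getLog(AuditingEndpointObserver.class);

  // EndpointObserver extends Coprocessor, so start/stop must be implemented
  @Override
  public void start(CoprocessorEnvironment env) throws IOException { }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException { }

  @Override
  public Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
      Service service, String methodName, Message request) throws IOException {
    LOG.info("Endpoint call: " + service.getDescriptorForType().getName() + "." + methodName);
    // Return the request unchanged; a mediating observer could return a new instance
    return request;
  }

  @Override
  public void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
      Service service, String methodName, Message request, Message.Builder responseBuilder)
      throws IOException { }
}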

HRegion.java

@@ -5409,6 +5409,11 @@ public class HRegion implements HeapSize { // , Writable{
Message request = service.getRequestPrototype(methodDesc).newBuilderForType()
.mergeFrom(call.getRequest()).build();
if (coprocessorHost != null) {
request = coprocessorHost.preEndpointInvocation(service, methodName, request);
}
final Message.Builder responseBuilder =
service.getResponsePrototype(methodDesc).newBuilderForType();
service.callMethod(methodDesc, controller, request, new RpcCallback<Message>() {
@@ -5420,6 +5425,10 @@ public class HRegion implements HeapSize { // , Writable{
}
});
if (coprocessorHost != null) {
coprocessorHost.postEndpointInvocation(service, methodName, request, responseBuilder);
}
return responseBuilder.build();
}

RegionCoprocessorHost.java

@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
@@ -71,6 +72,8 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.util.StringUtils;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
/**
* Implements the coprocessor environment and runtime support for coprocessors
@@ -2093,4 +2096,54 @@ public class RegionCoprocessorHost
}
return newCell;
}
public Message preEndpointInvocation(final Service service, final String methodName,
Message request) throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env : coprocessors) {
if (env.getInstance() instanceof EndpointObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
Thread currentThread = Thread.currentThread();
ClassLoader cl = currentThread.getContextClassLoader();
try {
currentThread.setContextClassLoader(env.getClassLoader());
request = ((EndpointObserver) env.getInstance()).preEndpointInvocation(ctx, service,
methodName, request);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
} finally {
currentThread.setContextClassLoader(cl);
}
if (ctx.shouldComplete()) {
break;
}
}
}
return request;
}
public void postEndpointInvocation(final Service service, final String methodName,
final Message request, final Message.Builder responseBuilder) throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env : coprocessors) {
if (env.getInstance() instanceof EndpointObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
Thread currentThread = Thread.currentThread();
ClassLoader cl = currentThread.getContextClassLoader();
try {
currentThread.setContextClassLoader(env.getClassLoader());
((EndpointObserver) env.getInstance()).postEndpointInvocation(ctx, service,
methodName, request, responseBuilder);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
} finally {
currentThread.setContextClassLoader(cl);
}
if (ctx.shouldComplete()) {
break;
}
}
}
}
}

AccessController.java

@@ -25,6 +25,7 @@ import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import com.google.protobuf.Message;
import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
@@ -128,13 +129,16 @@ import static org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Acc
*/
public class AccessController extends BaseRegionObserver
implements MasterObserver, RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver {
public static final Log LOG = LogFactory.getLog(AccessController.class);
private static final Log AUDITLOG =
LogFactory.getLog("SecurityLogger."+AccessController.class.getName());
static final String EXEC_PERMISSION_CHECKS_KEY = "hbase.security.exec.permission.checks";
static final boolean DEFAULT_EXEC_PERMISSION_CHECKS = false;
TableAuthManager authManager = null;
// flags if we are running on a region of the _acl_ table
@@ -153,6 +157,9 @@ public class AccessController extends BaseRegionObserver
// flags if we are able to support cell ACLs
boolean canPersistCellACLs;
// flags if we should check EXEC permissions
boolean shouldCheckExecPermissions;
private volatile boolean initialized = false;
public HRegion getRegion() {
@@ -176,6 +183,8 @@ public class AccessController extends BaseRegionObserver
byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms, e.getConfiguration());
this.authManager.getZKPermissionWatcher().writeToZookeeper(entry, serialized);
}
shouldCheckExecPermissions = e.getConfiguration().getBoolean(EXEC_PERMISSION_CHECKS_KEY,
DEFAULT_EXEC_PERMISSION_CHECKS);
initialized = true;
}
@@ -1475,7 +1484,29 @@ public class AccessController extends BaseRegionObserver
}
}
/* ---- EndpointObserver implementation ---- */
@Override
public Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request) throws IOException {
// Don't intercept calls to our own AccessControlService, we check for
// appropriate permissions in the service handlers
if (shouldCheckExecPermissions && !(service instanceof AccessControlService)) {
requirePermission("invoke(" + service.getDescriptorForType().getName() + "." +
methodName + ")",
getTableName(ctx.getEnvironment()), null, null,
Action.EXEC);
}
return request;
}
@Override
public void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder)
throws IOException { }
/* ---- Protobuf AccessControlService implementation ---- */
@Override
public void grant(RpcController controller,
AccessControlProtos.GrantRequest request,

TestAccessController.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -58,10 +59,22 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.CountRequest;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.CountResponse;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.HelloRequest;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.HelloResponse;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.IncrementCountRequest;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.IncrementCountResponse;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.NoopRequest;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.NoopResponse;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.PingRequest;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.PingResponse;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.PingService;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -82,8 +95,10 @@ import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.TestTableName;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@@ -94,6 +109,9 @@ import org.junit.experimental.categories.Category;
import com.google.common.collect.Lists;
import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;
/**
@@ -148,7 +166,10 @@ public class TestAccessController extends SecureTestUtil {
"org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner");
conf.set("hbase.master.logcleaner.plugins",
"org.apache.hadoop.hbase.master.snapshot.SnapshotLogCleaner");
// Enable security
SecureTestUtil.enableSecurity(conf);
// Enable EXEC permission checking
conf.setBoolean(AccessController.EXEC_PERMISSION_CHECKS_KEY, true);
TEST_UTIL.startMiniCluster();
MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster().getCoprocessorHost();
@@ -2246,4 +2267,98 @@ public class TestAccessController extends SecureTestUtil {
verifyAllowed(getAction, USER_NONE);
}
public static class PingCoprocessor extends PingService implements Coprocessor,
CoprocessorService {
@Override
public void start(CoprocessorEnvironment env) throws IOException { }
@Override
public void stop(CoprocessorEnvironment env) throws IOException { }
@Override
public Service getService() {
return this;
}
@Override
public void ping(RpcController controller, PingRequest request,
RpcCallback<PingResponse> callback) {
callback.run(PingResponse.newBuilder().setPong("Pong!").build());
}
@Override
public void count(RpcController controller, CountRequest request,
RpcCallback<CountResponse> callback) {
callback.run(CountResponse.newBuilder().build());
}
@Override
public void increment(RpcController controller, IncrementCountRequest request,
RpcCallback<IncrementCountResponse> callback) {
callback.run(IncrementCountResponse.newBuilder().build());
}
@Override
public void hello(RpcController controller, HelloRequest request,
RpcCallback<HelloResponse> callback) {
callback.run(HelloResponse.newBuilder().setResponse("Hello!").build());
}
@Override
public void noop(RpcController controller, NoopRequest request,
RpcCallback<NoopResponse> callback) {
callback.run(NoopResponse.newBuilder().build());
}
}
@Test
public void testCoprocessorExec() throws Exception {
// Set up our ping endpoint service on all regions of our test table
for (JVMClusterUtil.RegionServerThread thread:
TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
HRegionServer rs = thread.getRegionServer();
for (HRegion region: rs.getOnlineRegions(TEST_TABLE.getTableName())) {
region.getCoprocessorHost().load(PingCoprocessor.class,
Coprocessor.PRIORITY_USER, conf);
}
}
// Create users for testing, and grant EXEC privileges on our test table
// only to user A
User userA = User.createUserForTesting(conf, "UserA", new String[0]);
User userB = User.createUserForTesting(conf, "UserB", new String[0]);
SecureTestUtil.grantOnTable(TEST_UTIL, userA.getShortName(),
TEST_TABLE.getTableName(), null, null,
Permission.Action.EXEC);
// Create an action for invoking our test endpoint
AccessTestAction execEndpointAction = new AccessTestAction() {
@Override
public Object run() throws Exception {
HTable t = new HTable(conf, TEST_TABLE.getTableName());
try {
BlockingRpcChannel service = t.coprocessorService(HConstants.EMPTY_BYTE_ARRAY);
PingCoprocessor.newBlockingStub(service).noop(null, NoopRequest.newBuilder().build());
} finally {
t.close();
}
return null;
}
};
// Verify that EXEC permission is checked correctly
verifyDenied(execEndpointAction, userB);
verifyAllowed(execEndpointAction, userA);
// Now grant EXEC to the entire namespace to user B
SecureTestUtil.grantOnNamespace(TEST_UTIL, userB.getShortName(),
TEST_TABLE.getTableName().getNamespaceAsString(),
Permission.Action.EXEC);
// User B should now be allowed also
verifyAllowed(execEndpointAction, userA, userB);
}
}

TestVisibilityLabelsWithACL.java

@@ -38,13 +38,17 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.SecureTestUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
@@ -52,6 +56,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.ByteString;
@Category(MediumTests.class)
@@ -86,9 +91,24 @@ public class TestVisibilityLabelsWithACL {
TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME.getName(), 50000);
// Wait for the labels table to become available
TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
addLabels();
// Create users for testing
SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });
NORMAL_USER = User.createUserForTesting(conf, "user1", new String[] {});
// Grant NORMAL_USER EXEC privilege on the labels table. For the purposes of this
// test, we want to ensure that access is denied even with the ability to access
// the endpoint.
HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME);
try {
BlockingRpcChannel service = acl.coprocessorService(LABELS_TABLE_NAME.getName());
AccessControlService.BlockingInterface protocol =
AccessControlService.newBlockingStub(service);
ProtobufUtil.grant(protocol, NORMAL_USER.getShortName(), LABELS_TABLE_NAME, null, null,
Permission.Action.EXEC);
} finally {
acl.close();
}
}
@AfterClass