From 33dbfd2053e94c5ceeef74bdf0465e3bc9304e7e Mon Sep 17 00:00:00 2001 From: Andrew Kyle Purtell Date: Wed, 25 Dec 2013 02:00:21 +0000 Subject: [PATCH] HBASE-6104. Require EXEC permission to call coprocessor endpoints git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1553344 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/hbase/regionserver/HRegion.java | 9 ++ .../regionserver/RegionCoprocessorHost.java | 43 ++++++ .../security/access/AccessController.java | 25 +++- .../security/access/TestAccessController.java | 130 ++++++++++++++++++ .../TestVisibilityLabelsWithACL.java | 22 ++- 5 files changed, 227 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 6b70cdf1b93..686a99239e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -5398,6 +5398,11 @@ public class HRegion implements HeapSize { // , Writable{ Message request = service.getRequestPrototype(methodDesc).newBuilderForType() .mergeFrom(call.getRequest()).build(); + + if (coprocessorHost != null) { + request = coprocessorHost.preEndpointInvocation(service, methodName, request); + } + final Message.Builder responseBuilder = service.getResponsePrototype(methodDesc).newBuilderForType(); service.callMethod(methodDesc, controller, request, new RpcCallback() { @@ -5409,6 +5414,10 @@ public class HRegion implements HeapSize { // , Writable{ } }); + if (coprocessorHost != null) { + coprocessorHost.postEndpointInvocation(service, methodName, request, responseBuilder); + } + return responseBuilder.build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 810ff792c19..788474cba5e 
100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.EndpointObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; @@ -71,6 +72,8 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.StringUtils; import com.google.common.collect.ImmutableList; +import com.google.protobuf.Message; +import com.google.protobuf.Service; /** * Implements the coprocessor environment and runtime support for coprocessors @@ -1802,4 +1805,44 @@ public class RegionCoprocessorHost } return newCell; } + + public Message preEndpointInvocation(Service service, String methodName, Message request) + throws IOException { + ObserverContext ctx = null; + for (RegionEnvironment env : coprocessors) { + if (env.getInstance() instanceof EndpointObserver) { + ctx = ObserverContext.createAndPrepare(env, ctx); + try { + request = ((EndpointObserver) env.getInstance()).preEndpointInvocation(ctx, service, + methodName, request); + } catch (Throwable e) { + handleCoprocessorThrowable(env, e); + } + if (ctx.shouldComplete()) { + break; + } + } + } + return request; + } + + public void postEndpointInvocation(Service service, String methodName, Message request, + Message.Builder responseBuilder) throws IOException { + ObserverContext ctx = null; + for (RegionEnvironment env : coprocessors) { + if (env.getInstance() instanceof EndpointObserver) { + ctx = ObserverContext.createAndPrepare(env, ctx); + try { + ((EndpointObserver) 
env.getInstance()).postEndpointInvocation(ctx, service, methodName, + request, responseBuilder); + } catch (Throwable e) { + handleCoprocessorThrowable(env, e); + } + if (ctx.shouldComplete()) { + break; + } + } + } + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index ef88100852f..2a42bce83f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -25,6 +25,7 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; +import com.google.protobuf.Message; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; @@ -128,7 +129,7 @@ import static org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Acc */ public class AccessController extends BaseRegionObserver implements MasterObserver, RegionServerObserver, - AccessControlService.Interface, CoprocessorService { + AccessControlService.Interface, CoprocessorService, EndpointObserver { public static final Log LOG = LogFactory.getLog(AccessController.class); @@ -1467,7 +1468,29 @@ public class AccessController extends BaseRegionObserver } } + /* ---- EndpointObserver implementation ---- */ + + @Override + public Message preEndpointInvocation(ObserverContext ctx, + Service service, String methodName, Message request) throws IOException { + // Don't intercept calls to our own AccessControlService, we check for + // appropriate permissions in the service handlers + if (!(service instanceof AccessControlService)) { + requirePermission("invoke(" + service.getDescriptorForType().getName() + "." 
+ + methodName + ")", + getTableName(ctx.getEnvironment()), null, null, + Action.EXEC); + } + return request; + } + + @Override + public void postEndpointInvocation(ObserverContext ctx, + Service service, String methodName, Message request, Message.Builder responseBuilder) + throws IOException { } + /* ---- Protobuf AccessControlService implementation ---- */ + @Override public void grant(RpcController controller, AccessControlProtos.GrantRequest request, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 025f37bb40b..a5f66fc75a3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -62,10 +63,22 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.CountRequest; +import 
org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.CountResponse; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.HelloRequest; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.HelloResponse; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.IncrementCountRequest; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.IncrementCountResponse; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.NoopRequest; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.NoopResponse; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.PingRequest; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.PingResponse; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.PingService; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -88,8 +101,10 @@ import org.apache.hadoop.hbase.security.access.Permission.Action; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.TestTableName; + import org.apache.log4j.Level; import org.apache.log4j.Logger; + import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -100,6 +115,9 @@ import org.junit.experimental.categories.Category; import com.google.common.collect.Lists; import com.google.protobuf.BlockingRpcChannel; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; import com.google.protobuf.ServiceException; /** @@ -2493,4 +2511,116 @@ public class TestAccessController extends SecureTestUtil { verifyAllowed(getAction, USER_NONE); } + public static class PingCoprocessor extends PingService implements Coprocessor, + CoprocessorService { + + @Override + public 
void start(CoprocessorEnvironment env) throws IOException { } + + @Override + public void stop(CoprocessorEnvironment env) throws IOException { } + + @Override + public Service getService() { + return this; + } + + @Override + public void ping(RpcController controller, PingRequest request, + RpcCallback callback) { + callback.run(PingResponse.newBuilder().setPong("Pong!").build()); + } + + @Override + public void count(RpcController controller, CountRequest request, + RpcCallback callback) { + callback.run(CountResponse.newBuilder().build()); + } + + @Override + public void increment(RpcController controller, IncrementCountRequest requet, + RpcCallback callback) { + callback.run(IncrementCountResponse.newBuilder().build()); + } + + @Override + public void hello(RpcController controller, HelloRequest request, + RpcCallback callback) { + callback.run(HelloResponse.newBuilder().setResponse("Hello!").build()); + } + + @Override + public void noop(RpcController controller, NoopRequest request, + RpcCallback callback) { + callback.run(NoopResponse.newBuilder().build()); + } + } + + @Test + public void testCoprocessorExec() throws Exception { + // Set up our ping endpoint service on all regions of our test table + for (JVMClusterUtil.RegionServerThread thread: + TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) { + HRegionServer rs = thread.getRegionServer(); + for (HRegion region: rs.getOnlineRegions(TEST_TABLE.getTableName())) { + region.getCoprocessorHost().load(PingCoprocessor.class, + Coprocessor.PRIORITY_USER, conf); + } + } + + // Create users for testing, and grant EXEC privileges on our test table + // only to user A + User userA = User.createUserForTesting(conf, "UserA", new String[0]); + User userB = User.createUserForTesting(conf, "UserB", new String[0]); + HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); + try { + BlockingRpcChannel service = acl.coprocessorService(HConstants.EMPTY_BYTE_ARRAY); + AccessControlService.BlockingInterface 
protocol = + AccessControlService.newBlockingStub(service); + AccessControlProtos.GrantRequest request = RequestConverter. + buildGrantRequest(userA.getShortName(), TEST_TABLE.getTableName(), null, null, + AccessControlProtos.Permission.Action.EXEC); + protocol.grant(null, request); + } finally { + acl.close(); + } + + // Create an action for invoking our test endpoint + AccessTestAction execEndpointAction = new AccessTestAction() { + @Override + public Object run() throws Exception { + HTable t = new HTable(conf, TEST_TABLE.getTableName()); + try { + BlockingRpcChannel service = t.coprocessorService(HConstants.EMPTY_BYTE_ARRAY); + PingCoprocessor.newBlockingStub(service).noop(null, NoopRequest.newBuilder().build()); + } finally { + t.close(); + } + return null; + } + }; + + // Verify that EXEC permission is checked correctly + verifyDenied(execEndpointAction, userB); + verifyAllowed(execEndpointAction, userA); + + // Now grant EXEC to the entire namespace to user B + acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); + try { + BlockingRpcChannel service = acl.coprocessorService(HConstants.EMPTY_BYTE_ARRAY); + AccessControlService.BlockingInterface protocol = + AccessControlService.newBlockingStub(service); + AccessControlProtos.GrantRequest request = RequestConverter. 
+ buildGrantRequest(userB.getShortName(), + TEST_TABLE.getTableName().getNamespaceAsString(), + AccessControlProtos.Permission.Action.EXEC); + protocol.grant(null, request); + } finally { + acl.close(); + } + + // User B should now be allowed also + verifyAllowed(execEndpointAction, userA, userB); + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java index 0502669e904..24c5210b904 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java @@ -38,13 +38,17 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.access.AccessControlLists; import org.apache.hadoop.hbase.security.access.AccessController; +import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.SecureTestUtil; import org.apache.hadoop.hbase.util.Bytes; + import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; @@ -52,6 +56,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import com.google.protobuf.BlockingRpcChannel; import com.google.protobuf.ByteString; 
@Category(MediumTests.class) @@ -86,9 +91,24 @@ public class TestVisibilityLabelsWithACL { TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME.getName(), 50000); // Wait for the labels table to become available TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000); + addLabels(); + + // Create users for testing SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); NORMAL_USER = User.createUserForTesting(conf, "user1", new String[] {}); - addLabels(); + // Grant NORMAL_USER EXEC privilege on the labels table. For the purposes of this + // test, we want to ensure that access is denied even with the ability to access + // the endpoint. + HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); + try { + BlockingRpcChannel service = acl.coprocessorService(LABELS_TABLE_NAME.getName()); + AccessControlService.BlockingInterface protocol = + AccessControlService.newBlockingStub(service); + ProtobufUtil.grant(protocol, NORMAL_USER.getShortName(), LABELS_TABLE_NAME, null, null, + Permission.Action.EXEC); + } finally { + acl.close(); + } } @AfterClass