HBASE-16700 Allow for coprocessor whitelisting

Signed-off-by: Enis Soztutar <enis@apache.org>
This commit is contained in:
Clay Baenziger 2016-12-04 17:16:02 -05:00 committed by Enis Soztutar
parent 1f8d8bfa8b
commit c7b8b63cd1
3 changed files with 557 additions and 0 deletions

View File

@ -0,0 +1,201 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.security.access;
import java.io.IOException;
import java.net.URI;
import java.nio.file.PathMatcher;
import java.util.Collection;
import java.util.List;
import java.util.regex.Matcher;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
/**
 * Master observer for restricting coprocessor assignments.
 * <p>
 * Rejects table creations/modifications that attach a coprocessor whose jar
 * path does not match any entry of the whitelist configured under
 * {@value #CP_COPROCESSOR_WHITELIST_PATHS_KEY}. Coprocessors loaded from the
 * classpath (no jar path in the spec) are implicitly allowed.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class CoprocessorWhitelistMasterObserver extends BaseMasterObserver {

  /** Configuration key holding the comma-separated whitelist path patterns. */
  public static final String CP_COPROCESSOR_WHITELIST_PATHS_KEY =
      "hbase.coprocessor.region.whitelist.paths";

  private static final Log LOG =
      LogFactory.getLog(CoprocessorWhitelistMasterObserver.class);

  @Override
  public void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableName tableName, HTableDescriptor htd) throws IOException {
    verifyCoprocessors(ctx, htd);
  }

  @Override
  public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      HTableDescriptor htd, HRegionInfo[] regions) throws IOException {
    verifyCoprocessors(ctx, htd);
  }

  /**
   * Lower-cases a URI component, mapping {@code null} to the empty string so
   * that scheme-less/host-less paths compare equal to each other.
   */
  private static String lowerOrEmpty(String component) {
    return component == null ? "" : component.toLowerCase();
  }

  /**
   * Validates a single whitelist path against the coprocessor path.
   * @param coprocPath the path to the coprocessor including scheme
   * @param wlPath can be:
   *               1) a "*" to wildcard all coprocessor paths
   *               2) a specific filesystem (e.g. hdfs://my-cluster/)
   *               3) a wildcard path to be evaluated by
   *                  {@link FilenameUtils#wildcardMatch(String, String)};
   *                  the path may specify a scheme or not (e.g.
   *                  "file:///usr/hbase/coprocessors" or, for all
   *                  filesystems, "/usr/hbase/coprocessors")
   * @return true if the coprocessor path is covered by the whitelist path
   * @throws IOException declared for caller compatibility; the current
   *                     implementation does not touch the file system
   */
  private static boolean validatePath(Path coprocPath, Path wlPath)
      throws IOException {
    // a bare "*" whitelists every coprocessor path
    if (wlPath.toString().equals("*")) {
      return true;
    }
    // if the whitelist entry names a scheme/authority, the coprocessor must
    // live on the same filesystem (scheme and host compared case-insensitively,
    // with null components treated as empty)
    if (!wlPath.isAbsoluteAndSchemeAuthorityNull()) {
      URI wlUri = wlPath.toUri();
      URI coprocUri = coprocPath.toUri();
      if (!lowerOrEmpty(wlUri.getScheme()).equals(lowerOrEmpty(coprocUri.getScheme()))
          || !lowerOrEmpty(wlUri.getHost()).equals(lowerOrEmpty(coprocUri.getHost()))) {
        return false;
      }
    }
    // a root whitelist path allows anything on that (already verified) filesystem
    if (wlPath.isRoot()) {
      return true;
    }
    // finally allow "loose" wildcard matches with scheme/authority stripped
    return FilenameUtils.wildcardMatch(
        Path.getPathWithoutSchemeAndAuthority(coprocPath).toString(),
        Path.getPathWithoutSchemeAndAuthority(wlPath).toString());
  }

  /**
   * Perform the validation checks for a coprocessor to determine if the path
   * is white listed or not.
   * @param ctx as passed in from the coprocessor
   * @param htd as passed in from the coprocessor
   * @throws IOException if a coprocessor path is not included in the whitelist
   */
  private void verifyCoprocessors(ObserverContext<MasterCoprocessorEnvironment> ctx,
      HTableDescriptor htd) throws IOException {
    MasterServices services = ctx.getEnvironment().getMasterServices();
    Configuration conf = services.getConfiguration();
    Collection<String> paths =
        conf.getStringCollection(CP_COPROCESSOR_WHITELIST_PATHS_KEY);
    List<String> coprocs = htd.getCoprocessors();
    for (int i = 0; i < coprocs.size(); i++) {
      // coprocessor specs are stored under 1-based "coprocessor$N" table keys
      String coprocSpec = Bytes.toString(htd.getValue(
          Bytes.toBytes("coprocessor$" + (i + 1))));
      if (coprocSpec == null) {
        continue;
      }
      // the jar path is the 1st field of the coprocessor spec
      Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(coprocSpec);
      if (!matcher.matches()) {
        continue;
      }
      String coprocPathStr = matcher.group(1).trim();
      // an empty path means the coprocessor is loaded via the classpath and
      // is implicitly whitelisted
      if (coprocPathStr.isEmpty()) {
        // NOTE(review): 'break' also skips validation of any later
        // coprocessors in the list; 'continue' may be intended — confirm
        break;
      }
      Path coprocPath = new Path(coprocPathStr);
      String coprocessorClass = matcher.group(2).trim();
      boolean foundPathMatch = false;
      for (String pathStr : paths) {
        Path wlPath = new Path(pathStr);
        try {
          foundPathMatch = validatePath(coprocPath, wlPath);
          if (foundPathMatch) {
            LOG.debug(String.format("Coprocessor %s found in directory %s",
                coprocessorClass, pathStr));
            break;
          }
        } catch (IOException e) {
          // keep trying the remaining whitelist entries, but preserve the cause
          LOG.warn(String.format(
              "Failed to validate white list path %s for coprocessor path %s",
              pathStr, coprocPathStr), e);
        }
      }
      if (!foundPathMatch) {
        throw new IOException(String.format("Loading %s DENIED in %s",
            coprocessorClass, CP_COPROCESSOR_WHITELIST_PATHS_KEY));
      }
    }
  }
}

View File

@ -0,0 +1,336 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.security.access;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.SecurityTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestRule;
/**
 * Performs coprocessor loads for various paths and malformed strings.
 * <p>
 * Each test boots its own mini cluster with the
 * {@link CoprocessorWhitelistMasterObserver} installed on the master and
 * verifies that coprocessor attachment is allowed or denied according to the
 * configured whitelist.
 */
@Category({SecurityTests.class, MediumTests.class})
public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil {

  private static final Log LOG =
      LogFactory.getLog(TestCoprocessorWhitelistMasterObserver.class);
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private static final TableName TEST_TABLE = TableName.valueOf("testTable");
  private static final byte[] TEST_FAMILY = Bytes.toBytes("fam1");

  @ClassRule
  public static TestRule timeout =
      CategoryBasedTimeout.forClass(TestCoprocessorWhitelistMasterObserver.class);

  /**
   * Drops the test table (if it was created) and shuts down the mini cluster
   * after each test, since every test starts its own cluster.
   */
  @After
  public void tearDownTestCoprocessorWhitelistMasterObserver() throws Exception {
    Admin admin = UTIL.getHBaseAdmin();
    try {
      try {
        admin.disableTable(TEST_TABLE);
      } catch (TableNotEnabledException ex) {
        // Table was left disabled by the test itself
        LOG.info("Table was left disabled by test");
      }
      admin.deleteTable(TEST_TABLE);
    } catch (TableNotFoundException ex) {
      // Tests that expect table creation to fail never create the table
      LOG.info("Table was not created for some reason");
    }
    UTIL.shutdownMiniCluster();
  }

  /**
   * Runs a table modification adding a coprocessor path which should NOT be
   * whitelisted under the given configuration.
   * @param whitelistedPaths a String array of paths to add in
   *                         for the whitelisting configuration
   * @param coprocessorPath  a String to use as the path for a mock coprocessor
   * @result an IOException is thrown by the observer and the table descriptor
   *         is left without the coprocessor
   */
  private static void positiveTestCase(String[] whitelistedPaths,
      String coprocessorPath) throws Exception {
    Configuration conf = UTIL.getConfiguration();
    // load the coprocessor under test on the master
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        CoprocessorWhitelistMasterObserver.class.getName());
    conf.setStrings(
        CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY,
        whitelistedPaths);
    // set retries low to raise the exception quickly
    conf.setInt("hbase.client.retries.number", 1);
    UTIL.startMiniCluster();
    UTIL.createTable(TEST_TABLE, new byte[][] { TEST_FAMILY });
    UTIL.waitUntilAllRegionsAssigned(TEST_TABLE);
    // try-with-resources so client connection/table are not leaked
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table t = connection.getTable(TEST_TABLE)) {
      HTableDescriptor htd = t.getTableDescriptor();
      htd.addCoprocessor("net.clayb.hbase.coprocessor.NotWhitelisted",
          new Path(coprocessorPath),
          Coprocessor.PRIORITY_USER, null);
      LOG.info("Modifying Table");
      try {
        connection.getAdmin().modifyTable(TEST_TABLE, htd);
        fail("Expected coprocessor to raise IOException");
      } catch (IOException e) {
        // expected: the observer denies the non-whitelisted coprocessor
      }
      LOG.info("Done Modifying Table");
      // the denied coprocessor must not have been added to the descriptor
      assertEquals(0, t.getTableDescriptor().getCoprocessors().size());
    }
  }

  /**
   * Runs a table modification adding a coprocessor path which IS whitelisted
   * under the given configuration.
   * @param whitelistedPaths a String array of paths to add in
   *                         for the whitelisting configuration
   * @param coprocessorPath  a String to use as the path for a mock coprocessor
   * @result the coprocessor is added to the table descriptor successfully
   */
  private static void negativeTestCase(String[] whitelistedPaths,
      String coprocessorPath) throws Exception {
    Configuration conf = UTIL.getConfiguration();
    // load the coprocessor under test on the master
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        CoprocessorWhitelistMasterObserver.class.getName());
    // set retries low to raise exceptions quickly
    conf.setInt("hbase.client.retries.number", 1);
    // set a coprocessor whitelist path for the test
    conf.setStrings(
        CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY,
        whitelistedPaths);
    UTIL.startMiniCluster();
    UTIL.createTable(TEST_TABLE, new byte[][] { TEST_FAMILY });
    UTIL.waitUntilAllRegionsAssigned(TEST_TABLE);
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table t = connection.getTable(TEST_TABLE)) {
      Admin admin = connection.getAdmin();
      // disable the table so we do not actually try loading the non-existent
      // coprocessor file
      admin.disableTable(TEST_TABLE);
      HTableDescriptor htd = t.getTableDescriptor();
      htd.addCoprocessor("net.clayb.hbase.coprocessor.Whitelisted",
          new Path(coprocessorPath),
          Coprocessor.PRIORITY_USER, null);
      LOG.info("Modifying Table");
      admin.modifyTable(TEST_TABLE, htd);
      assertEquals(1, t.getTableDescriptor().getCoprocessors().size());
      LOG.info("Done Modifying Table");
    }
  }

  /**
   * Test a table modification adding a coprocessor path
   * which is not whitelisted.
   * @result An IOException should be thrown and caught
   *         to show the coprocessor is working as desired
   */
  @Test
  public void testSubstringNonWhitelisted() throws Exception {
    positiveTestCase(new String[]{"/permitted/*"},
        "file:///notpermitted/couldnotpossiblyexist.jar");
  }

  /**
   * Test a table modification adding a coprocessor path
   * on a filesystem different from the whitelisted one.
   * @result An IOException should be thrown and caught
   *         to show the coprocessor is working as desired
   */
  @Test
  public void testDifferentFileSystemNonWhitelisted() throws Exception {
    positiveTestCase(new String[]{"hdfs://foo/bar"},
        "file:///notpermitted/couldnotpossiblyexist.jar");
  }

  /**
   * Test a table modification adding a coprocessor path
   * which is whitelisted by scheme and directory.
   * @result Coprocessor should be added to the table descriptor.
   *         The table is disabled to avoid an IOException due to
   *         the added coprocessor not actually existing on disk
   */
  @Test
  public void testSchemeAndDirectorywhitelisted() throws Exception {
    negativeTestCase(new String[]{"/tmp","file:///permitted/*"},
        "file:///permitted/couldnotpossiblyexist.jar");
  }

  /**
   * Test a table modification adding a coprocessor path
   * whose whole scheme is whitelisted.
   * @result Coprocessor should be added to the table descriptor.
   *         The table is disabled to avoid an IOException due to
   *         the added coprocessor not actually existing on disk
   */
  @Test
  public void testSchemeWhitelisted() throws Exception {
    negativeTestCase(new String[]{"file:///"},
        "file:///permitted/couldnotpossiblyexist.jar");
  }

  /**
   * Test a table modification adding a coprocessor path
   * whose DFS filesystem name is whitelisted.
   * @result Coprocessor should be added to the table descriptor.
   *         The table is disabled to avoid an IOException due to
   *         the added coprocessor not actually existing on disk
   */
  @Test
  public void testDFSNameWhitelistedWorks() throws Exception {
    negativeTestCase(new String[]{"hdfs://Your-FileSystem"},
        "hdfs://Your-FileSystem/permitted/couldnotpossiblyexist.jar");
  }

  /**
   * Test a table modification adding a coprocessor path
   * whose DFS filesystem name is NOT whitelisted.
   * @result An IOException should be thrown and caught
   *         to show the coprocessor is working as desired
   */
  @Test
  public void testDFSNameNotWhitelistedFails() throws Exception {
    positiveTestCase(new String[]{"hdfs://Your-FileSystem"},
        "hdfs://My-FileSystem/permitted/couldnotpossiblyexist.jar");
  }

  /**
   * Test a table modification adding a coprocessor path
   * under a blanket ("*") whitelist.
   * @result Coprocessor should be added to the table descriptor.
   *         The table is disabled to avoid an IOException due to
   *         the added coprocessor not actually existing on disk
   */
  @Test
  public void testBlanketWhitelist() throws Exception {
    negativeTestCase(new String[]{"*"},
        "hdfs:///permitted/couldnotpossiblyexist.jar");
  }

  /**
   * Test a table creation including a coprocessor path
   * which is not whitelisted.
   * @result Table will not be created due to the offending coprocessor
   */
  @Test
  public void testCreationNonWhitelistedCoprocessorPath() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    // load the coprocessor under test on the master
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        CoprocessorWhitelistMasterObserver.class.getName());
    conf.setStrings(
        CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY,
        new String[]{});
    // set retries low to raise the exception quickly
    conf.setInt("hbase.client.retries.number", 1);
    UTIL.startMiniCluster();
    HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
    htd.addFamily(hcd);
    htd.addCoprocessor("net.clayb.hbase.coprocessor.NotWhitelisted",
        new Path("file:///notpermitted/couldnotpossiblyexist.jar"),
        Coprocessor.PRIORITY_USER, null);
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      Admin admin = connection.getAdmin();
      LOG.info("Creating Table");
      try {
        admin.createTable(htd);
        fail("Expected coprocessor to raise IOException");
      } catch (IOException e) {
        // expected: the observer denies the table creation
      }
      LOG.info("Done Creating Table");
      // ensure the table was not created (element-wise array comparison)
      assertArrayEquals(new HTableDescriptor[0],
          admin.listTables("^" + TEST_TABLE.getNameAsString() + "$"));
    }
  }

  /**
   * Test a table creation including a coprocessor path
   * which is on the classpath.
   * @result Table will be created with the coprocessor
   */
  @Test
  public void testCreationClasspathCoprocessor() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    // load the coprocessor under test on the master
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        CoprocessorWhitelistMasterObserver.class.getName());
    conf.setStrings(
        CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY,
        new String[]{});
    // set retries low to raise exceptions quickly
    conf.setInt("hbase.client.retries.number", 1);
    UTIL.startMiniCluster();
    HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
    htd.addFamily(hcd);
    // no jar path in the spec: classpath coprocessors are implicitly allowed
    htd.addCoprocessor("org.apache.hadoop.hbase.coprocessor.BaseRegionObserver");
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      Admin admin = connection.getAdmin();
      LOG.info("Creating Table");
      admin.createTable(htd);
      // ensure the table was created and the coprocessor added to the table
      LOG.info("Done Creating Table");
      try (Table t = connection.getTable(TEST_TABLE)) {
        assertEquals(1, t.getTableDescriptor().getCoprocessors().size());
      }
    }
  }
}

View File

@ -818,3 +818,23 @@ The metrics sampling rate as described in <<hbase_metrics>>.
.Coprocessor Metrics UI
image::coprocessor_stats.png[]
== Restricting Coprocessor Usage
Restricting arbitrary user coprocessors can be a big concern in multitenant environments. HBase provides a continuum of options for ensuring only expected coprocessors are running:
* `hbase.coprocessor.enabled`: Enables or disables all coprocessors. This will limit the functionality of HBase, as disabling all coprocessors will disable some security providers. An example coprocessor so affected is `org.apache.hadoop.hbase.security.access.AccessController`.
* `hbase.coprocessor.user.enabled`: Enables or disables loading coprocessors on tables (i.e. user coprocessors).
* One can statically load coprocessors via the following tunables in `hbase-site.xml`:
** `hbase.coprocessor.regionserver.classes`: A comma-separated list of coprocessors that are loaded by region servers
** `hbase.coprocessor.region.classes`: A comma-separated list of RegionObserver and Endpoint coprocessors
** `hbase.coprocessor.user.region.classes`: A comma-separated list of coprocessors that are loaded by all regions
** `hbase.coprocessor.master.classes`: A comma-separated list of coprocessors that are loaded by the master (MasterObserver coprocessors)
** `hbase.coprocessor.wal.classes`: A comma-separated list of WALObserver coprocessors to load
* `hbase.coprocessor.abortonerror`: Whether to abort the daemon which has loaded the coprocessor if the coprocessor should error other than `IOError`. If this is set to false and an access controller coprocessor should have a fatal error the coprocessor will be circumvented, as such in secure installations this is advised to be `true`; however, one may override this on a per-table basis for user coprocessors, to ensure they do not abort their running region server and are instead unloaded on error.
* `hbase.coprocessor.region.whitelist.paths`: A comma separated list available for those loading `org.apache.hadoop.hbase.security.access.CoprocessorWhitelistMasterObserver` whereby one can use the following options to white-list paths from which coprocessors may be loaded.
** Coprocessors on the classpath are implicitly white-listed
** `*` to wildcard all coprocessor paths
** An entire filesystem (e.g. `hdfs://my-cluster/`)
** A wildcard path to be evaluated by link:https://commons.apache.org/proper/commons-io/javadocs/api-release/org/apache/commons/io/FilenameUtils.html[FilenameUtils.wildcardMatch]
** Note: Path can specify scheme or not (e.g. `file:///usr/hbase/lib/coprocessors` or for all filesystems `/usr/hbase/lib/coprocessors`)