HBASE-22776 Rename config names in user scan snapshot feature (#440)

meiyi 2019-08-08 16:01:02 +08:00 committed by GitHub
parent 547cec4078
commit 0e5dc6d7ce
5 changed files with 630 additions and 336 deletions


@@ -71,6 +71,8 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclCleaner;
import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclHelper;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
@@ -1123,6 +1125,10 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
// Inject snapshot cleaners, if snapshot.enable is true
hfileCleaners.add(SnapshotHFileCleaner.class.getName());
hfileCleaners.add(HFileLinkCleaner.class.getName());
// If the sync acl to HDFS feature is enabled, then inject the cleaner
if (SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf)) {
hfileCleaners.add(SnapshotScannerHDFSAclCleaner.class.getName());
}
// Set cleaners conf
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,

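For illustration, a minimal sketch of how the injected cleaner chain could be inspected (the helper class is hypothetical and not code from this commit; HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS is the key written by the hunk above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;

// Hypothetical helper, for illustration only.
public class CleanerChainSketch {
  public static void printCleanerChain(Configuration conf) {
    // With acl sync to HDFS enabled, this list is expected to contain
    // SnapshotHFileCleaner, HFileLinkCleaner and SnapshotScannerHDFSAclCleaner.
    String[] plugins = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
    if (plugins != null) {
      for (String plugin : plugins) {
        System.out.println(plugin);
      }
    }
  }
}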

@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
import org.apache.yetus.audience.InterfaceAudience;
@@ -59,7 +58,7 @@ public class SnapshotScannerHDFSAclCleaner extends BaseHFileCleanerDelegate {
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
userScanSnapshotEnabled = isUserScanSnapshotEnabled(conf);
userScanSnapshotEnabled = SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf);
}
@Override
@@ -82,13 +81,6 @@ public class SnapshotScannerHDFSAclCleaner extends BaseHFileCleanerDelegate {
return true;
}
private boolean isUserScanSnapshotEnabled(Configuration conf) {
String masterCoprocessors = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
return conf.getBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, false)
&& masterCoprocessors.contains(SnapshotScannerHDFSAclController.class.getName())
&& masterCoprocessors.contains(AccessController.class.getName());
}
private boolean isEmptyArchiveDirDeletable(Path dir) {
try {
if (isArchiveDataDir(dir)) {


@@ -119,7 +119,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast
public void preMasterInitialization(ObserverContext<MasterCoprocessorEnvironment> c)
throws IOException {
if (c.getEnvironment().getConfiguration()
.getBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, false)) {
.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)) {
MasterCoprocessorEnvironment mEnv = c.getEnvironment();
if (!(mEnv instanceof HasMasterServices)) {
throw new IOException("Does not implement HMasterServices");
@@ -133,7 +133,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast
userProvider = UserProvider.instantiate(c.getEnvironment().getConfiguration());
} else {
LOG.warn("Try to initialize the coprocessor SnapshotScannerHDFSAclController but failure "
+ "because the config " + SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE
+ "because the config " + SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE
+ " is false.");
}
}
@@ -213,7 +213,9 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast
public void postCompletedTruncateTableAction(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName) throws IOException {
if (needHandleTableHdfsAcl(tableName, "truncateTable " + tableName)) {
// Since the table directories are recreated, add HDFS acls again
// 1. create tmp table directories
hdfsAclHelper.createTableDirectories(tableName);
// 2. Since the table directories are recreated, add HDFS acls again
Set<String> users = hdfsAclHelper.getUsersWithTableReadAction(tableName, false, false);
hdfsAclHelper.addTableAcl(tableName, users, "truncate");
}
@@ -233,9 +235,11 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast
try (Table aclTable =
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
Set<String> users = SnapshotScannerHDFSAclStorage.getTableUsers(aclTable, tableName);
// 1. Delete the record in the acl table that the table owner's permission is synced to HDFS
// 1. Remove table archive directory default ACLs
hdfsAclHelper.removeTableDefaultAcl(tableName, users);
// 2. Delete the record in the acl table that the table owner's permission is synced to HDFS
SnapshotScannerHDFSAclStorage.deleteTableHdfsAcl(aclTable, tableName);
// 2. Remove namespace access acls
// 3. Remove namespace access acls
Set<String> removeUsers = filterUsersToRemoveNsAccessAcl(aclTable, tableName, users);
if (removeUsers.size() > 0) {
hdfsAclHelper.removeNamespaceAccessAcl(tableName, removeUsers, "delete");
@@ -251,7 +255,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast
try (Table aclTable =
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
if (needHandleTableHdfsAcl(currentDescriptor, "modifyTable " + tableName)
&& !hdfsAclHelper.isTableUserScanSnapshotEnabled(oldDescriptor)) {
&& !hdfsAclHelper.isAclSyncToHdfsEnabled(oldDescriptor)) {
// 1. Create table directories used for acl inheritance
hdfsAclHelper.createTableDirectories(tableName);
// 2. Add table users HDFS acls
@@ -264,7 +268,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast
SnapshotScannerHDFSAclStorage.addUserTableHdfsAcl(ctx.getEnvironment().getConnection(),
tableUsers, tableName);
} else if (needHandleTableHdfsAcl(oldDescriptor, "modifyTable " + tableName)
&& !hdfsAclHelper.isTableUserScanSnapshotEnabled(currentDescriptor)) {
&& !hdfsAclHelper.isAclSyncToHdfsEnabled(currentDescriptor)) {
// 1. Remove empty table directories
List<Path> tableRootPaths = hdfsAclHelper.getTableRootPaths(tableName, false);
for (Path path : tableRootPaths) {
@@ -290,10 +294,16 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast
public void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
String namespace) throws IOException {
if (checkInitialized("deleteNamespace " + namespace)) {
// 1. Record that the namespace user acl is no longer synced to HDFS
try (Table aclTable =
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
// 1. Delete namespace archive dir default ACLs
Set<String> users = SnapshotScannerHDFSAclStorage.getEntryUsers(aclTable,
PermissionStorage.toNamespaceEntry(Bytes.toBytes(namespace)));
hdfsAclHelper.removeNamespaceDefaultAcl(namespace, users);
// 2. Record that the namespace user acl is no longer synced to HDFS
SnapshotScannerHDFSAclStorage.deleteNamespaceHdfsAcl(ctx.getEnvironment().getConnection(),
namespace);
// 2. Delete tmp namespace directory
// 3. Delete tmp namespace directory
/**
* Delete the namespace tmp directory: it was created by this coprocessor when the namespace was
* created, so that the namespace default acl could be inherited by tables. The namespace data
@@ -303,6 +313,7 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast
hdfsAclHelper.deleteEmptyDir(pathHelper.getTmpNsDir(namespace));
}
}
}
@Override
public void postGrant(ObserverContext<MasterCoprocessorEnvironment> c,
@@ -364,7 +375,9 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast
UserPermission tPerm = getUserTablePermission(conf, userName, tableName);
if (tPerm != null && hdfsAclHelper.containReadAction(tPerm)) {
if (!isHdfsAclSet(aclTable, userName, tableName)) {
// 1. Add HDFS acl
// 1. Create table dirs
hdfsAclHelper.createTableDirectories(tableName);
// 2. Add HDFS acl
hdfsAclHelper.grantAcl(userPermission, new HashSet<>(0), new HashSet<>(0));
}
// 2. Record table acl is synced to HDFS
@@ -547,13 +560,13 @@ public class SnapshotScannerHDFSAclController implements MasterCoprocessor, Mast
private boolean needHandleTableHdfsAcl(TableName tableName, String operation) throws IOException {
return !tableName.isSystemTable() && checkInitialized(operation) && hdfsAclHelper
.isTableUserScanSnapshotEnabled(masterServices.getTableDescriptors().get(tableName));
.isAclSyncToHdfsEnabled(masterServices.getTableDescriptors().get(tableName));
}
private boolean needHandleTableHdfsAcl(TableDescriptor tableDescriptor, String operation) {
TableName tableName = tableDescriptor.getTableName();
return !tableName.isSystemTable() && checkInitialized(operation)
&& hdfsAclHelper.isTableUserScanSnapshotEnabled(tableDescriptor);
&& hdfsAclHelper.isAclSyncToHdfsEnabled(tableDescriptor);
}
private User getActiveUser(ObserverContext<?> ctx) throws IOException {


@@ -28,6 +28,7 @@ import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -53,6 +54,7 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
@@ -71,23 +73,23 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto
public class SnapshotScannerHDFSAclHelper implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class);
public static final String USER_SCAN_SNAPSHOT_ENABLE = "hbase.user.scan.snapshot.enable";
public static final String USER_SCAN_SNAPSHOT_THREAD_NUMBER =
"hbase.user.scan.snapshot.thread.number";
public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable";
public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER =
"hbase.acl.sync.to.hdfs.thread.number";
// The tmp directory used to restore snapshots; it cannot be a subdirectory of the HBase root dir
public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir";
public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT =
"/hbase/.tmpdir-to-restore-snapshot";
// The default permission of the common directories if the feature is enabled.
public static final String COMMON_DIRECTORY_PERMISSION =
"hbase.user.scan.snapshot.common.directory.permission";
"hbase.acl.sync.to.hdfs.common.directory.permission";
// The secure HBase permission is 700; 751 means all others have execute access, and the mask is
// set to read-execute so that the extended access ACL entries can take effect. Be cautious when
// setting this value.
public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751";
// The default permission of the snapshot restore directories if the feature is enabled.
public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION =
"hbase.user.scan.snapshot.restore.directory.permission";
"hbase.acl.sync.to.hdfs.restore.directory.permission";
// 753 means all others have write-execute access.
public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753";
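For illustration, a minimal sketch of enabling the feature under the renamed keys (the wrapper class is hypothetical and not part of this commit; the constants, coprocessor classes, and default values are the ones shown in this diff):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController;
import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclHelper;

// Hypothetical helper, for illustration only.
public class AclSyncToHdfsConfigSketch {
  public static Configuration enableAclSyncToHdfs() {
    Configuration conf = HBaseConfiguration.create();
    // Renamed from hbase.user.scan.snapshot.enable in this commit.
    conf.setBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, true);
    // Both coprocessors must also be registered on the master; see
    // isAclSyncToHdfsEnabled further down in this diff.
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      SnapshotScannerHDFSAclController.class.getName(),
      AccessController.class.getName());
    // Optional tuning knobs, also renamed; shown with their defaults.
    conf.setInt(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10);
    conf.set(SnapshotScannerHDFSAclHelper.COMMON_DIRECTORY_PERMISSION,
      SnapshotScannerHDFSAclHelper.COMMON_DIRECTORY_PERMISSION_DEFAULT);
    conf.set(SnapshotScannerHDFSAclHelper.SNAPSHOT_RESTORE_DIRECTORY_PERMISSION,
      SnapshotScannerHDFSAclHelper.SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT);
    return conf;
  }
}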
@@ -102,7 +104,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
this.conf = configuration;
this.pathHelper = new PathHelper(conf);
this.fs = pathHelper.getFileSystem();
this.pool = Executors.newFixedThreadPool(conf.getInt(USER_SCAN_SNAPSHOT_THREAD_NUMBER, 10),
this.pool = Executors.newFixedThreadPool(conf.getInt(ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10),
new ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build());
this.admin = connection.getAdmin();
}
@@ -230,6 +232,50 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
}
}
/**
* Remove the default acl from the namespace archive dir when deleting a namespace
* @param namespace the namespace
* @param removeUsers the users whose default acl will be removed
* @return false if an error occurred, otherwise true
*/
public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) {
try {
long start = System.currentTimeMillis();
Path archiveNsDir = pathHelper.getArchiveNsDir(namespace);
HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers,
HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
operation.handleAcl();
LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace,
System.currentTimeMillis() - start);
return true;
} catch (Exception e) {
LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e);
return false;
}
}
/**
* Remove the default acl from the table archive dir when deleting a table
* @param tableName the table name
* @param removeUsers the users whose default acl will be removed
* @return false if an error occurred, otherwise true
*/
public boolean removeTableDefaultAcl(TableName tableName, Set<String> removeUsers) {
try {
long start = System.currentTimeMillis();
Path archiveTableDir = pathHelper.getArchiveTableDir(tableName);
HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers,
HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
operation.handleAcl();
LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName,
System.currentTimeMillis() - start);
return true;
} catch (Exception e) {
LOG.error("Remove HDFS acl error when delete table {}", tableName, e);
return false;
}
}
/**
* Add table user acls
* @param tableName the table
@@ -349,7 +395,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
Set<TableName> tables = new HashSet<>();
for (String namespace : namespaces) {
tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream()
.filter(this::isTableUserScanSnapshotEnabled).map(TableDescriptor::getTableName)
.filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName)
.collect(Collectors.toSet()));
}
handleTableAcl(tables, users, skipNamespaces, skipTables, operationType);
@@ -403,7 +449,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
* Return the paths that a user with global permission will visit
* @return the path list
*/
private List<Path> getGlobalRootPaths() {
List<Path> getGlobalRootPaths() {
return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
}
@@ -511,9 +557,20 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
return !tablePermission.hasFamily() && !tablePermission.hasQualifier();
}
boolean isTableUserScanSnapshotEnabled(TableDescriptor tableDescriptor) {
public static boolean isAclSyncToHdfsEnabled(Configuration conf) {
String[] masterCoprocessors = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
Set<String> masterCoprocessorSet = new HashSet<>();
if (masterCoprocessors != null) {
Collections.addAll(masterCoprocessorSet, masterCoprocessors);
}
return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)
&& masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName())
&& masterCoprocessorSet.contains(AccessController.class.getName());
}
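For illustration, the new public static check can then be exercised as follows (a fragment building on the hypothetical AclSyncToHdfsConfigSketch above, not code from this commit):

Configuration conf = AclSyncToHdfsConfigSketch.enableAclSyncToHdfs();
// True only because the boolean key is set and both coprocessors are registered.
boolean enabled = SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf);

Making this check public and static lets SnapshotManager and SnapshotScannerHDFSAclCleaner share one definition of "enabled" instead of the private copy the cleaner kept before this commit.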
boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) {
return tableDescriptor == null ? false
: Boolean.valueOf(tableDescriptor.getValue(USER_SCAN_SNAPSHOT_ENABLE));
: Boolean.valueOf(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE));
}
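For illustration, a single table can also opt in, because the package-private overload above reads the same key from the table descriptor. A fragment sketch (the table name is hypothetical, and TableDescriptorBuilder is the stock HBase client builder, not part of this diff):

TableDescriptor desc = TableDescriptorBuilder
    .newBuilder(TableName.valueOf("ns", "tbl"))
    .setValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, "true")
    .build();
// Mirrors the package-private check above.
boolean tableEnabled =
    Boolean.valueOf(desc.getValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE));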
PathHelper getPathHelper() {