HBASE-22578 HFileCleaner should not delete empty ns/table directories used for user scan snapshot feature (#337)

meiyi 2019-07-24 15:14:56 +08:00 committed by huzheng
parent ed72cd7c1a
commit ff9589c9ab
4 changed files with 211 additions and 6 deletions


@@ -407,6 +407,26 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
return deleteFiles(filesToDelete) == files.size();
}
/**
* Check if an empty directory with no subdirs or subfiles can be deleted
* @param dir Path of the directory
* @return True if the directory can be deleted, otherwise false
*/
private boolean isEmptyDirDeletable(Path dir) {
for (T cleaner : cleanersChain) {
if (cleaner.isStopped() || this.getStopper().isStopped()) {
LOG.warn("A file cleaner {} is stopped, won't delete the empty directory {}",
this.getName(), dir);
return false;
}
if (!cleaner.isEmptyDirDeletable(dir)) {
// If one of the cleaners needs the empty directory, skip deleting it
return false;
}
}
return true;
}
/**
* Delete the given files
* @param filesToDelete files to delete
@@ -513,9 +533,9 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
allSubdirsDeleted = deleteAction(() -> getCleanResult(tasks), "subdirs");
}
-boolean result = allFilesDeleted && allSubdirsDeleted;
-// if and only if files and subdirs under current dir are deleted successfully, and
-// it is not the root dir, then task will try to delete it.
+boolean result = allFilesDeleted && allSubdirsDeleted && isEmptyDirDeletable(dir);
+// if and only if files and subdirs under current dir are deleted successfully, and the empty
+// directory can be deleted, and it is not the root dir, then the task will try to delete it.
if (result && !root) {
result &= deleteAction(() -> fs.delete(dir, false), "dir");
}
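
In effect, any delegate in the chain can now veto removal of an empty directory. A minimal sketch of the resulting decision flow (hypothetical and simplified, not the actual CleanerChore code; cleanersChain and fs mirror the fields used above):

// Hypothetical simplification of the gate added above: an empty, non-root
// directory is removed only when no delegate in the chain vetoes it.
private boolean tryDeleteEmptyDir(Path dir, boolean isRoot) throws IOException {
  for (T cleaner : cleanersChain) {
    if (!cleaner.isEmptyDirDeletable(dir)) {
      return false; // one veto is enough to keep the directory
    }
  }
  // non-recursive delete, mirroring fs.delete(dir, false) above
  return !isRoot && fs.delete(dir, false);
}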


@@ -17,12 +17,13 @@
*/
package org.apache.hadoop.hbase.master.cleaner;
-import org.apache.yetus.audience.InterfaceAudience;
+import java.util.Map;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hbase.Stoppable;
-import java.util.Map;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* General interface for cleaning files from a folder (generally an archive or
@@ -50,4 +51,13 @@ public interface FileCleanerDelegate extends Configurable, Stoppable {
*/
default void preClean() {
}
/**
* Check if an empty directory with no subdirs or subfiles can be deleted
* @param dir Path of the directory
* @return True if the directory can be deleted, otherwise false
*/
default boolean isEmptyDirDeletable(Path dir) {
return true;
}
}
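
Because isEmptyDirDeletable is a default method returning true, existing delegates keep their current behavior, and a delegate opts in by overriding it. A hypothetical example (KeepMarkedDirsCleaner is illustrative only, not part of this commit):

// Illustrative delegate: keeps any empty directory whose name ends with ".keep".
public class KeepMarkedDirsCleaner extends BaseHFileCleanerDelegate {
  @Override
  protected boolean isFileDeletable(FileStatus fStat) {
    return true; // this delegate only cares about empty directories
  }

  @Override
  public boolean isEmptyDirDeletable(Path dir) {
    return !dir.getName().endsWith(".keep");
  }
}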


@@ -0,0 +1,135 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.security.access;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
/**
* Implementation of a file cleaner that checks whether an empty directory with no subdirs and
* subfiles is deletable when the user scan snapshot feature is enabled
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
@InterfaceStability.Evolving
public class SnapshotScannerHDFSAclCleaner extends BaseHFileCleanerDelegate {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotScannerHDFSAclCleaner.class);
private HMaster master;
private boolean userScanSnapshotEnabled = false;
@Override
public void init(Map<String, Object> params) {
if (params != null && params.containsKey(HMaster.MASTER)) {
this.master = (HMaster) params.get(HMaster.MASTER);
}
}
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
userScanSnapshotEnabled = isUserScanSnapshotEnabled(conf);
}
@Override
protected boolean isFileDeletable(FileStatus fStat) {
// This plugin does not handle file deletion, so return true by default
return true;
}
@Override
public boolean isEmptyDirDeletable(Path dir) {
if (userScanSnapshotEnabled) {
/*
* If the user scan snapshot feature is enabled (see HBASE-21995), then when a namespace or
* table exists, the archive namespace or table directories should not be deleted, because the
* HDFS ACLs are set on those directories; the archive data directory should not be deleted,
* because the HDFS ACLs for the global permission are set on it.
*/
return isEmptyArchiveDirDeletable(dir);
}
return true;
}
private boolean isUserScanSnapshotEnabled(Configuration conf) {
String masterCoprocessors = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
return conf.getBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, false)
&& masterCoprocessors.contains(SnapshotScannerHDFSAclController.class.getName())
&& masterCoprocessors.contains(AccessController.class.getName());
}
private boolean isEmptyArchiveDirDeletable(Path dir) {
try {
if (isArchiveDataDir(dir)) {
return false;
} else if (isArchiveNamespaceDir(dir) && namespaceExists(dir.getName())) {
return false;
} else if (isArchiveTableDir(dir)
&& tableExists(TableName.valueOf(dir.getParent().getName(), dir.getName()))) {
return false;
}
return true;
} catch (IOException e) {
LOG.warn("Check if empty dir {} is deletable error", dir, e);
return false;
}
}
@VisibleForTesting
static boolean isArchiveDataDir(Path path) {
if (path != null && path.getName().equals(HConstants.BASE_NAMESPACE_DIR)) {
Path parent = path.getParent();
return parent != null && parent.getName().equals(HConstants.HFILE_ARCHIVE_DIRECTORY);
}
return false;
}
@VisibleForTesting
static boolean isArchiveNamespaceDir(Path path) {
return path != null && isArchiveDataDir(path.getParent());
}
@VisibleForTesting
static boolean isArchiveTableDir(Path path) {
return path != null && isArchiveNamespaceDir(path.getParent());
}
private boolean namespaceExists(String namespace) throws IOException {
return master != null && master.listNamespaces().contains(namespace);
}
private boolean tableExists(TableName tableName) throws IOException {
return master != null && MetaTableAccessor.tableExists(master.getConnection(), tableName);
}
}
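
To make the path classification concrete: with the standard archive layout rooted at /hbase, the helpers above classify paths as follows. This is a sketch callable from a test in the same package, since the helpers are package-private:

// Standard archive layout: <root>/archive/data/<namespace>/<table>
Path tableDir = new Path("/hbase/archive/data/ns1/t1");
assertTrue(SnapshotScannerHDFSAclCleaner.isArchiveTableDir(tableDir));
assertTrue(SnapshotScannerHDFSAclCleaner.isArchiveNamespaceDir(tableDir.getParent()));
assertTrue(SnapshotScannerHDFSAclCleaner.isArchiveDataDir(tableDir.getParent().getParent()));
// /hbase/archive itself is not the archive data dir
assertFalse(
  SnapshotScannerHDFSAclCleaner.isArchiveDataDir(tableDir.getParent().getParent().getParent()));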


@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.security.access;
import static org.apache.hadoop.hbase.security.access.Permission.Action.READ;
import static org.apache.hadoop.hbase.security.access.Permission.Action.WRITE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
@@ -46,10 +48,12 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableSnapshotScanner;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.SecurityTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
@@ -94,6 +98,9 @@ public class TestSnapshotScannerHDFSAclController {
conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY) + ","
+ SnapshotScannerHDFSAclController.class.getName());
// set hfile cleaner plugin
conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
SnapshotScannerHDFSAclCleaner.class.getName());
TEST_UTIL.startMiniCluster();
admin = TEST_UTIL.getAdmin();
@@ -546,6 +553,39 @@ public class TestSnapshotScannerHDFSAclController {
}
}
@Test
public void testCleanArchiveTableDir() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table = TableName.valueOf(namespace, "t1");
String snapshot = namespace + "t1";
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
admin.snapshot(snapshot, table);
TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
// HFileCleaner will not delete the archive table directory even if it is an empty directory
HFileCleaner cleaner = TEST_UTIL.getHBaseCluster().getMaster().getHFileCleaner();
cleaner.choreForTesting();
Path archiveTableDir = HFileArchiveUtil.getTableArchivePath(rootDir, table);
assertTrue(fs.exists(archiveTableDir));
// delete table and grant user can scan snapshot
admin.disableTable(table);
admin.deleteTable(table);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
// Check the SnapshotScannerHDFSAclCleaner helper methods
assertTrue(SnapshotScannerHDFSAclCleaner.isArchiveTableDir(archiveTableDir));
assertTrue(SnapshotScannerHDFSAclCleaner.isArchiveNamespaceDir(archiveTableDir.getParent()));
assertTrue(
SnapshotScannerHDFSAclCleaner.isArchiveDataDir(archiveTableDir.getParent().getParent()));
assertFalse(SnapshotScannerHDFSAclCleaner
.isArchiveDataDir(archiveTableDir.getParent().getParent().getParent()));
}
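
Outside the test, the same wiring would be done in cluster configuration. A hedged sketch using the constants referenced above (exact property values live in those classes):

// Sketch: enable the user scan snapshot feature and register the cleaner.
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, true);
// isUserScanSnapshotEnabled() also requires both coprocessors to be present.
conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
  AccessController.class.getName() + "," + SnapshotScannerHDFSAclController.class.getName());
conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
  SnapshotScannerHDFSAclCleaner.class.getName());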
@Test
public void testGrantMobTable() throws Exception {
final String grantUserName = name.getMethodName();