From a67490abd3eabc6e663c0b28096bb3c7dac6c788 Mon Sep 17 00:00:00 2001
From: meiyi <myimeiyi@gmail.com>
Date: Thu, 23 Dec 2021 16:45:36 +0800
Subject: [PATCH] HBASE-26615 Snapshot referenced data files are deleted when
 delete a table with merge regions (#3971)

Signed-off-by: Duo Zhang <zhangduo@apache.org>
---
 .../procedure/DeleteTableProcedure.java       | 16 +++--
 .../client/TestTableSnapshotScanner.java      | 42 +++++++++++++++++++
 2 files changed, 55 insertions(+), 3 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 297efc240fc..f250c6d4ddb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteTableState;
+import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
 
 @InterfaceAudience.Private
 public class DeleteTableProcedure
@@ -281,9 +282,18 @@ public class DeleteTableProcedure
     if (fs.exists(tableDir)) {
       // Archive regions from FS (temp directory)
       if (archive) {
-        List<Path> regionDirList = regions.stream().filter(RegionReplicaUtil::isDefaultReplica)
-          .map(region ->
-            FSUtils.getRegionDirFromTableDir(tableDir, region)).collect(Collectors.toList());
+        List<Path> regionDirList = new ArrayList<>();
+        for (RegionInfo region : regions) {
+          if (RegionReplicaUtil.isDefaultReplica(region)) {
+            regionDirList.add(FSUtils.getRegionDirFromTableDir(tableDir, region));
+            List<RegionInfo> mergeRegions =
+              env.getAssignmentManager().getRegionStateStore().getMergeRegions(region);
+            if (!CollectionUtils.isEmpty(mergeRegions)) {
+              mergeRegions.stream().forEach(
+                r -> regionDirList.add(FSUtils.getRegionDirFromTableDir(tableDir, r)));
+            }
+          }
+        }
         HFileArchiver
           .archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(), tableDir,
             regionDirList);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
index fcc22866814..32eac9af0e1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
@@ -468,6 +468,48 @@ public class TestTableSnapshotScanner {
     }
   }
 
+  @Test
+  public void testDeleteTableWithMergedRegions() throws Exception {
+    setupCluster();
+    final TableName tableName = TableName.valueOf(this.name.getMethodName());
+    String snapshotName = tableName.getNameAsString() + "_snapshot";
+    Configuration conf = UTIL.getConfiguration();
+    try (Admin admin = UTIL.getConnection().getAdmin()) {
+      // disable compaction
+      admin.compactionSwitch(false,
+        admin.getRegionServers().stream().map(s -> s.getServerName()).collect(Collectors.toList()));
+      // create table
+      Table table = UTIL.createTable(tableName, FAMILIES, 1, bbb, yyy, 3);
+      List<RegionInfo> regions = admin.getRegions(tableName);
+      Assert.assertEquals(3, regions.size());
+      // write some data
+      UTIL.loadTable(table, FAMILIES);
+      // merge region
+      admin.mergeRegionsAsync(new byte[][] { regions.get(0).getEncodedNameAsBytes(),
+          regions.get(1).getEncodedNameAsBytes() },
+        false).get();
+      regions = admin.getRegions(tableName);
+      Assert.assertEquals(2, regions.size());
+      // snapshot
+      admin.snapshot(snapshotName, tableName);
+      // verify snapshot
+      try (TableSnapshotScanner scanner =
+          new TableSnapshotScanner(conf, UTIL.getDataTestDirOnTestFS(snapshotName), snapshotName,
+            new Scan().withStartRow(bbb).withStopRow(yyy))) {
+        verifyScanner(scanner, bbb, yyy);
+      }
+      // drop table
+      admin.disableTable(tableName);
+      admin.deleteTable(tableName);
+      // verify snapshot
+      try (TableSnapshotScanner scanner =
+          new TableSnapshotScanner(conf, UTIL.getDataTestDirOnTestFS(snapshotName), snapshotName,
+            new Scan().withStartRow(bbb).withStopRow(yyy))) {
+        verifyScanner(scanner, bbb, yyy);
+      }
+    }
+  }
+
   private void traverseAndSetFileTime(Path path, long time) throws IOException {
     fs.setTimes(path, time, -1);
     if (fs.isDirectory(path)) {