HBASE-26615 Snapshot referenced data files are deleted when delete a table with merge regions (#3971)
Signed-off-by: Duo Zhang <zhangduo@apache.org>
parent dbdef5982a
commit a67490abd3
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteTableState;
+import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
 
 @InterfaceAudience.Private
 public class DeleteTableProcedure
@@ -281,9 +282,18 @@ public class DeleteTableProcedure
     if (fs.exists(tableDir)) {
       // Archive regions from FS (temp directory)
       if (archive) {
-        List<Path> regionDirList = regions.stream().filter(RegionReplicaUtil::isDefaultReplica)
-          .map(region ->
-            FSUtils.getRegionDirFromTableDir(tableDir, region)).collect(Collectors.toList());
+        List<Path> regionDirList = new ArrayList<>();
+        for (RegionInfo region : regions) {
+          if (RegionReplicaUtil.isDefaultReplica(region)) {
+            regionDirList.add(FSUtils.getRegionDirFromTableDir(tableDir, region));
+            List<RegionInfo> mergeRegions =
+              env.getAssignmentManager().getRegionStateStore().getMergeRegions(region);
+            if (!CollectionUtils.isEmpty(mergeRegions)) {
+              mergeRegions.stream().forEach(
+                r -> regionDirList.add(FSUtils.getRegionDirFromTableDir(tableDir, r)));
+            }
+          }
+        }
         HFileArchiver
           .archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(), tableDir,
             regionDirList);
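For context on the hunk above: after two regions are merged, the resulting region can still reference HFiles that physically live under the directories of its merge-parent regions, and a snapshot taken after the merge records those references. The old stream-based code only archived the directories of the regions currently listed for the table, so deleting the table removed the parent directories and broke the snapshot. The rewritten loop also archives the merge-parent directories reported by the region state store. A standalone sketch of that collection step, using only the calls visible in the hunk and the same imports as DeleteTableProcedure (the helper name collectRegionDirsForArchive is illustrative, not part of the patch):

  // Illustrative sketch only (not the patch itself): gather every region directory that
  // HFileArchiver.archiveRegions() must move aside, including merge-parent directories
  // whose HFiles may still be referenced by snapshots.
  private static List<Path> collectRegionDirsForArchive(MasterProcedureEnv env, Path tableDir,
      List<RegionInfo> regions) throws IOException {
    List<Path> regionDirList = new ArrayList<>();
    for (RegionInfo region : regions) {
      if (!RegionReplicaUtil.isDefaultReplica(region)) {
        continue; // replicas share the default replica's files; nothing extra to archive
      }
      regionDirList.add(FSUtils.getRegionDirFromTableDir(tableDir, region));
      // Merge parents are offline and no longer listed with the table, but their
      // directories may still hold files that snapshot manifests point at.
      List<RegionInfo> mergeRegions =
        env.getAssignmentManager().getRegionStateStore().getMergeRegions(region);
      if (!CollectionUtils.isEmpty(mergeRegions)) {
        for (RegionInfo parent : mergeRegions) {
          regionDirList.add(FSUtils.getRegionDirFromTableDir(tableDir, parent));
        }
      }
    }
    return regionDirList;
  }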
@@ -468,6 +468,48 @@ public class TestTableSnapshotScanner {
     }
   }
 
+  @Test
+  public void testDeleteTableWithMergedRegions() throws Exception {
+    setupCluster();
+    final TableName tableName = TableName.valueOf(this.name.getMethodName());
+    String snapshotName = tableName.getNameAsString() + "_snapshot";
+    Configuration conf = UTIL.getConfiguration();
+    try (Admin admin = UTIL.getConnection().getAdmin()) {
+      // disable compaction
+      admin.compactionSwitch(false,
+        admin.getRegionServers().stream().map(s -> s.getServerName()).collect(Collectors.toList()));
+      // create table
+      Table table = UTIL.createTable(tableName, FAMILIES, 1, bbb, yyy, 3);
+      List<RegionInfo> regions = admin.getRegions(tableName);
+      Assert.assertEquals(3, regions.size());
+      // write some data
+      UTIL.loadTable(table, FAMILIES);
+      // merge region
+      admin.mergeRegionsAsync(new byte[][] { regions.get(0).getEncodedNameAsBytes(),
+        regions.get(1).getEncodedNameAsBytes() },
+        false).get();
+      regions = admin.getRegions(tableName);
+      Assert.assertEquals(2, regions.size());
+      // snapshot
+      admin.snapshot(snapshotName, tableName);
+      // verify snapshot
+      try (TableSnapshotScanner scanner =
+          new TableSnapshotScanner(conf, UTIL.getDataTestDirOnTestFS(snapshotName), snapshotName,
+            new Scan().withStartRow(bbb).withStopRow(yyy))) {
+        verifyScanner(scanner, bbb, yyy);
+      }
+      // drop table
+      admin.disableTable(tableName);
+      admin.deleteTable(tableName);
+      // verify snapshot
+      try (TableSnapshotScanner scanner =
+          new TableSnapshotScanner(conf, UTIL.getDataTestDirOnTestFS(snapshotName), snapshotName,
+            new Scan().withStartRow(bbb).withStopRow(yyy))) {
+        verifyScanner(scanner, bbb, yyy);
+      }
+    }
+  }
+
   private void traverseAndSetFileTime(Path path, long time) throws IOException {
     fs.setTimes(path, time, -1);
     if (fs.isDirectory(path)) {
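If one wanted to tighten the test further, a direct filesystem assertion could confirm that a merge-parent region directory was archived rather than deleted. A rough sketch, assuming the encoded name of one pre-merge region is captured before mergeRegionsAsync (e.g. String parentEncoded = regions.get(0).getEncodedName();) and that the default archive layout <rootDir>/archive/data/<namespace>/<table>/<region> applies; this is not part of the committed test:

  // Hypothetical extra assertion (not in the patch): after deleteTable the merge-parent
  // region directory should live under the archive directory instead of being gone entirely.
  Path archivedParentDir = new Path(UTIL.getDefaultRootDirPath(),
    "archive/data/default/" + tableName.getQualifierAsString() + "/" + parentEncoded);
  Assert.assertTrue(UTIL.getTestFileSystem().exists(archivedParentDir));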