From d8c1e0e004e3aa838d599be7c7e30f3736b02ef8 Mon Sep 17 00:00:00 2001
From: huzheng
Date: Wed, 24 May 2017 20:31:57 +0800
Subject: [PATCH] HBASE-16011 TableSnapshotScanner and TableSnapshotInputFormat
 can produce duplicate rows if split table.

Signed-off-by: tedyu
---
 .../hbase/client/TableSnapshotScanner.java         |  5 +-
 .../TableSnapshotInputFormatImpl.java              |  6 +-
 .../client/TestTableSnapshotScanner.java           | 50 +++++++++++++
 .../TestTableSnapshotInputFormat.java              | 70 +++++++++++++++++++
 .../hbase/snapshot/SnapshotTestingUtils.java       | 13 +++-
 5 files changed, 141 insertions(+), 3 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
index 4601ae46ac9..707befb9816 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
@@ -128,7 +128,10 @@ public class TableSnapshotScanner extends AbstractClientScanner {
 
     htd = meta.getTableDescriptor();
     regions = new ArrayList<HRegionInfo>(restoredRegions.size());
-    for (HRegionInfo hri: restoredRegions) {
+    for (HRegionInfo hri : restoredRegions) {
+      if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) {
+        continue;
+      }
       if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
         hri.getStartKey(), hri.getEndKey())) {
         regions.add(hri);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index d06fdf902a9..9f52850d959 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -274,7 +274,11 @@ public class TableSnapshotInputFormatImpl {
     List<HRegionInfo> regionInfos = Lists.newArrayListWithCapacity(regionManifests.size());
 
     for (SnapshotRegionManifest regionManifest : regionManifests) {
-      regionInfos.add(HRegionInfo.convert(regionManifest.getRegionInfo()));
+      HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());
+      if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) {
+        continue;
+      }
+      regionInfos.add(hri);
     }
     return regionInfos;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
index 0e2b670bf28..da0df406a20 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
@@ -109,6 +109,56 @@
     table.close();
   }
 
+  @Test
+  public void testNoDuplicateResultsWhenSplitting() throws Exception {
+    setupCluster();
+    TableName tableName = TableName.valueOf("testNoDuplicateResultsWhenSplitting");
+    String snapshotName = "testSnapshotBug";
+    try {
+      if (UTIL.getHBaseAdmin().tableExists(tableName)) {
+        UTIL.deleteTable(tableName);
+      }
+
+      UTIL.createTable(tableName, FAMILIES);
+      Admin admin = UTIL.getHBaseAdmin();
+
+      // put some stuff in the table
+      Table table = UTIL.getConnection().getTable(tableName);
+      UTIL.loadTable(table, FAMILIES);
+
+      // split to 2 regions
+      admin.split(tableName, Bytes.toBytes("eee"));
+      TestTableSnapshotInputFormat.blockUntilSplitFinished(UTIL, tableName, 2);
+
+      Path rootDir = FSUtils.getRootDir(UTIL.getConfiguration());
+      FileSystem fs = rootDir.getFileSystem(UTIL.getConfiguration());
+
+      SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
+        Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true);
+
+      // load different values
+      byte[] value = Bytes.toBytes("after_snapshot_value");
+      UTIL.loadTable(table, FAMILIES, value);
+
+      // cause flush to create new files in the region
+      admin.flush(tableName);
+      table.close();
+
+      Path restoreDir = UTIL.getDataTestDirOnTestFS(snapshotName);
+      Scan scan = new Scan().withStartRow(bbb).withStopRow(yyy); // limit the scan
+
+      TableSnapshotScanner scanner =
+          new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir, snapshotName, scan);
+
+      verifyScanner(scanner, bbb, yyy);
+      scanner.close();
+    } finally {
+      UTIL.getHBaseAdmin().deleteSnapshot(snapshotName);
+      UTIL.deleteTable(tableName);
+      tearDownCluster();
+    }
+  }
+
   @Test
   public void testWithSingleRegion() throws Exception {
     testScanner(UTIL, "testWithSingleRegion", 1, false);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
index 349f96ee034..72928ebc165 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
@@ -22,14 +22,20 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Result;
@@ -37,6 +43,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.TableSnapshotRegionSplit;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
@@ -212,6 +219,69 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa
     }
   }
 
+  public static void blockUntilSplitFinished(HBaseTestingUtility util, TableName tableName,
+      int expectedRegionSize) throws Exception {
+    for (int i = 0; i < 100; i++) {
+      List<HRegionInfo> hRegionInfoList = util.getHBaseAdmin().getTableRegions(tableName);
+      if (hRegionInfoList.size() >= expectedRegionSize) {
+        break;
+      }
+      Thread.sleep(1000);
+    }
+  }
+
+  @Test
+  public void testNoDuplicateResultsWhenSplitting() throws Exception {
+    setupCluster();
+    TableName tableName = TableName.valueOf("testNoDuplicateResultsWhenSplitting");
+    String snapshotName = "testSnapshotBug";
+    try {
+      if (UTIL.getHBaseAdmin().tableExists(tableName)) {
+        UTIL.deleteTable(tableName);
+      }
+
+      UTIL.createTable(tableName, FAMILIES);
+      HBaseAdmin admin = UTIL.getHBaseAdmin();
+
+      // put some stuff in the table
+      Table table = UTIL.getConnection().getTable(tableName);
+      UTIL.loadTable(table, FAMILIES);
+
+      // split to 2 regions
+      admin.split(tableName, Bytes.toBytes("eee"));
+      blockUntilSplitFinished(UTIL, tableName, 2);
+
+      Path rootDir = FSUtils.getRootDir(UTIL.getConfiguration());
+      FileSystem fs = rootDir.getFileSystem(UTIL.getConfiguration());
+
+      SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES),
+        null, snapshotName, rootDir, fs, true);
+
+      // load different values
+      byte[] value = Bytes.toBytes("after_snapshot_value");
+      UTIL.loadTable(table, FAMILIES, value);
+
+      // cause flush to create new files in the region
+      admin.flush(tableName);
+      table.close();
+
+      Job job = new Job(UTIL.getConfiguration());
+      Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName);
+      // limit the scan
+      Scan scan = new Scan().withStartRow(getStartRow()).withStopRow(getEndRow());
+
+      TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan,
+        TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false,
+        tmpTableDir);
+
+      verifyWithMockedMapReduce(job, 2, 2, getStartRow(), getEndRow());
+    } finally {
+      UTIL.getHBaseAdmin().deleteSnapshot(snapshotName);
+      UTIL.deleteTable(tableName);
+      tearDownCluster();
+    }
+  }
+
   private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumSplits,
       byte[] startRow, byte[] stopRow)
       throws IOException, InterruptedException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index 403fffa1472..9ab19775aad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -245,7 +245,18 @@ public final class SnapshotTestingUtils {
     List<HRegionInfo> regions = admin.getTableRegions(tableName);
     // remove the non-default regions
     RegionReplicaUtil.removeNonDefaultRegions(regions);
-    assertEquals(regions.size(), regionManifests.size());
+
+    // If a snapshot is taken while the table is splitting, the offline split parent regions
+    // are also recorded in the snapshot region manifest; exclude them from the region count.
+    int regionCountExclusiveSplitParent = 0;
+    for (SnapshotRegionManifest snapshotRegionManifest : regionManifests.values()) {
+      HRegionInfo hri = HRegionInfo.convert(snapshotRegionManifest.getRegionInfo());
+      if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) {
+        continue;
+      }
+      regionCountExclusiveSplitParent++;
+    }
+    assertEquals(regions.size(), regionCountExclusiveSplitParent);
 
     // Verify Regions (redundant check, see MasterSnapshotVerifier)
     for (HRegionInfo info : regions) {
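
Both production hunks apply the same guard, and for the same reason: when a
region splits, the parent region is taken offline and its two daughters
initially hold reference files pointing at the parent's HFiles. A snapshot
manifest written while the split is in flight can therefore list the parent
and both daughters, even though the daughters already cover the parent's
entire key range, so scanning all three returns every parent row twice.
A standalone sketch of the shared predicate (the class and method names
below are illustrative, not part of the patch):

    import org.apache.hadoop.hbase.HRegionInfo;

    public final class SplitParentRegions {

      private SplitParentRegions() {
      }

      /**
       * True when the region is an offline split parent. Its daughters
       * already cover the same key range, so scanning it as well would
       * duplicate every row it contains; skipping it loses no data because
       * the daughters' reference files resolve to the parent's HFiles.
       */
      public static boolean shouldSkip(HRegionInfo hri) {
        return hri.isOffline() && (hri.isSplit() || hri.isSplitParent());
      }
    }

For reference, the client-side pattern that TestTableSnapshotScanner
exercises, reduced to its essentials (the snapshot name and restore path are
illustrative; the restore directory should live on the same filesystem as
the HBase root directory):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.TableSnapshotScanner;

    public class SnapshotScanExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Path restoreDir = new Path("/hbase-restore/my-snapshot");
        Scan scan = new Scan();
        // Reads the restored snapshot files directly from the filesystem,
        // bypassing the region servers; with the fix above, offline split
        // parents are no longer scanned alongside their daughters.
        try (TableSnapshotScanner scanner =
            new TableSnapshotScanner(conf, restoreDir, "my-snapshot", scan)) {
          for (Result result; (result = scanner.next()) != null;) {
            // process each row exactly once
          }
        }
      }
    }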