From 7abea1c4bac0bd72b8352055f45dc1698a7a5d28 Mon Sep 17 00:00:00 2001
From: Xiaolin Ha
Date: Mon, 6 Mar 2023 17:23:42 +0800
Subject: [PATCH] Revert "HBASE-25709 Close region may stuck when region is
 compacting and skipped most cells read (#4536)"

This reverts commit 37858bb6b058f5af17d19aabedba69816b2941aa.
---
 .../org/apache/hadoop/hbase/HConstants.java   |  5 --
 .../hbase/mob/DefaultMobStoreCompactor.java   |  6 +-
 .../regionserver/compactions/Compactor.java   |  9 +--
 .../hbase/mob/FaultyMobStoreCompactor.java    |  6 +-
 .../hbase/regionserver/TestCompaction.java    |  1 -
 .../hbase/regionserver/TestHRegion.java       | 69 -------------------
 6 files changed, 6 insertions(+), 90 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index d56607653fc..26c105b40cb 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -326,11 +326,6 @@ public final class HConstants {
   public static final String COMPACTION_KV_MAX = "hbase.hstore.compaction.kv.max";
   public static final int COMPACTION_KV_MAX_DEFAULT = 10;
 
-  /** Parameter name for the scanner size limit to be used in compactions */
-  public static final String COMPACTION_SCANNER_SIZE_MAX =
-    "hbase.hstore.compaction.scanner.size.limit";
-  public static final long COMPACTION_SCANNER_SIZE_MAX_DEFAULT = 10 * 1024 * 1024L; // 10MB
-
   /** Parameter name for HBase instance root directory */
   public static final String HBASE_DIR = "hbase.rootdir";
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
index 44f77b62ad8..695a632b243 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
@@ -345,10 +345,8 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
     long cellsSizeCompactedToMob = 0, cellsSizeCompactedFromMob = 0;
     boolean finished = false;
 
-    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax)
-      .setSizeLimit(ScannerContext.LimitScope.BETWEEN_CELLS, Long.MAX_VALUE, Long.MAX_VALUE,
-        compactScannerSizeLimit)
-      .build();
+    ScannerContext scannerContext =
+      ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
     throughputController.start(compactionName);
     KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? (KeyValueScanner) scanner : null;
     long shippedCallSizeLimit =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index d9ad265da64..363fa29909e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -83,7 +83,6 @@ public abstract class Compactor {
   protected final Configuration conf;
   protected final HStore store;
   protected final int compactionKVMax;
-  protected final long compactScannerSizeLimit;
   protected final Compression.Algorithm majorCompactionCompression;
   protected final Compression.Algorithm minorCompactionCompression;
 
@@ -110,8 +109,6 @@ public abstract class Compactor {
     this.store = store;
     this.compactionKVMax =
       this.conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
-    this.compactScannerSizeLimit = this.conf.getLong(HConstants.COMPACTION_SCANNER_SIZE_MAX,
-      HConstants.COMPACTION_SCANNER_SIZE_MAX_DEFAULT);
     this.majorCompactionCompression = (store.getColumnFamilyDescriptor() == null)
       ? Compression.Algorithm.NONE
       : store.getColumnFamilyDescriptor().getMajorCompactionCompressionType();
@@ -432,10 +429,8 @@
     String compactionName = ThroughputControlUtil.getNameForThrottling(store, "compaction");
     long now = 0;
     boolean hasMore;
-    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax)
-      .setSizeLimit(ScannerContext.LimitScope.BETWEEN_CELLS, Long.MAX_VALUE, Long.MAX_VALUE,
-        compactScannerSizeLimit)
-      .build();
+    ScannerContext scannerContext =
+      ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
     throughputController.start(compactionName);
     Shipper shipper = (scanner instanceof Shipper) ? (Shipper) scanner : null;
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java
index 943a7acb648..96675fb69e5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java
@@ -142,10 +142,8 @@ public class FaultyMobStoreCompactor extends DefaultMobStoreCompactor {
     long cellsSizeCompactedToMob = 0, cellsSizeCompactedFromMob = 0;
     boolean finished = false;
 
-    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax)
-      .setSizeLimit(ScannerContext.LimitScope.BETWEEN_CELLS, Long.MAX_VALUE, Long.MAX_VALUE,
-        compactScannerSizeLimit)
-      .build();
+    ScannerContext scannerContext =
+      ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
     throughputController.start(compactionName);
     KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? (KeyValueScanner) scanner : null;
     long shippedCallSizeLimit =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index c0bc72079cb..c4d5d35f7d8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -121,7 +121,6 @@ public class TestCompaction {
     // Set cache flush size to 1MB
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
     conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
-    conf.setLong(HConstants.COMPACTION_SCANNER_SIZE_MAX, 10L);
     conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
       NoLimitThroughputController.class.getName());
     compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 4282d16fa6e..637aab9ff5c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -6919,75 +6919,6 @@ public class TestHRegion {
     assertNull(r.getValue(fam1, q1));
   }
 
-  @Test
-  public void testTTLsUsingSmallHeartBeatCells() throws IOException {
-    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
-    EnvironmentEdgeManager.injectEdge(edge);
-
-    final byte[] row = Bytes.toBytes("testRow");
-    final byte[] q1 = Bytes.toBytes("q1");
-    final byte[] q2 = Bytes.toBytes("q2");
-    final byte[] q3 = Bytes.toBytes("q3");
-    final byte[] q4 = Bytes.toBytes("q4");
-    final byte[] q5 = Bytes.toBytes("q5");
-    final byte[] q6 = Bytes.toBytes("q6");
-    final byte[] q7 = Bytes.toBytes("q7");
-    final byte[] q8 = Bytes.toBytes("q8");
-
-    // 10 seconds
-    int ttlSecs = 10;
-    TableDescriptor tableDescriptor =
-      TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(
-        ColumnFamilyDescriptorBuilder.newBuilder(fam1).setTimeToLive(ttlSecs).build()).build();
-
-    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
-    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
-    // using small heart beat cells
-    conf.setLong(StoreScanner.HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK, 2);
-
-    region = HBaseTestingUtil.createRegionAndWAL(
-      RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(),
-      TEST_UTIL.getDataTestDir(), conf, tableDescriptor);
-    assertNotNull(region);
-    long now = EnvironmentEdgeManager.currentTime();
-    // Add a cell that will expire in 5 seconds via cell TTL
-    region.put(new Put(row).addColumn(fam1, q1, now, HConstants.EMPTY_BYTE_ARRAY));
-    region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY));
-    region.put(new Put(row).addColumn(fam1, q3, now, HConstants.EMPTY_BYTE_ARRAY));
-    // Add a cell that will expire after 10 seconds via family setting
-    region
-      .put(new Put(row).addColumn(fam1, q4, now + ttlSecs * 1000 + 1, HConstants.EMPTY_BYTE_ARRAY));
-    region
-      .put(new Put(row).addColumn(fam1, q5, now + ttlSecs * 1000 + 1, HConstants.EMPTY_BYTE_ARRAY));
-
-    region.put(new Put(row).addColumn(fam1, q6, now, HConstants.EMPTY_BYTE_ARRAY));
-    region.put(new Put(row).addColumn(fam1, q7, now, HConstants.EMPTY_BYTE_ARRAY));
-    region
-      .put(new Put(row).addColumn(fam1, q8, now + ttlSecs * 1000 + 1, HConstants.EMPTY_BYTE_ARRAY));
-
-    // Flush so we are sure store scanning gets this right
-    region.flush(true);
-
-    // A query at time T+0 should return all cells
-    checkScan(8);
-    region.delete(new Delete(row).addColumn(fam1, q8));
-
-    // Increment time to T+ttlSecs seconds
-    edge.incrementTime(ttlSecs * 1000);
-    checkScan(2);
-  }
-
-  private void checkScan(int expectCellSize) throws IOException {
-    Scan s = new Scan().withStartRow(row);
-    ScannerContext.Builder contextBuilder = ScannerContext.newBuilder(true);
-    ScannerContext scannerContext = contextBuilder.build();
-    RegionScanner scanner = region.getScanner(s);
-    List kvs = new ArrayList<>();
-    scanner.next(kvs, scannerContext);
-    assertEquals(expectCellSize, kvs.size());
-    scanner.close();
-  }
-
   @Test
   public void testIncrementTimestampsAreMonotonic() throws IOException {
     region = initHRegion(tableName, method, CONF, fam1);
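
Note on the reverted behaviour (illustration only, not part of the patch to apply): HBASE-25709 bounded each compaction scanner batch by accumulated data size in addition to the cell-count limit, so a compacting region could return from next() and notice a pending close even when almost every cell it read was skipped. The minimal sketch below is reconstructed from the lines this revert deletes. The class and method names are invented for illustration; the two configuration keys and the 10MB default come from the removed HConstants entries; and it assumes it compiles alongside HBase's server code, since ScannerContext is an internal (IA.Private) API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.ScannerContext;

// Illustrative helper only; not HBase code. Mirrors the two ScannerContext shapes in the diff above.
final class CompactionScannerContextSketch {

  // Pre-revert shape: batch limit plus a BETWEEN_CELLS size limit, as in the removed "-" lines.
  // The size limit makes next() return periodically even when most cells are filtered out,
  // giving the compaction loop a chance to observe a close request.
  static ScannerContext withSizeLimit(Configuration conf) {
    int compactionKVMax = conf.getInt("hbase.hstore.compaction.kv.max", 10);
    long compactScannerSizeLimit =
      conf.getLong("hbase.hstore.compaction.scanner.size.limit", 10 * 1024 * 1024L); // 10MB
    return ScannerContext.newBuilder().setBatchLimit(compactionKVMax)
      .setSizeLimit(ScannerContext.LimitScope.BETWEEN_CELLS, Long.MAX_VALUE, Long.MAX_VALUE,
        compactScannerSizeLimit)
      .build();
  }

  // Post-revert shape: only the cell-count batch limit remains, as in the restored "+" lines.
  static ScannerContext batchLimitOnly(Configuration conf) {
    int compactionKVMax = conf.getInt("hbase.hstore.compaction.kv.max", 10);
    return ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
  }
}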