diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
index 3de7992cb12..cc610e5a737 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
@@ -107,8 +107,7 @@ public class DefaultMobStoreFlusher extends DefaultStoreFlusher {
     if (cellsCount == 0) return result; // don't flush if there are no entries
 
     // Use a store scanner to find which rows to flush.
-    long smallestReadPoint = store.getSmallestReadPoint();
-    InternalScanner scanner = createScanner(snapshot.getScanners(), smallestReadPoint, tracker);
+    InternalScanner scanner = createScanner(snapshot.getScanners(), tracker);
     StoreFileWriter writer;
     try {
       // TODO: We can fail in the below block before we complete adding this flush to
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
index f1bb45bc73e..a7d7fb1f3d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
@@ -52,8 +52,7 @@ public class DefaultStoreFlusher extends StoreFlusher {
     if (cellsCount == 0) return result; // don't flush if there are no entries
 
     // Use a store scanner to find which rows to flush.
-    long smallestReadPoint = store.getSmallestReadPoint();
-    InternalScanner scanner = createScanner(snapshot.getScanners(), smallestReadPoint, tracker);
+    InternalScanner scanner = createScanner(snapshot.getScanners(), tracker);
     StoreFileWriter writer;
     try {
       // TODO: We can fail in the below block before we complete adding this flush to
@@ -66,7 +65,7 @@ public class DefaultStoreFlusher extends StoreFlusher {
             snapshot.isTagsPresent(), false);
       IOException e = null;
       try {
-        performFlush(scanner, writer, smallestReadPoint, throughputController);
+        performFlush(scanner, writer, throughputController);
       } catch (IOException ioe) {
         e = ioe;
         // throw the exception out
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
index 4c539ce12ef..67eb375ee6f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
@@ -73,20 +73,20 @@ abstract class StoreFlusher {
   /**
    * Creates the scanner for flushing snapshot. Also calls coprocessors.
    * @param snapshotScanners
-   * @param smallestReadPoint
    * @return The scanner; null if coprocessor is canceling the flush.
    */
   protected final InternalScanner createScanner(List<KeyValueScanner> snapshotScanners,
-      long smallestReadPoint, FlushLifeCycleTracker tracker) throws IOException {
+      FlushLifeCycleTracker tracker) throws IOException {
     ScanInfo scanInfo;
     if (store.getCoprocessorHost() != null) {
       scanInfo = store.getCoprocessorHost().preFlushScannerOpen(store, tracker);
     } else {
       scanInfo = store.getScanInfo();
     }
+    final long smallestReadPoint = store.getSmallestReadPoint();
     InternalScanner scanner = new StoreScanner(store, scanInfo, snapshotScanners,
         ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, HConstants.OLDEST_TIMESTAMP);
-    assert scanner != null;
+
     if (store.getCoprocessorHost() != null) {
       try {
         return store.getCoprocessorHost().preFlush(store, scanner, tracker);
@@ -102,11 +102,10 @@ abstract class StoreFlusher {
    * Performs memstore flush, writing data from scanner into sink.
    * @param scanner Scanner to get data from.
    * @param sink Sink to write data to. Could be StoreFile.Writer.
-   * @param smallestReadPoint Smallest read point used for the flush.
    * @param throughputController A controller to avoid flush too fast
    */
   protected void performFlush(InternalScanner scanner, CellSink sink,
-      long smallestReadPoint, ThroughputController throughputController) throws IOException {
+      ThroughputController throughputController) throws IOException {
     int compactionKVMax = conf.getInt(HConstants.COMPACTION_KV_MAX,
       HConstants.COMPACTION_KV_MAX_DEFAULT);
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
index 59b91d5b624..7beec5e2e8a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
@@ -61,8 +61,7 @@ public class StripeStoreFlusher extends StoreFlusher {
     int cellsCount = snapshot.getCellsCount();
     if (cellsCount == 0) return result; // don't flush if there are no entries
 
-    long smallestReadPoint = store.getSmallestReadPoint();
-    InternalScanner scanner = createScanner(snapshot.getScanners(), smallestReadPoint, tracker);
+    InternalScanner scanner = createScanner(snapshot.getScanners(), tracker);
 
     // Let policy select flush method.
     StripeFlushRequest req = this.policy.selectFlush(store.getComparator(), this.stripes,
@@ -77,7 +76,7 @@ public class StripeStoreFlusher extends StoreFlusher {
       mw.init(storeScanner, factory);
 
       synchronized (flushLock) {
-        performFlush(scanner, mw, smallestReadPoint, throughputController);
+        performFlush(scanner, mw, throughputController);
         result = mw.commitWriters(cacheFlushSeqNum, false);
         success = true;
       }