diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java index 53e680dd5cd..99d5a6ae005 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import java.io.IOException; import java.util.Map; import com.google.common.collect.Maps; @@ -193,12 +192,8 @@ public abstract class Query extends OperationWithAttributes { */ public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { - try { - colFamTimeRangeMap.put(cf, new TimeRange(minStamp, maxStamp)); - return this; - } catch (IOException ioe) { - throw new IllegalArgumentException(ioe); - } + colFamTimeRangeMap.put(cf, new TimeRange(minStamp, maxStamp)); + return this; } /** @@ -207,6 +202,4 @@ public abstract class Query extends OperationWithAttributes { public Map getColumnFamilyTimeRange() { return this.colFamTimeRangeMap; } - - -} +} \ No newline at end of file
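The client-side effect of the Query.java change above, as a minimal sketch: since TimeRange construction no longer throws a checked IOException, callers lose the try/catch too. The table and family names below are made up; Scan and setColumnFamilyTimeRange are the real client APIs.

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PerFamilyTimeRangeSketch {
      public static Scan buildScan() {
        Scan scan = new Scan();
        byte[] cf = Bytes.toBytes("cf");               // hypothetical column family
        // No checked exception to handle any more: a bad range (maxStamp < minStamp, or a
        // negative stamp) now surfaces as an unchecked IllegalArgumentException.
        scan.setColumnFamilyTimeRange(cf, 0L, 1000L);  // versions in [0, 1000) for this family
        return scan;
      }
    }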
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 769945b757d..8cb937a11ab 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -529,7 +529,12 @@ public final class HConstants { /** * Timestamp to use when we want to refer to the oldest cell. + * Special! Used in fake Cells only. Should never be the timestamp on an actual Cell returned to + * a client. + * @deprecated Should not be public since hbase-1.3.0. For internal use only. To be moved + * internal to Scanners; it is a special timestamp value never to be returned as the timestamp + * on a Cell. */ + @Deprecated public static final long OLDEST_TIMESTAMP = Long.MIN_VALUE; /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java index eb31b87baf6..6d502fd9c1a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase; -import java.io.DataInput; import java.io.EOFException; import java.io.IOException; import java.io.InputStream; @@ -27,8 +26,8 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.KeyValue.Type; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.util.StreamUtils; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; @@ -257,7 +256,7 @@ public class KeyValueUtil { /** * Create a KeyValue for the specified row, family and qualifier that would be * larger than or equal to all other possible KeyValues that have the same - * row, family, qualifier. Used for reseeking. + * row, family, qualifier. Used for reseeking. Should NEVER be returned to a client. * * @param row * row key @@ -303,7 +302,7 @@ * but creates the last key on the row/column of this KV (the value part of * the returned KV is always empty). Used in creating "fake keys" for the * multi-column Bloom filter optimization to skip the row/column we already - * know is not in the file. + * know is not in the file. Not to be returned to clients. * * @param kv - cell * @return the last key on the row/column of the given key-value pair
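For context, a small sketch (not part of the patch) of the invariant these comments describe: cells stamped with HConstants.OLDEST_TIMESTAMP are internal seek sentinels and must never reach a client. The helper class and method names are hypothetical; Cell and HConstants are real.

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.HConstants;

    final class FakeCellGuard {
      private FakeCellGuard() {}

      /** True if the cell carries the OLDEST_TIMESTAMP sentinel used on fake seek keys. */
      static boolean isFakeSeekCell(Cell cell) {
        return cell.getTimestamp() == HConstants.OLDEST_TIMESTAMP;
      }
    }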
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java index 2b706446f62..8f2369477dd 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java @@ -26,22 +26,23 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.util.Bytes; /** - * Represents an interval of version timestamps. + * Represents an interval of version timestamps. Presumes timestamps between + * {@link #INITIAL_MIN_TIMESTAMP} and {@link #INITIAL_MAX_TIMESTAMP} only. Gets freaked out if + * passed a timestamp that is < {@link #INITIAL_MIN_TIMESTAMP}. + * <p> * Evaluated according to minStamp <= timestamp < maxStamp * or [minStamp,maxStamp) in interval notation. + * <p>
* Only used internally; should not be accessed directly by clients. + * <p>
+ * Immutable. Thread-safe. */ @InterfaceAudience.Public @InterfaceStability.Stable public class TimeRange { - static final long INITIAL_MIN_TIMESTAMP = 0L; - private static final long MIN_TIME = INITIAL_MIN_TIMESTAMP; - static final long INITIAL_MAX_TIMESTAMP = Long.MAX_VALUE; - static final long MAX_TIME = INITIAL_MAX_TIMESTAMP; - private long minStamp = MIN_TIME; - private long maxStamp = MAX_TIME; + public static final long INITIAL_MIN_TIMESTAMP = 0L; + public static final long INITIAL_MAX_TIMESTAMP = Long.MAX_VALUE; + private final long minStamp; + private final long maxStamp; private final boolean allTime; /** @@ -51,7 +52,7 @@ */ @Deprecated public TimeRange() { - allTime = true; + this(INITIAL_MIN_TIMESTAMP, INITIAL_MAX_TIMESTAMP); } /** @@ -61,8 +62,7 @@ */ @Deprecated public TimeRange(long minStamp) { - this.minStamp = minStamp; - this.allTime = this.minStamp == MIN_TIME; + this(minStamp, INITIAL_MAX_TIMESTAMP); } /** @@ -72,30 +72,7 @@ */ @Deprecated public TimeRange(byte [] minStamp) { - this.minStamp = Bytes.toLong(minStamp); - this.allTime = false; - } - - /** - * Represents interval [minStamp, maxStamp) - * @param minStamp the minimum timestamp, inclusive - * @param maxStamp the maximum timestamp, exclusive - * @throws IllegalArgumentException if either <0, - * @throws IOException if max smaller than min. - * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above - */ - @Deprecated - public TimeRange(long minStamp, long maxStamp) throws IOException { - if (minStamp < 0 || maxStamp < 0) { - throw new IllegalArgumentException("Timestamp cannot be negative. minStamp:" + minStamp - + ", maxStamp:" + maxStamp); - } - if (maxStamp < minStamp) { - throw new IOException("maxStamp is smaller than minStamp"); - } - this.minStamp = minStamp; - this.maxStamp = maxStamp; - this.allTime = this.minStamp == MIN_TIME && this.maxStamp == MAX_TIME; + this(Bytes.toLong(minStamp)); } /** @@ -111,6 +88,35 @@ */ this(Bytes.toLong(minStamp), Bytes.toLong(maxStamp)); } + /** + * Represents interval [minStamp, maxStamp) + * @param minStamp the minimum timestamp, inclusive + * @param maxStamp the maximum timestamp, exclusive + * @throws IllegalArgumentException if either is < 0 + * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above + */ + @Deprecated + public TimeRange(long minStamp, long maxStamp) { + check(minStamp, maxStamp); + this.minStamp = minStamp; + this.maxStamp = maxStamp; + this.allTime = isAllTime(minStamp, maxStamp); + } + + private static boolean isAllTime(long minStamp, long maxStamp) { + return minStamp == INITIAL_MIN_TIMESTAMP && maxStamp == INITIAL_MAX_TIMESTAMP; + } + + private static void check(long minStamp, long maxStamp) { + if (minStamp < 0 || maxStamp < 0) { + throw new IllegalArgumentException("Timestamp cannot be negative. minStamp:" + minStamp + + ", maxStamp:" + maxStamp); + } + if (maxStamp < minStamp) { + throw new IllegalArgumentException("maxStamp is smaller than minStamp"); + } + } + /** * @return the smallest timestamp that should be considered */ @@ -136,14 +142,15 @@ /** * Check if the specified timestamp is within this TimeRange. * <p>
- * Returns true if within interval [minStamp, maxStamp), false - * if not. + * Returns true if within interval [minStamp, maxStamp), false if not. * @param bytes timestamp to check * @param offset offset into the bytes * @return true if within TimeRange, false if not */ public boolean withinTimeRange(byte [] bytes, int offset) { - if(allTime) return true; + if (allTime) { + return true; + } return withinTimeRange(Bytes.toLong(bytes, offset)); } @@ -156,6 +163,7 @@ * @return true if within TimeRange, false if not */ public boolean withinTimeRange(long timestamp) { + assert timestamp >= 0; if (this.allTime) { return true; } @@ -174,6 +182,7 @@ if (this.allTime) { return true; } + assert tr.getMin() >= 0; return getMin() < tr.getMax() && getMax() >= tr.getMin(); } @@ -186,27 +195,29 @@ * @return true if within TimeRange, false if not */ public boolean withinOrAfterTimeRange(long timestamp) { - if(allTime) return true; + assert timestamp >= 0; + if (allTime) { + return true; + } // check if >= minStamp - return (timestamp >= minStamp); + return timestamp >= minStamp; } /** - * Compare the timestamp to timerange - * @param timestamp + * Compare the timestamp to timerange. * @return -1 if timestamp is less than timerange, * 0 if timestamp is within timerange, * 1 if timestamp is greater than timerange */ public int compare(long timestamp) { - if (allTime) return 0; - if (timestamp < minStamp) { - return -1; - } else if (timestamp >= maxStamp) { - return 1; - } else { + assert timestamp >= 0; + if (this.allTime) { return 0; } + if (timestamp < minStamp) { + return -1; + } + return timestamp >= maxStamp? 1: 0; } @Override @@ -218,4 +229,4 @@ sb.append(this.minStamp); return sb.toString(); } -} +} \ No newline at end of file
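A short illustration of the [minStamp, maxStamp) semantics documented above, using the now-unchecked constructor; the values are arbitrary (run with -ea to enable the asserts):

    import org.apache.hadoop.hbase.io.TimeRange;

    public class TimeRangeSemanticsSketch {
      public static void main(String[] args) {
        TimeRange tr = new TimeRange(10L, 20L);  // the interval [10, 20)
        assert tr.withinTimeRange(10L);          // minStamp is inclusive
        assert !tr.withinTimeRange(20L);         // maxStamp is exclusive
        assert tr.compare(5L) == -1;             // before the range
        assert tr.compare(15L) == 0;             // inside the range
        assert tr.compare(25L) == 1;             // after the range
        // new TimeRange(20L, 10L) now throws IllegalArgumentException, not IOException.
      }
    }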
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java index 935813c5d61..90c16f9921b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java @@ -68,8 +68,8 @@ public class DefaultStoreFlusher extends StoreFlusher { /* isCompaction = */ false, /* includeMVCCReadpoint = */ true, /* includesTags = */ snapshot.isTagsPresent(), - /* shouldDropBehind = */ false); - writer.setTimeRangeTracker(snapshot.getTimeRangeTracker()); + /* shouldDropBehind = */ false, + snapshot.getTimeRangeTracker()); IOException e = null; try { performFlush(scanner, writer, smallestReadPoint, throughputController); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java index 2914b05fd9e..acf208e6234 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.util.Bytes; * believes that the current column should be skipped (by timestamp, filter etc.) * * <p> - * These two methods returns a + * These two methods return a * {@link org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode} * to define what action should be taken. * <p>
@@ -76,7 +76,7 @@ public class ExplicitColumnTracker implements ColumnTracker { * @param minVersions minimum number of versions to keep * @param maxVersions maximum versions to return per column * @param oldestUnexpiredTS the oldest timestamp we are interested in, - * based on TTL + * based on TTL */ public ExplicitColumnTracker(NavigableSet columns, int minVersions, int maxVersions, long oldestUnexpiredTS) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index db666410049..66eaebc6632 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -998,6 +998,23 @@ public class HStore implements Store { public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag, boolean shouldDropBehind) + throws IOException { + return createWriterInTmp(maxKeyCount, compression, isCompaction, includeMVCCReadpoint, + includesTag, shouldDropBehind, null); + } + + /* + * @param maxKeyCount + * @param compression Compression algorithm to use + * @param isCompaction whether we are creating a new file in a compaction + * @param includeMVCCReadpoint - whether to include MVCC or not + * @param includesTag - whether to include tags or not + * @param trt - a ready-made TimeRangeTracker to use, or null to have the writer compute one + * @return Writer for a new StoreFile in the tmp dir. + */ + @Override + public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, + boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag, + boolean shouldDropBehind, final TimeRangeTracker trt) throws IOException { final CacheConfig writerCacheConf; if (isCompaction) { @@ -1014,7 +1031,7 @@ public class HStore implements Store { } HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag, cryptoContext); - StoreFile.Writer w = new StoreFile.WriterBuilder(conf, writerCacheConf, + StoreFile.WriterBuilder builder = new StoreFile.WriterBuilder(conf, writerCacheConf, this.getFileSystem()) .withFilePath(fs.createTempName()) .withComparator(comparator) @@ -1022,9 +1039,11 @@ public class HStore implements Store { .withMaxKeyCount(maxKeyCount) .withFavoredNodes(favoredNodes) .withFileContext(hFileContext) - .withShouldDropCacheBehind(shouldDropBehind) - .build(); - return w; + .withShouldDropCacheBehind(shouldDropBehind); + if (trt != null) { + builder.withTimeRangeTracker(trt); + } + return builder.build(); } private HFileContext createFileContext(Compression.Algorithm compression,
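The HStore change above boils down to one pattern: the legacy overload delegates with a null tracker, and the builder only sees withTimeRangeTracker when a tracker was actually supplied. A reduced, self-contained sketch of that shape (all types here are stand-ins, not the HBase classes):

    final class WriterWiringSketch {
      static final class Tracker {}
      static final class Writer {}

      static final class Builder {
        private Tracker trt;
        Builder withTimeRangeTracker(Tracker trt) { this.trt = trt; return this; }
        Writer build() { return new Writer(); }
      }

      // New overload: a null tracker means "compute the time range per append, as before".
      static Writer createWriter(Tracker trt) {
        Builder builder = new Builder();
        if (trt != null) {
          builder.withTimeRangeTracker(trt);  // premade tracker, e.g. from a memstore snapshot
        }
        return builder.build();
      }

      // The old signature keeps working by delegating with no tracker.
      static Writer createWriter() {
        return createWriter(null);
      }
    }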
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index 99d9dddbfd4..7dfe7c2d6b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -409,7 +409,16 @@ public class ScanQueryMatcher { } } - int timestampComparison = tr.compare(timestamp); + // NOTE: Cryptic stuff! + // If the timestamp is HConstants.OLDEST_TIMESTAMP, then this is a fake cell made to prime a + // Scanner; see KeyValueUtil#createLastOnRow. This Cell should never leave here with a + // matchcode of INCLUDE, else we would return a fake Cell to the client. If we called + // TimeRange#compare, it would return 0 because it does not deal in OLDEST_TIMESTAMP, and we + // would fall through to the later code and could return a matchcode of INCLUDE. See + // HBASE-16074 "ITBLL fails, reports lost big or tiny families" for a horror story. Check here + // for OLDEST_TIMESTAMP. TimeRange#compare is about generic timestamps between 0L and + // Long.MAX_VALUE; it has no special handling for OLDEST_TIMESTAMP. + int timestampComparison = timestamp == HConstants.OLDEST_TIMESTAMP? -1: tr.compare(timestamp); if (timestampComparison >= 1) { return MatchCode.SKIP; } else if (timestampComparison <= -1) { @@ -584,13 +593,6 @@ public class ScanQueryMatcher { } } - public Cell getKeyForNextRow(Cell kv) { - return KeyValueUtil.createLastOnRow( - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - null, 0, 0, - null, 0, 0); - } - /** * @param nextIndexed the key of the next entry in the block index (if any) * @param kv The Cell we're using to calculate the seek key
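The guard can be restated compactly: the sentinel is filtered out before TimeRange#compare ever sees it, so a fake cell can never compare as "within range". A hedged restatement of just that logic (the class and method names are illustrative):

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.io.TimeRange;

    public class SentinelCompareSketch {
      /** -1 = before the range (never INCLUDE), 0 = within the range, 1 = after it. */
      static int compareWithSentinelCheck(TimeRange tr, long timestamp) {
        // A fake seek cell must never match, so force "before range" for the sentinel.
        return timestamp == HConstants.OLDEST_TIMESTAMP ? -1 : tr.compare(timestamp);
      }
    }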
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index dec27ada909..e7a4de5a862 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -211,8 +211,24 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConfigurationObserver boolean shouldDropBehind ) throws IOException; - - + /** + * @param maxKeyCount + * @param compression Compression algorithm to use + * @param isCompaction whether we are creating a new file in a compaction + * @param includeMVCCReadpoint whether we should write out the MVCC readpoint + * @param includesTags whether to include tags + * @param shouldDropBehind should the writer drop caches behind writes + * @param trt Ready-made TimeRangeTracker to use. + * @return Writer for a new StoreFile in the tmp dir. + */ + StoreFile.Writer createWriterInTmp( + long maxKeyCount, + Compression.Algorithm compression, + boolean isCompaction, + boolean includeMVCCReadpoint, + boolean includesTags, + boolean shouldDropBehind, + final TimeRangeTracker trt + ) throws IOException; // Compaction oriented methods diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index c48380d3876..a5f49a687fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -619,6 +619,7 @@ public class StoreFile { private Path filePath; private InetSocketAddress[] favoredNodes; private HFileContext fileContext; + private TimeRangeTracker trt; public WriterBuilder(Configuration conf, CacheConfig cacheConf, FileSystem fs) { @@ -627,6 +628,17 @@ public class StoreFile { this.fs = fs; } + /** + * @param trt A premade TimeRangeTracker to use rather than build one per append (building one + * of these is expensive so good to pass one in if you have one). + * @return this (for chained invocation) + */ + public WriterBuilder withTimeRangeTracker(final TimeRangeTracker trt) { + Preconditions.checkNotNull(trt); + this.trt = trt; + return this; + } + /** * Use either this method or {@link #withFilePath}, but not both. * @param dir Path to column family directory. The directory is created if it does not exist. @@ -720,7 +732,7 @@ comparator = KeyValue.COMPARATOR; } return new Writer(fs, filePath, - conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext); + conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext, trt); } } @@ -796,7 +808,6 @@ public class StoreFile { private Cell lastDeleteFamilyCell = null; private long deleteFamilyCnt = 0; - TimeRangeTracker timeRangeTracker = new TimeRangeTracker(); /** * timeRangeTrackerSet is used to figure if we were passed a filled-out TimeRangeTracker or not. * When flushing a memstore, we set here the TimeRangeTracker that the memstore accumulated * during updates, so we do not need to recalculate the timeRangeTracker bounds; it was done * already as part of add-to-memstore. * A completed TimeRangeTracker is not set in cases of compactions when it is recalculated. */ - boolean timeRangeTrackerSet = false; + private final boolean timeRangeTrackerSet; + final TimeRangeTracker timeRangeTracker; protected HFile.Writer writer; @@ -827,6 +839,36 @@ public class StoreFile { final KVComparator comparator, BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext) throws IOException { + this(fs, path, conf, cacheConf, comparator, bloomType, maxKeys, favoredNodes, fileContext, + null); + } + + /** + * Creates an HFile.Writer that also writes helpful meta data. + * @param fs file system to write to + * @param path file name to create + * @param conf user configuration + * @param comparator key comparator + * @param bloomType bloom filter setting + * @param maxKeys the expected maximum number of keys to be added. Was used + * for Bloom filter size in {@link HFile} format version 1. + * @param favoredNodes + * @param fileContext - The HFile context + * @param trt Ready-made TimeRangeTracker to use. + * @throws IOException problem writing to FS + */ + private Writer(FileSystem fs, Path path, + final Configuration conf, + CacheConfig cacheConf, + final KVComparator comparator, BloomType bloomType, long maxKeys, + InetSocketAddress[] favoredNodes, HFileContext fileContext, + final TimeRangeTracker trt) + throws IOException { + // If passed a TimeRangeTracker, use it. Set timeRangeTrackerSet so we don't destroy it. + // TODO: put the state of the TRT on the TRT; i.e. make a read-only version (TimeRange) when + // it is no longer writable. + this.timeRangeTrackerSet = trt != null; + this.timeRangeTracker = this.timeRangeTrackerSet? trt: new TimeRangeTracker(); writer = HFile.getWriterFactory(conf, cacheConf) .withPath(fs, path) .withComparator(comparator) @@ -887,19 +929,6 @@ public class StoreFile { appendFileInfo(EARLIEST_PUT_TS, Bytes.toBytes(earliestPutTs)); } - /** - * Set TimeRangeTracker. - * Called when flushing to pass us a pre-calculated TimeRangeTracker, one made during updates - * to memstore so we don't have to make one ourselves as Cells get appended. Call before first - * append. If this method is not called, we will calculate our own range of the Cells that - * comprise this StoreFile (and write them on the end as metadata). It is good to have this stuff - * passed because it is expensive to make. - */ - public void setTimeRangeTracker(final TimeRangeTracker trt) { - this.timeRangeTracker = trt; - timeRangeTrackerSet = true; - } - /** * Record the earliest Put timestamp. * @@ -1665,7 +1694,7 @@ } public long getMaxTimestamp() { - return timeRange == null ?
Long.MAX_VALUE : timeRange.getMax(); + return timeRange == null ? TimeRange.INITIAL_MAX_TIMESTAMP: timeRange.getMax(); } }
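Hedged usage sketch for the new builder option: at flush time the caller hands over the tracker the memstore already filled in, so the Writer does not recompute bounds on every append. The method below only wires together builder calls that appear in this patch; the surrounding objects are assumed to exist in the caller.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.regionserver.StoreFile;
    import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;

    public class FlushWriterSketch {
      static StoreFile.Writer newFlushWriter(Configuration conf, CacheConfig cacheConf,
          FileSystem fs, Path path, HFileContext fileContext,
          TimeRangeTracker memstoreTracker) throws IOException {
        return new StoreFile.WriterBuilder(conf, cacheConf, fs)
            .withFilePath(path)
            .withFileContext(fileContext)
            .withTimeRangeTracker(memstoreTracker)  // premade; replaces setTimeRangeTracker()
            .build();
      }
    }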
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index beb07588b7e..4447556c2f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -398,8 +398,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // We can only exclude store files based on TTL if minVersions is set to 0. // Otherwise, we might have to return KVs that have technically expired. - long expiredTimestampCutoff = minVersions == 0 ? oldestUnexpiredTS : - Long.MIN_VALUE; + long expiredTimestampCutoff = minVersions == 0 ? oldestUnexpiredTS: Long.MIN_VALUE; // include only those scan files which pass all filters for (KeyValueScanner kvs : allScanners) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java index 0c3432ca1a7..c367b526202 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java @@ -115,8 +115,8 @@ public class StripeStoreFlusher extends StoreFlusher { /* isCompaction = */ false, /* includeMVCCReadpoint = */ true, /* includesTags = */ true, - /* shouldDropBehind = */ false); - writer.setTimeRangeTracker(tracker); + /* shouldDropBehind = */ false, + tracker); return writer; } }; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java index f4175cc2fde..78950f841af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.io.Writable; /** - * Stores minimum and maximum timestamp values. Both timestamps are inclusive. + * Stores minimum and maximum timestamp values. * Use this class at write-time ONLY. Too much synchronization to use at read time * (TODO: there are two scenarios writing, once when lots of concurrency as part of memstore * updates but then later we can make one as part of a compaction when there is only one thread * @@ -45,8 +45,8 @@ import org.apache.hadoop.io.Writable; @InterfaceAudience.Private public class TimeRangeTracker implements Writable { static final long INITIAL_MIN_TIMESTAMP = Long.MAX_VALUE; + static final long INITIAL_MAX_TIMESTAMP = -1L; long minimumTimestamp = INITIAL_MIN_TIMESTAMP; - static final long INITIAL_MAX_TIMESTAMP = -1; long maximumTimestamp = INITIAL_MAX_TIMESTAMP; /** @@ -125,7 +125,7 @@ } /** - * Check if the range has any overlap with TimeRange + * Check if the range has ANY overlap with TimeRange * @param tr TimeRange * @return True if there is overlap, false otherwise */ @@ -185,22 +185,19 @@ return trt == null? null: trt.toTimeRange(); } - private boolean isFreshInstance() { - return getMin() == INITIAL_MIN_TIMESTAMP && getMax() == INITIAL_MAX_TIMESTAMP; - } - /** * @return Make a TimeRange from current state of this. */ TimeRange toTimeRange() throws IOException { long min = getMin(); long max = getMax(); - // Check for the case where the TimeRangeTracker is fresh. In that case it has - // initial values that are antithetical to a TimeRange... Return an uninitialized TimeRange - // if passed an uninitialized TimeRangeTracker. - if (isFreshInstance()) { - return new TimeRange(); + // Initial TimeRangeTracker timestamps are the opposite of what you want for a TimeRange. Fix! + if (min == INITIAL_MIN_TIMESTAMP) { + min = TimeRange.INITIAL_MIN_TIMESTAMP; + } + if (max == INITIAL_MAX_TIMESTAMP) { + max = TimeRange.INITIAL_MAX_TIMESTAMP; + } return new TimeRange(min, max); } -} +} \ No newline at end of file
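What toTimeRange() now does, in isolation: a fresh tracker carries inverted sentinels (min = Long.MAX_VALUE, max = -1), and each sentinel is mapped to the corresponding TimeRange bound rather than tripping the max >= min check. A standalone sketch of that normalization (the tracker constants are inlined here because they are package-private):

    import org.apache.hadoop.hbase.io.TimeRange;

    public class TrackerToTimeRangeSketch {
      public static void main(String[] args) {
        long trackerMin = Long.MAX_VALUE;  // TimeRangeTracker.INITIAL_MIN_TIMESTAMP
        long trackerMax = -1L;             // TimeRangeTracker.INITIAL_MAX_TIMESTAMP
        long min = trackerMin == Long.MAX_VALUE ? TimeRange.INITIAL_MIN_TIMESTAMP : trackerMin;
        long max = trackerMax == -1L ? TimeRange.INITIAL_MAX_TIMESTAMP : trackerMax;
        // A never-written tracker therefore yields the widest legal range, [0, Long.MAX_VALUE).
        System.out.println(new TimeRange(min, max));
      }
    }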
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java index 5f77ef7bf2a..5be07f4fb0b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java @@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.regionserver; import static org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode.INCLUDE; import static org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode.SKIP; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import java.io.IOException; import java.util.ArrayList; @@ -28,24 +30,28 @@ import java.util.List; import java.util.NavigableSet; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValue.Type; -import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.Before; +import org.junit.Test; import org.junit.experimental.categories.Category; @Category(SmallTests.class) -public class TestQueryMatcher extends HBaseTestCase { +public class TestQueryMatcher { private static final boolean PRINT = false; + private Configuration conf; private byte[] row1; private byte[] row2; @@ -66,8 +72,9 @@ KVComparator rowComparator; private Scan scan; + @Before public void setUp() throws Exception { - super.setUp(); + this.conf = HBaseConfiguration.create(); row1 = Bytes.toBytes("row1"); row2 = Bytes.toBytes("row2"); row3 = Bytes.toBytes("row3"); @@ -90,7 +97,25 @@ this.scan = new Scan(get); rowComparator = KeyValue.COMPARATOR; + } /** + * This is a cryptic test. It is checking that we don't include a fake cell, one that has a + * timestamp of {@link HConstants#OLDEST_TIMESTAMP}. See HBASE-16074 for background. + * @throws IOException + */ + @Test + public void testNeverIncludeFakeCell() throws IOException { + long now = EnvironmentEdgeManager.currentTime(); + // Do with fam2 which has a col2 qualifier. + ScanQueryMatcher qm = new ScanQueryMatcher(scan, + new ScanInfo(this.conf, fam2, 10, 1, ttl, KeepDeletedCells.FALSE, 0, rowComparator), + get.getFamilyMap().get(fam2), now - ttl, now); + Cell kv = new KeyValue(row1, fam2, col2, 1, data); + Cell cell = KeyValueUtil.createLastOnRowCol(kv); + qm.setRow(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength()); + MatchCode code = qm.match(cell); + assertEquals(MatchCode.SEEK_NEXT_COL, code); } private void _testMatch_ExplicitColumns(Scan scan, List expected) throws IOException { @@ -127,6 +152,7 @@ } } + @Test public void testMatch_ExplicitColumns() throws IOException { //Moving up from the Tracker by using Gets and List instead @@ -144,6 +170,7 @@ _testMatch_ExplicitColumns(scan, expected); } + @Test public void testMatch_Wildcard() throws IOException { //Moving up from the Tracker by using Gets and List instead @@ -199,6 +226,7 @@ * * @throws IOException */ + @Test public void testMatch_ExpiredExplicit() throws IOException { @@ -245,7 +273,6 @@ } } - /** * Verify that {@link ScanQueryMatcher} only skips expired KeyValue * instances and does not exit early from the row (skipping * * @throws IOException */ + @Test public void testMatch_ExpiredWildcard() throws IOException { @@ -299,6 +327,7 @@ } } + @Test public void testMatch_PartialRangeDropDeletes() throws Exception { // Some ranges.
testDropDeletes( @@ -347,5 +376,4 @@ public class TestQueryMatcher extends HBaseTestCase { assertEquals(expected[i], actual.get(i)); } } -} - +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimeRangeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimeRangeTracker.java index b68b1a854a4..377801ccf71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimeRangeTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimeRangeTracker.java @@ -25,11 +25,48 @@ import java.io.IOException; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Writables; import org.junit.Test; import org.junit.experimental.categories.Category; @Category({SmallTests.class}) public class TestTimeRangeTracker { + @Test + public void testExtreme() { + TimeRange tr = new TimeRange(); + assertTrue(tr.includesTimeRange(new TimeRange())); + TimeRangeTracker trt = new TimeRangeTracker(); + assertFalse(trt.includesTimeRange(new TimeRange())); + trt.includeTimestamp(1); + trt.includeTimestamp(10); + assertTrue(trt.includesTimeRange(new TimeRange())); + } + + @Test + public void testTimeRangeInitialized() { + TimeRangeTracker src = new TimeRangeTracker(); + TimeRange tr = new TimeRange(System.currentTimeMillis()); + assertFalse(src.includesTimeRange(tr)); + } + + @Test + public void testTimeRangeTrackerNullIsSameAsTimeRangeNull() throws IOException { + TimeRangeTracker src = new TimeRangeTracker(1, 2); + byte [] bytes = Writables.getBytes(src); + TimeRange tgt = TimeRangeTracker.getTimeRange(bytes); + assertEquals(src.getMin(), tgt.getMin()); + assertEquals(src.getMax(), tgt.getMax()); + } + + @Test + public void testSerialization() throws IOException { + TimeRangeTracker src = new TimeRangeTracker(1, 2); + TimeRangeTracker tgt = new TimeRangeTracker(); + Writables.copyWritable(src, tgt); + assertEquals(src.getMin(), tgt.getMin()); + assertEquals(src.getMax(), tgt.getMax()); + } + @Test public void testAlwaysDecrementingSetsMaximum() { TimeRangeTracker trr = new TimeRangeTracker();