diff --git a/CHANGES.txt b/CHANGES.txt index 925f574151c..1057f23ff2e 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -185,6 +185,7 @@ Trunk (unreleased changes) (Izaak Rubin via Stack) HBASE-744 BloomFilter serialization/deserialization broken HBASE-742 Column length limit is not enforced (Jean-Daniel Cryans via Stack) + HBASE-737 Scanner: every cell in a row has the same timestamp IMPROVEMENTS HBASE-559 MR example job to count table rows diff --git a/src/java/org/apache/hadoop/hbase/HMerge.java b/src/java/org/apache/hadoop/hbase/HMerge.java index e41ddebd167..f43b35e1fc5 100644 --- a/src/java/org/apache/hadoop/hbase/HMerge.java +++ b/src/java/org/apache/hadoop/hbase/HMerge.java @@ -330,11 +330,11 @@ class HMerge implements HConstants { try { HStoreKey key = new HStoreKey(); - TreeMap results = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap results = + new TreeMap(Bytes.BYTES_COMPARATOR); while(rootScanner.next(key, results)) { - for(byte [] b: results.values()) { - HRegionInfo info = Writables.getHRegionInfoOrNull(b); + for(Cell c: results.values()) { + HRegionInfo info = Writables.getHRegionInfoOrNull(c.getValue()); if (info != null) { metaRegions.add(info); } diff --git a/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java b/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java index c05d9d59dad..2519a07c4fd 100644 --- a/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java +++ b/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java @@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; class UnmodifyableHRegionInfo extends HRegionInfo { /* diff --git a/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java b/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java index d634c757cde..64f95ad5359 100644 --- a/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.HTableDescriptor; /** * Read-only table descriptor. - * Returned out of {@link HTable.getTableDescriptor}. 
*/ public class UnmodifyableHTableDescriptor extends HTableDescriptor { diff --git a/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java b/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java index e59122bf787..42033a6f6e9 100644 --- a/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java @@ -27,6 +27,7 @@ import java.util.SortedMap; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.io.ObjectWritable; /** @@ -124,7 +125,7 @@ public class ColumnValueFilter implements RowFilterInterface { } /** {@inheritDoc} */ - public boolean filterRow(final SortedMap columns) { + public boolean filterRow(final SortedMap columns) { // Don't let rows through if they don't have the column we are checking return !columns.containsKey(columnName); } diff --git a/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java b/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java index e5c7f7d78fb..3d95c8b1d3c 100644 --- a/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java @@ -24,6 +24,7 @@ import java.io.DataOutput; import java.io.IOException; import java.util.SortedMap; +import org.apache.hadoop.hbase.io.Cell; /** * Implementation of RowFilterInterface that limits results to a specific page @@ -123,7 +124,7 @@ public class PageRowFilter implements RowFilterInterface { * {@inheritDoc} */ public boolean filterRow(@SuppressWarnings("unused") - final SortedMap columns) { + final SortedMap columns) { return filterAllRemaining(); } diff --git a/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java b/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java index 144c14071bb..14335314ed7 100644 --- a/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java @@ -32,6 +32,7 @@ import java.util.Map.Entry; import java.util.regex.Pattern; import org.apache.hadoop.hbase.regionserver.HLogEdit; +import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.util.Bytes; /** @@ -80,7 +81,7 @@ public class RegExpRowFilter implements RowFilterInterface { */ @Deprecated public RegExpRowFilter(final String rowKeyRegExp, - final Map columnFilter) { + final Map columnFilter) { this.rowKeyRegExp = rowKeyRegExp; this.setColumnFilters(columnFilter); } @@ -122,13 +123,13 @@ public class RegExpRowFilter implements RowFilterInterface { * Map of columns with value criteria. 
*/ @Deprecated - public void setColumnFilters(final Map columnFilter) { + public void setColumnFilters(final Map columnFilter) { if (null == columnFilter) { nullColumns.clear(); equalsMap.clear(); } else { - for (Entry entry : columnFilter.entrySet()) { - setColumnFilter(entry.getKey(), entry.getValue()); + for (Entry entry : columnFilter.entrySet()) { + setColumnFilter(entry.getKey(), entry.getValue().getValue()); } } } @@ -186,10 +187,10 @@ public class RegExpRowFilter implements RowFilterInterface { * * {@inheritDoc} */ - public boolean filterRow(final SortedMap columns) { - for (Entry col : columns.entrySet()) { + public boolean filterRow(final SortedMap columns) { + for (Entry col : columns.entrySet()) { if (nullColumns.contains(col.getKey()) - && !HLogEdit.isDeleted(col.getValue())) { + && !HLogEdit.isDeleted(col.getValue().getValue())) { return true; } } diff --git a/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java b/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java index 416b0b35cdd..bff336c80db 100644 --- a/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java +++ b/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java @@ -23,6 +23,8 @@ import java.util.SortedMap; import org.apache.hadoop.io.Writable; +import org.apache.hadoop.hbase.io.Cell; + /** * * Interface used for row-level filters applied to HRegion.HScanner scan @@ -98,7 +100,7 @@ public interface RowFilterInterface extends Writable { * @param columns * @return true if row filtered and should not be processed. */ - boolean filterRow(final SortedMap columns); + boolean filterRow(final SortedMap columns); /** * Validates that this filter applies only to a subset of the given columns. diff --git a/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java b/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java index b1fac166f07..f568d2d909b 100644 --- a/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java +++ b/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java @@ -28,6 +28,7 @@ import java.util.SortedMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.io.ObjectWritable; /** @@ -179,7 +180,7 @@ public class RowFilterSet implements RowFilterInterface { } /** {@inheritDoc} */ - public boolean filterRow(final SortedMap columns) { + public boolean filterRow(final SortedMap columns) { boolean resultFound = false; boolean result = operator == Operator.MUST_PASS_ONE; for (RowFilterInterface filter : filters) { diff --git a/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java b/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java index ec0b9a3a8ad..b2bb653141b 100644 --- a/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java @@ -24,6 +24,7 @@ import java.io.DataOutput; import java.io.IOException; import java.util.SortedMap; +import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.util.Bytes; /** @@ -123,7 +124,7 @@ public class StopRowFilter implements RowFilterInterface { * @param columns */ public boolean filterRow(@SuppressWarnings("unused") - final SortedMap columns) { + final SortedMap columns) { return filterAllRemaining(); } diff --git a/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java b/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java index 64419bd76a9..bb9c90ec840 100644 --- 
a/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java @@ -24,6 +24,7 @@ import java.io.DataOutput; import java.io.IOException; import java.util.SortedMap; +import org.apache.hadoop.hbase.io.Cell; /** * WhileMatchRowFilter is a wrapper filter that filters everything after the @@ -98,7 +99,7 @@ public class WhileMatchRowFilter implements RowFilterInterface { } /** {@inheritDoc} */ - public boolean filterRow(final SortedMap columns) { + public boolean filterRow(final SortedMap columns) { changeFAR(this.filter.filterRow(columns)); return filterAllRemaining(); } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/Flusher.java b/src/java/org/apache/hadoop/hbase/regionserver/Flusher.java index 29879e32287..8d6895b936d 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/Flusher.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/Flusher.java @@ -27,8 +27,6 @@ import java.util.concurrent.TimeUnit; import java.util.HashSet; import java.util.Set; import java.util.SortedMap; -import java.util.TreeMap; -import java.util.Comparator; import java.util.ConcurrentModificationException; import org.apache.commons.logging.Log; @@ -169,8 +167,9 @@ class Flusher extends Thread implements FlushRequester { } lock.lock(); try { - // See javadoc comment above for removeFromQueue on why we do not - // compact if removeFromQueue is true. + // See comment above for removeFromQueue on why we do not + // compact if removeFromQueue is true. Note that region.flushCache() + // only returns true if a flush is done and if a compaction is needed. if (region.flushcache() && !removeFromQueue) { server.compactSplitThread.compactionRequested(region); } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java b/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java index 213f969d8f8..e1bbf6ae33c 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java @@ -29,6 +29,7 @@ import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HStoreKey; +import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.util.Bytes; /** @@ -182,8 +183,7 @@ public abstract class HAbstractScanner implements InternalScanner { } /** {@inheritDoc} */ - public abstract boolean next(HStoreKey key, - SortedMap results) + public abstract boolean next(HStoreKey key, SortedMap results) throws IOException; } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 8a53aac8e7f..1816806aefd 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -977,7 +977,7 @@ public class HRegion implements HConstants { * *

This method may block for some time. * - * @return true if the cache was flushed + * @return true if the region needs compacting * * @throws IOException * @throws DroppedSnapshotException Thrown when replay of hlog is required @@ -1021,13 +1021,16 @@ public class HRegion implements HConstants { // restart so hlog content can be replayed and put back into the memcache. // Otherwise, the snapshot content while backed up in the hlog, it will not // be part of the current running servers state. - long flushed = 0; + boolean compactionRequested = false; try { // A. Flush memcache to all the HStores. // Keep running vector of all store files that includes both old and the // just-made new flush store file. for (HStore hstore: stores.values()) { - flushed += hstore.flushCache(sequenceId); + boolean needsCompaction = hstore.flushCache(sequenceId); + if (needsCompaction) { + compactionRequested = true; + } } } catch (Throwable t) { // An exception here means that the snapshot was not persisted. @@ -1037,7 +1040,8 @@ public class HRegion implements HConstants { // exceptions -- e.g. HBASE-659 was about an NPE -- so now we catch // all and sundry. this.log.abortCacheFlush(); - DroppedSnapshotException dse = new DroppedSnapshotException(); + DroppedSnapshotException dse = new DroppedSnapshotException("region: " + + Bytes.toString(getRegionName())); dse.initCause(t); throw dse; } @@ -1064,13 +1068,12 @@ public class HRegion implements HConstants { LOG.debug("Finished memcache flush for region " + this + " in " + (System.currentTimeMillis() - startTime) + "ms, sequence id=" + - sequenceId + ", " + - StringUtils.humanReadableInt(flushed)); + sequenceId + ", compaction requested=" + compactionRequested); if (!regionInfo.isMetaRegion()) { this.historian.addRegionFlush(regionInfo, timeTaken); } } - return true; + return compactionRequested; } ////////////////////////////////////////////////////////////////////////////// @@ -1733,7 +1736,7 @@ public class HRegion implements HConstants { */ private class HScanner implements InternalScanner { private InternalScanner[] scanners; - private TreeMap[] resultSets; + private TreeMap[] resultSets; private HStoreKey[] keys; private RowFilterInterface filter; @@ -1782,7 +1785,7 @@ public class HRegion implements HConstants { this.keys = new HStoreKey[scanners.length]; for (int i = 0; i < scanners.length; i++) { keys[i] = new HStoreKey(); - resultSets[i] = new TreeMap(Bytes.BYTES_COMPARATOR); + resultSets[i] = new TreeMap(Bytes.BYTES_COMPARATOR); if(scanners[i] != null && !scanners[i].next(keys[i], resultSets[i])) { closeScanner(i); } @@ -1795,7 +1798,7 @@ public class HRegion implements HConstants { /** {@inheritDoc} */ @SuppressWarnings("null") - public boolean next(HStoreKey key, SortedMap results) + public boolean next(HStoreKey key, SortedMap results) throws IOException { boolean moreToFollow = false; boolean filtered = false; @@ -1830,7 +1833,7 @@ public class HRegion implements HConstants { // but this had the effect of overwriting newer // values with older ones. So now we only insert // a result if the map does not contain the key. 
- for (Map.Entry e : resultSets[i].entrySet()) { + for (Map.Entry e : resultSets[i].entrySet()) { if (!results.containsKey(e.getKey())) { results.put(e.getKey(), e.getValue()); } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 8a6a7506730..aae18333526 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1120,12 +1120,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable { HbaseMapWritable values = new HbaseMapWritable(); HStoreKey key = new HStoreKey(); - TreeMap results = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap results = + new TreeMap(Bytes.BYTES_COMPARATOR); while (s.next(key, results)) { - for (Map.Entry e: results.entrySet()) { - values.put(e.getKey(), new Cell(e.getValue(), key.getTimestamp())); - } + values.putAll(results); if (values.size() > 0) { // Row has something in it. Return the value. break; diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HStore.java b/src/java/org/apache/hadoop/hbase/regionserver/HStore.java index 8f206e6a70a..f424d891d40 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -90,7 +90,7 @@ public class HStore implements HConstants { private final Integer flushLock = new Integer(0); - private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); final byte [] storeName; private final String storeNameStr; @@ -550,27 +550,27 @@ public class HStore implements HConstants { * Write out current snapshot. Presumes {@link #snapshot()} has been called * previously. * @param logCacheFlushId flush sequence number - * @return count of bytes flushed + * @return true if a compaction is needed * @throws IOException */ - long flushCache(final long logCacheFlushId) throws IOException { + boolean flushCache(final long logCacheFlushId) throws IOException { // Get the snapshot to flush. Presumes that a call to // this.memcache.snapshot() has happened earlier up in the chain. SortedMap cache = this.memcache.getSnapshot(); - long flushed = internalFlushCache(cache, logCacheFlushId); + boolean compactionNeeded = internalFlushCache(cache, logCacheFlushId); // If an exception happens flushing, we let it out without clearing // the memcache snapshot. The old snapshot will be returned when we say // 'snapshot', the next time flush comes around. this.memcache.clearSnapshot(cache); - return flushed; + return compactionNeeded; } - private long internalFlushCache(SortedMap cache, + private boolean internalFlushCache(SortedMap cache, long logCacheFlushId) throws IOException { long flushed = 0; // Don't flush if there are no entries. 
if (cache.size() == 0) { - return flushed; + return false; } // TODO: We can fail in the below block before we complete adding this @@ -634,7 +634,7 @@ public class HStore implements HConstants { StringUtils.humanReadableInt(newStoreSize)); } } - return flushed; + return storefiles.size() >= compactionThreshold; } /* @@ -744,8 +744,8 @@ public class HStore implements HConstants { List readers = new ArrayList(); for (HStoreFile file: filesToCompact) { try { - HStoreFile.BloomFilterMapFile.Reader reader = file.getReader(fs, - this.family.isBloomFilterEnabled(), false); + HStoreFile.BloomFilterMapFile.Reader reader = + file.getReader(fs, false, false); readers.add(reader); // Compute the size of the new bloomfilter if needed diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java b/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java index 6b20725935c..3f23ccfc605 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java @@ -22,7 +22,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.ArrayList; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.SortedMap; @@ -33,6 +32,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HStoreKey; import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.util.Bytes; /** @@ -42,7 +42,7 @@ class HStoreScanner implements InternalScanner { static final Log LOG = LogFactory.getLog(HStoreScanner.class); private InternalScanner[] scanners; - private TreeMap[] resultSets; + private TreeMap[] resultSets; private HStoreKey[] keys; private boolean wildcardMatch = false; private boolean multipleMatchers = false; @@ -87,7 +87,7 @@ class HStoreScanner implements InternalScanner { // All results will match the required column-set and scanTime. for (int i = 0; i < scanners.length; i++) { keys[i] = new HStoreKey(); - resultSets[i] = new TreeMap(Bytes.BYTES_COMPARATOR); + resultSets[i] = new TreeMap(Bytes.BYTES_COMPARATOR); if(scanners[i] != null && !scanners[i].next(keys[i], resultSets[i])) { closeScanner(i); } @@ -105,7 +105,7 @@ class HStoreScanner implements InternalScanner { } /** {@inheritDoc} */ - public boolean next(HStoreKey key, SortedMap results) + public boolean next(HStoreKey key, SortedMap results) throws IOException { // Filtered flag is set by filters. If a cell has been 'filtered out' @@ -166,9 +166,9 @@ class HStoreScanner implements InternalScanner { // a result if the map does not contain the key. HStoreKey hsk = new HStoreKey(key.getRow(), HConstants.EMPTY_BYTE_ARRAY, key.getTimestamp()); - for (Map.Entry e : resultSets[i].entrySet()) { + for (Map.Entry e : resultSets[i].entrySet()) { hsk.setColumn(e.getKey()); - if (HLogEdit.isDeleted(e.getValue())) { + if (HLogEdit.isDeleted(e.getValue().getValue())) { if (!deletes.contains(hsk)) { // Key changes as we cycle the for loop so add a copy to // the set of deletes. @@ -180,8 +180,8 @@ class HStoreScanner implements InternalScanner { !results.containsKey(e.getKey())) { if (dataFilter != null) { // Filter whole row by column data? 
- filtered = - dataFilter.filterColumn(chosenRow, e.getKey(), e.getValue()); + filtered = dataFilter.filterColumn(chosenRow, e.getKey(), + e.getValue().getValue()); if (filtered) { results.clear(); break; @@ -265,9 +265,4 @@ class HStoreScanner implements InternalScanner { } } } - - public Iterator>> iterator() { - throw new UnsupportedOperationException("Unimplemented serverside. " + - "next(HStoreKey, StortedMap(...) is more efficient"); - } } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java b/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java index 360bd27f13a..8275f634249 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java @@ -23,6 +23,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.SortedMap; import org.apache.hadoop.hbase.HStoreKey; +import org.apache.hadoop.hbase.io.Cell; /** * Internal scanners differ from client-side scanners in that they operate on @@ -49,7 +50,7 @@ public interface InternalScanner extends Closeable { * @return true if data was returned * @throws IOException */ - public boolean next(HStoreKey key, SortedMap results) + public boolean next(HStoreKey key, SortedMap results) throws IOException; /** diff --git a/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java b/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java index 2f6a07d688a..a289cad5969 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java @@ -685,7 +685,7 @@ class Memcache { /** {@inheritDoc} */ @Override - public boolean next(HStoreKey key, SortedMap results) + public boolean next(HStoreKey key, SortedMap results) throws IOException { if (this.scannerClosed) { return false; @@ -735,7 +735,7 @@ class Memcache { c.getTimestamp() > latestTimestamp) { latestTimestamp = c.getTimestamp(); } - results.put(column, c.getValue()); + results.put(column, c); } this.currentRow = getNextRow(this.currentRow); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 597c3dbe27d..f8f69ef5de3 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -27,6 +27,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HStoreKey; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.MapFile; @@ -36,12 +37,12 @@ import org.apache.hadoop.io.MapFile; class StoreFileScanner extends HAbstractScanner implements ChangedReadersObserver { // Keys retrieved from the sources - private HStoreKey keys[]; + private volatile HStoreKey keys[]; // Values that correspond to those keys - private byte [][] vals; + private volatile byte [][] vals; // Readers we go against. - private MapFile.Reader[] readers; + private volatile MapFile.Reader[] readers; // Store this scanner came out of. 
private final HStore store; @@ -62,6 +63,7 @@ implements ChangedReadersObserver { super(timestamp, targetCols); this.store = store; this.store.addChangedReaderObserver(this); + this.store.lock.readLock().lock(); try { openReaders(firstRow); } catch (Exception ex) { @@ -69,6 +71,8 @@ implements ChangedReadersObserver { IOException e = new IOException("HStoreScanner failed construction"); e.initCause(ex); throw e; + } finally { + this.store.lock.readLock().unlock(); } } @@ -92,8 +96,7 @@ implements ChangedReadersObserver { // Most recent map file should be first int i = readers.length - 1; for(HStoreFile curHSF: store.getStorefiles().values()) { - readers[i--] = curHSF.getReader(store.fs, - store.getFamily().isBloomFilterEnabled(), false); + readers[i--] = curHSF.getReader(store.fs, false, false); } this.keys = new HStoreKey[readers.length]; @@ -140,7 +143,7 @@ implements ChangedReadersObserver { * @see org.apache.hadoop.hbase.regionserver.InternalScanner#next(org.apache.hadoop.hbase.HStoreKey, java.util.SortedMap) */ @Override - public boolean next(HStoreKey key, SortedMap results) + public boolean next(HStoreKey key, SortedMap results) throws IOException { if (this.scannerClosed) { return false; @@ -173,7 +176,8 @@ implements ChangedReadersObserver { if(columnMatch(i)) { // We only want the first result for any specific family member if(!results.containsKey(keys[i].getColumn())) { - results.put(keys[i].getColumn(), vals[i]); + results.put(keys[i].getColumn(), + new Cell(vals[i], keys[i].getTimestamp())); insertedItem = true; } } diff --git a/src/java/org/apache/hadoop/hbase/util/MetaUtils.java b/src/java/org/apache/hadoop/hbase/util/MetaUtils.java index 309f065ec50..c6f8458c465 100644 --- a/src/java/org/apache/hadoop/hbase/util/MetaUtils.java +++ b/src/java/org/apache/hadoop/hbase/util/MetaUtils.java @@ -201,11 +201,11 @@ public class MetaUtils { try { HStoreKey key = new HStoreKey(); - SortedMap results = - new TreeMap(Bytes.BYTES_COMPARATOR); + SortedMap results = + new TreeMap(Bytes.BYTES_COMPARATOR); while (rootScanner.next(key, results)) { HRegionInfo info = Writables.getHRegionInfoOrNull( - results.get(HConstants.COL_REGIONINFO)); + results.get(HConstants.COL_REGIONINFO).getValue()); if (info == null) { LOG.warn("region info is null for row " + key.getRow() + " in table " + HConstants.ROOT_TABLE_NAME); @@ -253,11 +253,11 @@ public class MetaUtils { HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null); try { HStoreKey key = new HStoreKey(); - SortedMap results = - new TreeMap(Bytes.BYTES_COMPARATOR); + SortedMap results = + new TreeMap(Bytes.BYTES_COMPARATOR); while (metaScanner.next(key, results)) { - HRegionInfo info = - Writables.getHRegionInfoOrNull(results.get(HConstants.COL_REGIONINFO)); + HRegionInfo info = Writables.getHRegionInfoOrNull( + results.get(HConstants.COL_REGIONINFO).getValue()); if (info == null) { LOG.warn("regioninfo null for row " + key.getRow() + " in table " + Bytes.toString(m.getTableDesc().getName())); diff --git a/src/test/org/apache/hadoop/hbase/HBaseTestCase.java b/src/test/org/apache/hadoop/hbase/HBaseTestCase.java index f1cf48c88d6..af984a5d492 100644 --- a/src/test/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/src/test/org/apache/hadoop/hbase/HBaseTestCase.java @@ -509,8 +509,8 @@ public abstract class HBaseTestCase extends TestCase { } public interface ScannerIncommon - extends Iterable>> { - public boolean next(HStoreKey key, SortedMap values) + extends Iterable>> { + public boolean next(HStoreKey key, SortedMap values) throws 
IOException; public void close() throws IOException; @@ -522,7 +522,7 @@ public abstract class HBaseTestCase extends TestCase { this.scanner = scanner; } - public boolean next(HStoreKey key, SortedMap values) + public boolean next(HStoreKey key, SortedMap values) throws IOException { RowResult results = scanner.next(); if (results == null) { @@ -531,7 +531,7 @@ public abstract class HBaseTestCase extends TestCase { key.setRow(results.getRow()); values.clear(); for (Map.Entry entry : results.entrySet()) { - values.put(entry.getKey(), entry.getValue().getValue()); + values.put(entry.getKey(), entry.getValue()); } return true; } @@ -552,7 +552,7 @@ public abstract class HBaseTestCase extends TestCase { this.scanner = scanner; } - public boolean next(HStoreKey key, SortedMap values) + public boolean next(HStoreKey key, SortedMap values) throws IOException { return scanner.next(key, values); } diff --git a/src/test/org/apache/hadoop/hbase/TestBloomFilters.java b/src/test/org/apache/hadoop/hbase/TestBloomFilters.java deleted file mode 100644 index aea8d8c8808..00000000000 --- a/src/test/org/apache/hadoop/hbase/TestBloomFilters.java +++ /dev/null @@ -1,205 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase; - -import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.io.Cell; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.HBaseAdmin; - -import org.apache.hadoop.hbase.io.BatchUpdate; -import org.apache.hadoop.hbase.util.Bytes; - -/** Tests per-column bloom filters */ -public class TestBloomFilters extends HBaseClusterTestCase { - static final Log LOG = LogFactory.getLog(TestBloomFilters.class); - - private static final byte [] CONTENTS = Bytes.toBytes("contents:"); - - private static final byte [][] rows = { - Bytes.toBytes("wmjwjzyv"), - Bytes.toBytes("baietibz"), - Bytes.toBytes("guhsgxnv"), - Bytes.toBytes("mhnqycto"), - Bytes.toBytes("xcyqafgz"), - Bytes.toBytes("zidoamgb"), - Bytes.toBytes("tftfirzd"), - Bytes.toBytes("okapqlrg"), - Bytes.toBytes("yccwzwsq"), - Bytes.toBytes("qmonufqu"), - Bytes.toBytes("wlsctews"), - Bytes.toBytes("mksdhqri"), - Bytes.toBytes("wxxllokj"), - Bytes.toBytes("eviuqpls"), - Bytes.toBytes("bavotqmj"), - Bytes.toBytes("yibqzhdl"), - Bytes.toBytes("csfqmsyr"), - Bytes.toBytes("guxliyuh"), - Bytes.toBytes("pzicietj"), - Bytes.toBytes("qdwgrqwo"), - Bytes.toBytes("ujfzecmi"), - Bytes.toBytes("dzeqfvfi"), - Bytes.toBytes("phoegsij"), - Bytes.toBytes("bvudfcou"), - Bytes.toBytes("dowzmciz"), - Bytes.toBytes("etvhkizp"), - Bytes.toBytes("rzurqycg"), - Bytes.toBytes("krqfxuge"), - Bytes.toBytes("gflcohtd"), - Bytes.toBytes("fcrcxtps"), - Bytes.toBytes("qrtovxdq"), - Bytes.toBytes("aypxwrwi"), - Bytes.toBytes("dckpyznr"), - Bytes.toBytes("mdaawnpz"), - Bytes.toBytes("pakdfvca"), - Bytes.toBytes("xjglfbez"), - Bytes.toBytes("xdsecofi"), - Bytes.toBytes("sjlrfcab"), - Bytes.toBytes("ebcjawxv"), - Bytes.toBytes("hkafkjmy"), - Bytes.toBytes("oimmwaxo"), - Bytes.toBytes("qcuzrazo"), - Bytes.toBytes("nqydfkwk"), - Bytes.toBytes("frybvmlb"), - Bytes.toBytes("amxmaqws"), - Bytes.toBytes("gtkovkgx"), - Bytes.toBytes("vgwxrwss"), - Bytes.toBytes("xrhzmcep"), - Bytes.toBytes("tafwziil"), - Bytes.toBytes("erjmncnv"), - Bytes.toBytes("heyzqzrn"), - Bytes.toBytes("sowvyhtu"), - Bytes.toBytes("heeixgzy"), - Bytes.toBytes("ktcahcob"), - Bytes.toBytes("ljhbybgg"), - Bytes.toBytes("jiqfcksl"), - Bytes.toBytes("anjdkjhm"), - Bytes.toBytes("uzcgcuxp"), - Bytes.toBytes("vzdhjqla"), - Bytes.toBytes("svhgwwzq"), - Bytes.toBytes("zhswvhbp"), - Bytes.toBytes("ueceybwy"), - Bytes.toBytes("czkqykcw"), - Bytes.toBytes("ctisayir"), - Bytes.toBytes("hppbgciu"), - Bytes.toBytes("nhzgljfk"), - Bytes.toBytes("vaziqllf"), - Bytes.toBytes("narvrrij"), - Bytes.toBytes("kcevbbqi"), - Bytes.toBytes("qymuaqnp"), - Bytes.toBytes("pwqpfhsr"), - Bytes.toBytes("peyeicuk"), - Bytes.toBytes("kudlwihi"), - Bytes.toBytes("pkmqejlm"), - Bytes.toBytes("ylwzjftl"), - Bytes.toBytes("rhqrlqar"), - Bytes.toBytes("xmftvzsp"), - Bytes.toBytes("iaemtihk"), - Bytes.toBytes("ymsbrqcu"), - Bytes.toBytes("yfnlcxto"), - Bytes.toBytes("nluqopqh"), - Bytes.toBytes("wmrzhtox"), - Bytes.toBytes("qnffhqbl"), - Bytes.toBytes("zypqpnbw"), - Bytes.toBytes("oiokhatd"), - Bytes.toBytes("mdraddiu"), - Bytes.toBytes("zqoatltt"), - Bytes.toBytes("ewhulbtm"), - Bytes.toBytes("nmswpsdf"), - Bytes.toBytes("xsjeteqe"), - Bytes.toBytes("ufubcbma"), - Bytes.toBytes("phyxvrds"), - Bytes.toBytes("vhnfldap"), - Bytes.toBytes("zrrlycmg"), - Bytes.toBytes("becotcjx"), - Bytes.toBytes("wvbubokn"), - Bytes.toBytes("avkgiopr"), - Bytes.toBytes("mbqqxmrv"), - Bytes.toBytes("ibplgvuu"), - 
Bytes.toBytes("dghvpkgc") - }; - - private static final byte [][] testKeys = { - Bytes.toBytes("abcdefgh"), - Bytes.toBytes("ijklmnop"), - Bytes.toBytes("qrstuvwx"), - Bytes.toBytes("yzabcdef") - }; - - /** - * Test that uses automatic bloom filter - * @throws IOException - */ - public void testComputedParameters() throws IOException { - HTable table = null; - - // Setup - - HTableDescriptor desc = new HTableDescriptor(getName()); - desc.addFamily( - new HColumnDescriptor(CONTENTS, // Column name - 1, // Max versions - HColumnDescriptor.CompressionType.NONE, // no compression - HColumnDescriptor.DEFAULT_IN_MEMORY, // not in memory - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_LENGTH, - HColumnDescriptor.DEFAULT_TTL, - true - ) - ); - - // Create the table - - HBaseAdmin admin = new HBaseAdmin(conf); - admin.createTable(desc); - - // Open table - - table = new HTable(conf, desc.getName()); - - // Store some values - - for(int i = 0; i < 100; i++) { - byte [] row = rows[i]; - String value = row.toString(); - BatchUpdate b = new BatchUpdate(row); - b.put(CONTENTS, value.getBytes(HConstants.UTF8_ENCODING)); - table.commit(b); - } - try { - // Give cache flusher and log roller a chance to run - // Otherwise we'll never hit the bloom filter, just the memcache - Thread.sleep(conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000) * 2); - - } catch (InterruptedException e) { - // ignore - } - - for(int i = 0; i < testKeys.length; i++) { - Cell value = table.get(testKeys[i], CONTENTS); - if(value != null && value.getValue().length != 0) { - LOG.info("non existant key: " + testKeys[i] + " returned value: " + - new String(value.getValue(), HConstants.UTF8_ENCODING)); - } - } - } -} diff --git a/src/test/org/apache/hadoop/hbase/TestScannerAPI.java b/src/test/org/apache/hadoop/hbase/TestScannerAPI.java index af470f8602a..c0536b9f504 100644 --- a/src/test/org/apache/hadoop/hbase/TestScannerAPI.java +++ b/src/test/org/apache/hadoop/hbase/TestScannerAPI.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Scanner; import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.io.RowResult; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.util.Bytes; @@ -44,8 +45,8 @@ public class TestScannerAPI extends HBaseClusterTestCase { }); private final byte [] startRow = Bytes.toBytes("0"); - private final TreeMap> values = - new TreeMap>(Bytes.BYTES_COMPARATOR); + private final TreeMap> values = + new TreeMap>(Bytes.BYTES_COMPARATOR); /** * @throws Exception @@ -53,13 +54,16 @@ public class TestScannerAPI extends HBaseClusterTestCase { public TestScannerAPI() throws Exception { super(); try { - TreeMap columns = - new TreeMap(Bytes.BYTES_COMPARATOR); - columns.put(Bytes.toBytes("a:1"), Bytes.toBytes("1")); + TreeMap columns = + new TreeMap(Bytes.BYTES_COMPARATOR); + columns.put(Bytes.toBytes("a:1"), + new Cell(Bytes.toBytes("1"), HConstants.LATEST_TIMESTAMP)); values.put(Bytes.toBytes("1"), columns); - columns = new TreeMap(Bytes.BYTES_COMPARATOR); - columns.put(Bytes.toBytes("a:2"), Bytes.toBytes("2")); - columns.put(Bytes.toBytes("b:2"), Bytes.toBytes("2")); + columns = new TreeMap(Bytes.BYTES_COMPARATOR); + columns.put(Bytes.toBytes("a:2"), + new Cell(Bytes.toBytes("2"), HConstants.LATEST_TIMESTAMP)); + columns.put(Bytes.toBytes("b:2"), + new Cell(Bytes.toBytes("2"), 
HConstants.LATEST_TIMESTAMP)); } catch (Exception e) { e.printStackTrace(); throw e; @@ -85,10 +89,10 @@ public class TestScannerAPI extends HBaseClusterTestCase { HTable table = new HTable(conf, getName()); - for (Map.Entry> row: values.entrySet()) { + for (Map.Entry> row: values.entrySet()) { BatchUpdate b = new BatchUpdate(row.getKey()); - for (Map.Entry val: row.getValue().entrySet()) { - b.put(val.getKey(), val.getValue()); + for (Map.Entry val: row.getValue().entrySet()) { + b.put(val.getKey(), val.getValue().getValue()); } table.commit(b); } @@ -128,12 +132,12 @@ public class TestScannerAPI extends HBaseClusterTestCase { for (RowResult r : scanner2) { assertTrue("row key", values.containsKey(r.getRow())); - SortedMap columnValues = values.get(r.getRow()); + SortedMap columnValues = values.get(r.getRow()); assertEquals(columnValues.size(), r.size()); - for (Map.Entry e: columnValues.entrySet()) { + for (Map.Entry e: columnValues.entrySet()) { byte [] column = e.getKey(); assertTrue("column", r.containsKey(column)); - assertTrue("value", Arrays.equals(columnValues.get(column), + assertTrue("value", Arrays.equals(columnValues.get(column).getValue(), r.get(column).getValue())); } } @@ -144,19 +148,19 @@ public class TestScannerAPI extends HBaseClusterTestCase { private void verify(ScannerIncommon scanner) throws IOException { HStoreKey key = new HStoreKey(); - SortedMap results = - new TreeMap(Bytes.BYTES_COMPARATOR); + SortedMap results = + new TreeMap(Bytes.BYTES_COMPARATOR); while (scanner.next(key, results)) { byte [] row = key.getRow(); assertTrue("row key", values.containsKey(row)); - SortedMap columnValues = values.get(row); + SortedMap columnValues = values.get(row); assertEquals(columnValues.size(), results.size()); - for (Map.Entry e: columnValues.entrySet()) { + for (Map.Entry e: columnValues.entrySet()) { byte [] column = e.getKey(); assertTrue("column", results.containsKey(column)); - assertTrue("value", Arrays.equals(columnValues.get(column), - results.get(column))); + assertTrue("value", Arrays.equals(columnValues.get(column).getValue(), + results.get(column).getValue())); } results.clear(); } diff --git a/src/test/org/apache/hadoop/hbase/TimestampTestBase.java b/src/test/org/apache/hadoop/hbase/TimestampTestBase.java index 1cff1105858..daef8779b3e 100644 --- a/src/test/org/apache/hadoop/hbase/TimestampTestBase.java +++ b/src/test/org/apache/hadoop/hbase/TimestampTestBase.java @@ -177,13 +177,13 @@ public class TimestampTestBase extends HBaseTestCase { int count = 0; try { HStoreKey key = new HStoreKey(); - TreeMapvalue = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMapvalue = + new TreeMap(Bytes.BYTES_COMPARATOR); while (scanner.next(key, value)) { assertTrue(key.getTimestamp() <= ts); // Content matches the key or HConstants.LATEST_TIMESTAMP. // (Key does not match content if we 'put' with LATEST_TIMESTAMP). - long l = Bytes.toLong(value.get(COLUMN)); + long l = Bytes.toLong(value.get(COLUMN).getValue()); assertTrue(key.getTimestamp() == l || HConstants.LATEST_TIMESTAMP == l); count++; diff --git a/src/test/org/apache/hadoop/hbase/client/TestScannerTimes.java b/src/test/org/apache/hadoop/hbase/client/TestScannerTimes.java new file mode 100644 index 00000000000..5f3407a9c8e --- /dev/null +++ b/src/test/org/apache/hadoop/hbase/client/TestScannerTimes.java @@ -0,0 +1,139 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import java.io.IOException; + +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HBaseClusterTestCase; +import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.io.Cell; +import org.apache.hadoop.hbase.io.RowResult; + +/** + * Test that verifies that scanners return a different timestamp for values that + * are not stored at the same time. (HBASE-737) + */ +public class TestScannerTimes extends HBaseClusterTestCase { + private static final String TABLE_NAME = "hbase737"; + private static final String FAM1 = "fam1:"; + private static final String FAM2 = "fam2:"; + private static final String ROW = "row"; + + /** + * test for HBASE-737 + * @throws IOException + */ + public void testHBase737 () throws IOException { + HTableDescriptor desc = new HTableDescriptor(TABLE_NAME); + desc.addFamily(new HColumnDescriptor(FAM1)); + desc.addFamily(new HColumnDescriptor(FAM2)); + + // Create table + HBaseAdmin admin = new HBaseAdmin(conf); + admin.createTable(desc); + + // Open table + HTable table = new HTable(conf, TABLE_NAME); + + // Insert some values + BatchUpdate b = new BatchUpdate(ROW); + b.put(FAM1 + "letters", "abcdefg".getBytes(HConstants.UTF8_ENCODING)); + table.commit(b); + + try { + Thread.sleep(1000); + } catch (InterruptedException i) { + //ignore + } + + b = new BatchUpdate(ROW); + b.put(FAM1 + "numbers", "123456".getBytes(HConstants.UTF8_ENCODING)); + table.commit(b); + + try { + Thread.sleep(1000); + } catch (InterruptedException i) { + //ignore + } + + b = new BatchUpdate(ROW); + b.put(FAM2 + "letters", "hijklmnop".getBytes(HConstants.UTF8_ENCODING)); + table.commit(b); + + long times[] = new long[3]; + byte[][] columns = new byte[][] { + FAM1.getBytes(HConstants.UTF8_ENCODING), + FAM2.getBytes(HConstants.UTF8_ENCODING) + }; + + // First scan the memcache + + Scanner s = table.getScanner(columns); + try { + int index = 0; + RowResult r = null; + while ((r = s.next()) != null) { + for (Cell c: r.values()) { + times[index++] = c.getTimestamp(); + } + } + } finally { + s.close(); + } + for (int i = 0; i < times.length - 1; i++) { + for (int j = i + 1; j < times.length; j++) { + assertTrue(times[j] > times[i]); + } + } + + // Fush data to disk and try again + + cluster.flushcache(); + + try { + Thread.sleep(1000); + } catch (InterruptedException i) { + //ignore + } + + s = table.getScanner(columns); + try { + int index = 0; + RowResult r = null; + while ((r = s.next()) != null) { + for (Cell c: r.values()) { + times[index++] = c.getTimestamp(); + } + } + } finally { + s.close(); + } + for (int i = 0; i < times.length - 1; i++) { + for (int j = i + 1; j < times.length; j++) { + assertTrue(times[j] > 
times[i]); + } + } + + } +} diff --git a/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java b/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java index 484fd1034b2..7daab0108dd 100644 --- a/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java +++ b/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java @@ -30,6 +30,7 @@ import java.util.TreeMap; import junit.framework.TestCase; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.regionserver.HLogEdit; import org.apache.hadoop.hbase.util.Bytes; @@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.util.Bytes; * Tests for regular expression row filter */ public class TestRegExpRowFilter extends TestCase { - TreeMap colvalues; + TreeMap colvalues; RowFilterInterface mainFilter; final char FIRST_CHAR = 'a'; final char LAST_CHAR = 'e'; @@ -55,9 +56,10 @@ public class TestRegExpRowFilter extends TestCase { @Override protected void setUp() throws Exception { super.setUp(); - this.colvalues = new TreeMap(Bytes.BYTES_COMPARATOR); + this.colvalues = new TreeMap(Bytes.BYTES_COMPARATOR); for (char c = FIRST_CHAR; c < LAST_CHAR; c++) { - colvalues.put(Bytes.toBytes(new String(new char [] {c})), GOOD_BYTES); + colvalues.put(Bytes.toBytes(new String(new char [] {c})), + new Cell(GOOD_BYTES, HConstants.LATEST_TIMESTAMP)); } this.mainFilter = new RegExpRowFilter(HOST_PREFIX + ".*", colvalues); } @@ -126,9 +128,9 @@ public class TestRegExpRowFilter extends TestCase { for (char c = FIRST_CHAR; c <= LAST_CHAR; c++) { byte [] t = createRow(c); - for (Map.Entry e: this.colvalues.entrySet()) { + for (Map.Entry e: this.colvalues.entrySet()) { assertFalse("Failed on " + c, - filter.filterColumn(t, e.getKey(), e.getValue())); + filter.filterColumn(t, e.getKey(), e.getValue().getValue())); } } // Try a row and column I know will pass. @@ -171,13 +173,15 @@ public class TestRegExpRowFilter extends TestCase { // Try a row that has all expected columnKeys, and NO null-expected // columnKeys. // Testing row with columnKeys: a-d - colvalues.put(new byte [] {(byte)secondToLast}, GOOD_BYTES); + colvalues.put(new byte [] {(byte)secondToLast}, + new Cell(GOOD_BYTES, HConstants.LATEST_TIMESTAMP)); assertFalse("Failed with last columnKey " + secondToLast, filter. filterRow(colvalues)); // Try a row that has all expected columnKeys AND a null-expected columnKey. // Testing row with columnKeys: a-e - colvalues.put(new byte [] {LAST_CHAR}, GOOD_BYTES); + colvalues.put(new byte [] {LAST_CHAR}, + new Cell(GOOD_BYTES, HConstants.LATEST_TIMESTAMP)); assertTrue("Failed with last columnKey " + LAST_CHAR, filter. filterRow(colvalues)); @@ -185,7 +189,7 @@ public class TestRegExpRowFilter extends TestCase { // that maps to a null value. 
// Testing row with columnKeys: a-e, e maps to null colvalues.put(new byte [] {LAST_CHAR}, - HLogEdit.deleteBytes.get()); + new Cell(HLogEdit.deleteBytes.get(), HConstants.LATEST_TIMESTAMP)); assertFalse("Failed with last columnKey " + LAST_CHAR + " mapping to null.", filter.filterRow(colvalues)); } diff --git a/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java b/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java index 68f81190c04..2099cbbf78e 100644 --- a/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java +++ b/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java @@ -172,8 +172,9 @@ public class TestRowFilterAfterWrite extends HBaseClusterTestCase { private void scanTableWithRowFilter(final String tableName, final boolean printValues) throws IOException { HTable table = new HTable(conf, tableName); - Map columnMap = new HashMap(); - columnMap.put(TEXT_COLUMN1, VALUE); + Map columnMap = new HashMap(); + columnMap.put(TEXT_COLUMN1, + new Cell(VALUE, HConstants.LATEST_TIMESTAMP)); RegExpRowFilter filter = new RegExpRowFilter(null, columnMap); Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW, filter); int numFound = doScan(scanner, printValues); diff --git a/src/test/org/apache/hadoop/hbase/filter/TestRowFilterOnMultipleFamilies.java b/src/test/org/apache/hadoop/hbase/filter/TestRowFilterOnMultipleFamilies.java index 276f501c5c1..ba13b4fd126 100644 --- a/src/test/org/apache/hadoop/hbase/filter/TestRowFilterOnMultipleFamilies.java +++ b/src/test/org/apache/hadoop/hbase/filter/TestRowFilterOnMultipleFamilies.java @@ -92,8 +92,9 @@ public class TestRowFilterOnMultipleFamilies extends HBaseClusterTestCase { private void scanTableWithRowFilter(final String tableName, final boolean printValues) throws IOException { HTable table = new HTable(conf, tableName); - Map columnMap = new HashMap(); - columnMap.put(TEXT_COLUMN1, VALUE); + Map columnMap = new HashMap(); + columnMap.put(TEXT_COLUMN1, + new Cell(VALUE, HConstants.LATEST_TIMESTAMP)); RegExpRowFilter filter = new RegExpRowFilter(null, columnMap); Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW, filter); int numFound = doScan(scanner, printValues); diff --git a/src/test/org/apache/hadoop/hbase/filter/TestRowFilterSet.java b/src/test/org/apache/hadoop/hbase/filter/TestRowFilterSet.java index 5d1a02bad64..16fe4fd9f6c 100644 --- a/src/test/org/apache/hadoop/hbase/filter/TestRowFilterSet.java +++ b/src/test/org/apache/hadoop/hbase/filter/TestRowFilterSet.java @@ -29,6 +29,7 @@ import java.util.Set; import java.util.TreeMap; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.util.Bytes; @@ -44,7 +45,7 @@ public class TestRowFilterSet extends TestCase { static final int MAX_PAGES = 5; final char FIRST_CHAR = 'a'; final char LAST_CHAR = 'e'; - TreeMap colvalues; + TreeMap colvalues; static byte[] GOOD_BYTES = null; static byte[] BAD_BYTES = null; @@ -62,9 +63,10 @@ public class TestRowFilterSet extends TestCase { protected void setUp() throws Exception { super.setUp(); - colvalues = new TreeMap(Bytes.BYTES_COMPARATOR); + colvalues = new TreeMap(Bytes.BYTES_COMPARATOR); for (char c = FIRST_CHAR; c < LAST_CHAR; c++) { - colvalues.put(new byte [] {(byte)c}, GOOD_BYTES); + colvalues.put(new byte [] {(byte)c}, + new Cell(GOOD_BYTES, HConstants.LATEST_TIMESTAMP)); } Set filters = new HashSet(); diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java 
b/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java index 41d4558eda5..b4302e7fd2d 100644 --- a/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java +++ b/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java @@ -76,8 +76,8 @@ public class TestGet2 extends HBaseTestCase implements HConstants { arbitraryStartRow, HConstants.LATEST_TIMESTAMP, new WhileMatchRowFilter(new StopRowFilter(arbitraryStopRow))); HStoreKey key = new HStoreKey(); - TreeMap value = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap value = + new TreeMap(Bytes.BYTES_COMPARATOR); while (scanner.next(key, value)) { if (actualStartRow == null) { actualStartRow = key.getRow(); diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java b/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java index 8da0bd3560e..970f394890b 100644 --- a/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java +++ b/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java @@ -151,7 +151,8 @@ public class TestHMemcache extends TestCase { } } - private void isExpectedRowWithoutTimestamps(final int rowIndex, TreeMap row) { + private void isExpectedRowWithoutTimestamps(final int rowIndex, + TreeMap row) { int i = 0; for (byte [] colname: row.keySet()) { String expectedColname = Bytes.toString(getColumnName(rowIndex, i++)); @@ -161,18 +162,19 @@ public class TestHMemcache extends TestCase { // 100 bytes in size at least. This is the default size // for BytesWriteable. For comparison, comvert bytes to // String and trim to remove trailing null bytes. - byte [] value = row.get(colname); + byte [] value = row.get(colname).getValue(); String colvalueStr = Bytes.toString(value).trim(); assertEquals("Content", colnameStr, colvalueStr); } } private void isExpectedRow(final int rowIndex, TreeMap row) { - TreeMap converted = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap converted = + new TreeMap(Bytes.BYTES_COMPARATOR); for (Map.Entry entry : row.entrySet()) { converted.put(entry.getKey(), - entry.getValue() == null ? null : entry.getValue().getValue()); + new Cell(entry.getValue() == null ? 
null : entry.getValue().getValue(), + HConstants.LATEST_TIMESTAMP)); } isExpectedRowWithoutTimestamps(rowIndex, converted); } @@ -241,16 +243,16 @@ public class TestHMemcache extends TestCase { InternalScanner scanner = this.hmemcache.getScanner(timestamp, cols, HConstants.EMPTY_START_ROW); HStoreKey key = new HStoreKey(); - TreeMap results = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap results = + new TreeMap(Bytes.BYTES_COMPARATOR); for (int i = 0; scanner.next(key, results); i++) { assertTrue("Row name", key.toString().startsWith(Bytes.toString(getRowName(i)))); assertEquals("Count of columns", COLUMNS_COUNT, results.size()); - TreeMap row = - new TreeMap(Bytes.BYTES_COMPARATOR); - for(Map.Entry e: results.entrySet() ) { + TreeMap row = + new TreeMap(Bytes.BYTES_COMPARATOR); + for(Map.Entry e: results.entrySet() ) { row.put(e.getKey(), e.getValue()); } isExpectedRowWithoutTimestamps(i, row); @@ -323,8 +325,8 @@ public class TestHMemcache extends TestCase { InternalScanner scanner = this.hmemcache.getScanner(timestamp, cols, getRowName(startRowId)); HStoreKey key = new HStoreKey(); - TreeMap results = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap results = + new TreeMap(Bytes.BYTES_COMPARATOR); for (int i = 0; scanner.next(key, results); i++) { int rowId = startRowId + i; assertTrue("Row name", @@ -332,9 +334,8 @@ public class TestHMemcache extends TestCase { assertEquals("Count of columns", COLUMNS_COUNT, results.size()); TreeMap row = new TreeMap(Bytes.BYTES_COMPARATOR); - for (Map.Entry e : results.entrySet()) { - row.put(e.getKey(), - new Cell(e.getValue(), HConstants.LATEST_TIMESTAMP)); + for (Map.Entry e : results.entrySet()) { + row.put(e.getKey(),e.getValue()); } isExpectedRow(rowId, row); // Clear out set. Otherwise row results accumulate. 
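(For orientation: the region-server tests above and below all exercise the same new contract, where InternalScanner.next fills a map of Cell values so that every column keeps its own timestamp. A minimal illustrative sketch of that read pattern, assuming an already-open InternalScanner named "scanner" and the imports these tests already use; the variable names are illustrative only.)

    HStoreKey key = new HStoreKey();
    TreeMap<byte [], Cell> results =
      new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
    while (scanner.next(key, results)) {
      for (Map.Entry<byte [], Cell> e : results.entrySet()) {
        byte [] value = e.getValue().getValue();  // the stored bytes
        long ts = e.getValue().getTimestamp();    // per-cell timestamp (the HBASE-737 fix)
        // ... assertions or processing on value/ts ...
      }
      results.clear();  // otherwise row results accumulate, as the tests note
    }
    scanner.close();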
diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 10707912b7b..4ff0ecf6334 100644 --- a/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HStoreKey; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.util.Bytes; /** @@ -284,13 +285,13 @@ public class TestHRegion extends HBaseTestCase { int numFetched = 0; try { HStoreKey curKey = new HStoreKey(); - TreeMap curVals = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap curVals = + new TreeMap(Bytes.BYTES_COMPARATOR); int k = 0; while(s.next(curKey, curVals)) { for(Iterator it = curVals.keySet().iterator(); it.hasNext(); ) { byte [] col = it.next(); - byte [] val = curVals.get(col); + byte [] val = curVals.get(col).getValue(); int curval = Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim()); for(int j = 0; j < cols.length; j++) { @@ -333,13 +334,13 @@ public class TestHRegion extends HBaseTestCase { numFetched = 0; try { HStoreKey curKey = new HStoreKey(); - TreeMap curVals = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap curVals = + new TreeMap(Bytes.BYTES_COMPARATOR); int k = 0; while(s.next(curKey, curVals)) { for(Iterator it = curVals.keySet().iterator(); it.hasNext(); ) { byte [] col = it.next(); - byte [] val = curVals.get(col); + byte [] val = curVals.get(col).getValue(); int curval = Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim()); for(int j = 0; j < cols.length; j++) { @@ -392,13 +393,13 @@ public class TestHRegion extends HBaseTestCase { numFetched = 0; try { HStoreKey curKey = new HStoreKey(); - TreeMap curVals = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap curVals = + new TreeMap(Bytes.BYTES_COMPARATOR); int k = 0; while(s.next(curKey, curVals)) { for(Iterator it = curVals.keySet().iterator(); it.hasNext(); ) { byte [] col = it.next(); - byte [] val = curVals.get(col); + byte [] val = curVals.get(col).getValue(); int curval = Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim()); for(int j = 0; j < cols.length; j++) { @@ -440,13 +441,13 @@ public class TestHRegion extends HBaseTestCase { numFetched = 0; try { HStoreKey curKey = new HStoreKey(); - TreeMap curVals = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap curVals = + new TreeMap(Bytes.BYTES_COMPARATOR); int k = 0; while(s.next(curKey, curVals)) { for(Iterator it = curVals.keySet().iterator(); it.hasNext(); ) { byte [] col = it.next(); - byte [] val = curVals.get(col); + byte [] val = curVals.get(col).getValue(); int curval = Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim()); for (int j = 0; j < cols.length; j++) { @@ -479,13 +480,13 @@ public class TestHRegion extends HBaseTestCase { numFetched = 0; try { HStoreKey curKey = new HStoreKey(); - TreeMap curVals = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap curVals = + new TreeMap(Bytes.BYTES_COMPARATOR); int k = 500; while(s.next(curKey, curVals)) { for(Iterator it = curVals.keySet().iterator(); it.hasNext(); ) { byte [] col = it.next(); - byte [] val = curVals.get(col); + byte [] val = curVals.get(col).getValue(); int curval = Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim()); for (int j = 0; j < cols.length; j++) { @@ -567,13 +568,13 @@ public class 
TestHRegion extends HBaseTestCase { int contentsFetched = 0; int anchorFetched = 0; HStoreKey curKey = new HStoreKey(); - TreeMap curVals = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap curVals = + new TreeMap(Bytes.BYTES_COMPARATOR); int k = 0; while(s.next(curKey, curVals)) { for(Iterator it = curVals.keySet().iterator(); it.hasNext(); ) { byte [] col = it.next(); - byte [] val = curVals.get(col); + byte [] val = curVals.get(col).getValue(); String curval = Bytes.toString(val); if(Bytes.compareTo(col, CONTENTS_BASIC) == 0) { assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp() @@ -619,13 +620,13 @@ public class TestHRegion extends HBaseTestCase { try { int numFetched = 0; HStoreKey curKey = new HStoreKey(); - TreeMap curVals = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap curVals = + new TreeMap(Bytes.BYTES_COMPARATOR); int k = 0; while(s.next(curKey, curVals)) { for(Iterator it = curVals.keySet().iterator(); it.hasNext(); ) { byte [] col = it.next(); - byte [] val = curVals.get(col); + byte [] val = curVals.get(col).getValue(); int curval = Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim()); @@ -663,8 +664,8 @@ public class TestHRegion extends HBaseTestCase { try { int fetched = 0; HStoreKey curKey = new HStoreKey(); - TreeMap curVals = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap curVals = + new TreeMap(Bytes.BYTES_COMPARATOR); while(s.next(curKey, curVals)) { for(Iterator it = curVals.keySet().iterator(); it.hasNext(); ) { it.next(); diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java b/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java index 27cd13aa04c..946477530a0 100644 --- a/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java +++ b/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java @@ -27,7 +27,6 @@ import java.util.TreeMap; import org.apache.hadoop.dfs.MiniDFSCluster; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Writables; -import org.apache.hadoop.io.Text; import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HRegionInfo; @@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.HStoreKey; import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.io.Cell; /** * Test of a long-lived scanner validating as we go. 
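(The TestScanner hunks below, like the MetaUtils change earlier, now unwrap the serialized HRegionInfo from a Cell before deserializing it. Roughly, and assuming the same variables as the surrounding test:)

    // Sketch: catalog lookups now go through Cell.getValue() first.
    Cell cell = results.get(HConstants.COL_REGIONINFO);
    HRegionInfo info = (cell == null)
      ? null : Writables.getHRegionInfoOrNull(cell.getValue());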
@@ -88,8 +88,8 @@ public class TestScanner extends HBaseTestCase { throws IOException { InternalScanner scanner = null; - TreeMap results = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap results = + new TreeMap(Bytes.BYTES_COMPARATOR); HStoreKey key = new HStoreKey(); byte [][][] scanColumns = { @@ -104,11 +104,11 @@ public class TestScanner extends HBaseTestCase { while (scanner.next(key, results)) { assertTrue(results.containsKey(HConstants.COL_REGIONINFO)); - byte [] val = results.get(HConstants.COL_REGIONINFO); + byte [] val = results.get(HConstants.COL_REGIONINFO).getValue(); validateRegionInfo(val); if(validateStartcode) { assertTrue(results.containsKey(HConstants.COL_STARTCODE)); - val = results.get(HConstants.COL_STARTCODE); + val = results.get(HConstants.COL_STARTCODE).getValue(); assertNotNull(val); assertFalse(val.length == 0); long startCode = Bytes.toLong(val); @@ -117,7 +117,7 @@ public class TestScanner extends HBaseTestCase { if(serverName != null) { assertTrue(results.containsKey(HConstants.COL_SERVER)); - val = results.get(HConstants.COL_SERVER); + val = results.get(HConstants.COL_SERVER).getValue(); assertNotNull(val); assertFalse(val.length == 0); String server = Bytes.toString(val); diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java b/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java index 2f91c21b191..e48b61dc4cd 100644 --- a/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java +++ b/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java @@ -182,12 +182,12 @@ public class TestSplit extends HBaseClusterTestCase { HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null); try { HStoreKey curKey = new HStoreKey(); - TreeMap curVals = - new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap curVals = + new TreeMap(Bytes.BYTES_COMPARATOR); boolean first = true; OUTER_LOOP: while(s.next(curKey, curVals)) { for(byte [] col: curVals.keySet()) { - byte [] val = curVals.get(col); + byte [] val = curVals.get(col).getValue(); byte [] curval = val; if (first) { first = false;
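(End to end, the client-visible effect of HBASE-737 is that each Cell returned by a scanner reports the time its value was actually written, instead of every cell in a row sharing one timestamp. A rough client-side sketch of that check, in the spirit of TestScannerTimes; the table name, family names, and the "conf" handle are placeholders, and the imports are the ones already used in that test.)

    HTable table = new HTable(conf, "hbase737");
    Scanner s = table.getScanner(new byte [][] {
        Bytes.toBytes("fam1:"), Bytes.toBytes("fam2:") });
    try {
      RowResult r;
      while ((r = s.next()) != null) {
        for (Map.Entry<byte [], Cell> e : r.entrySet()) {
          // Each Cell now carries the timestamp of its own write.
          System.out.println(Bytes.toString(e.getKey()) + " @ " +
              e.getValue().getTimestamp());
        }
      }
    } finally {
      s.close();
    }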