HBASE-737 Scanner: every cell in a row has the same timestamp

Changed InternalScanner API:

from
{code}
boolean next(HStoreKey, SortedMap<byte[], byte[]>)
{code}
to
{code}
boolean next(HStoreKey, SortedMap<byte[], Cell>)
{code}
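
For illustration, a sketch of a caller updated for the new signature (assuming an open InternalScanner named scanner is in scope); each value now arrives as a Cell carrying its own timestamp, instead of one timestamp applying to the whole row:
{code}
HStoreKey key = new HStoreKey();
SortedMap<byte [], Cell> results =
  new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
while (scanner.next(key, results)) {
  for (Map.Entry<byte [], Cell> e : results.entrySet()) {
    byte [] value = e.getValue().getValue();   // the stored bytes
    long ts = e.getValue().getTimestamp();     // per-cell timestamp (HBASE-737)
  }
  results.clear();
}
{code}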

Files changed as a result of the API change:

HMerge.java, ColumnValueFilter, PageRowFilter, RegExpRowFilter, RowFilterInterface, RowFilterSet, StopRowFilter, WhileMatchRowFilter, HAbstractScanner, HRegion$HScanner, HRegionServer, HStoreScanner, InternalScanner, Memcache, StoreFileScanner, MetaUtils, HBaseTestCase, TestScannerAPI, TimestampTestBase, TestRegExpRowFilter, TestRowFilterAfterWrite, TestRowFilterOnMultipleFamilies, TestRowFilterSet, TestGet2, TestHMemcache, TestHRegion, TestScanner, TestSplit

Update comments in Flusher

HRegion.internalFlushCache, HStore.flushCache, and HStore.internalFlushCache now return true only if a flush completed and a compaction is needed.
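
This lets the flusher request a compaction directly off the flush result, as in this sketch of the pattern used in the Flusher hunk below:
{code}
// flushcache() now reports whether the flush completed AND left enough
// store files behind to warrant a compaction.
if (region.flushcache() && !removeFromQueue) {
  server.compactSplitThread.compactionRequested(region);
}
{code}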

HRegion.internalFlushCache now includes the region name in DroppedSnapshotException.

When creating a reader during compaction, don't bother loading the bloom filter since we won't use it.

StoreFileScanner now holds the store's read lock while its constructor runs. It also no longer loads the bloom filter when it opens readers on the store files.
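
The constructor brackets reader-opening with the store's read lock so the set of store files cannot change underneath it (sketch, matching the StoreFileScanner hunk below):
{code}
this.store.lock.readLock().lock();
try {
  openReaders(firstRow);
} finally {
  this.store.lock.readLock().unlock();
}
{code}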

TestScannerTimes - new regression test for HBASE-737


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@677113 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2008-07-16 00:03:26 +00:00
parent 422c3ff605
commit 90c9f71aa1
35 changed files with 333 additions and 378 deletions

CHANGES.txt

@@ -185,6 +185,7 @@ Trunk (unreleased changes)
              (Izaak Rubin via Stack)
   HBASE-744  BloomFilter serialization/deserialization broken
   HBASE-742  Column length limit is not enforced (Jean-Daniel Cryans via Stack)
+  HBASE-737  Scanner: every cell in a row has the same timestamp
 IMPROVEMENTS
   HBASE-559  MR example job to count table rows

HMerge.java

@@ -330,11 +330,11 @@ class HMerge implements HConstants {
     try {
       HStoreKey key = new HStoreKey();
-      TreeMap<byte [], byte[]> results =
-        new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell> results =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       while(rootScanner.next(key, results)) {
-        for(byte [] b: results.values()) {
-          HRegionInfo info = Writables.getHRegionInfoOrNull(b);
+        for(Cell c: results.values()) {
+          HRegionInfo info = Writables.getHRegionInfoOrNull(c.getValue());
           if (info != null) {
             metaRegions.add(info);
           }

UnmodifyableHRegionInfo.java

@@ -21,7 +21,6 @@
 package org.apache.hadoop.hbase.client;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 class UnmodifyableHRegionInfo extends HRegionInfo {
   /*

UnmodifyableHTableDescriptor.java

@@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 /**
  * Read-only table descriptor.
- * Returned out of {@link HTable.getTableDescriptor}.
  */
 public class UnmodifyableHTableDescriptor extends HTableDescriptor {

ColumnValueFilter.java

@@ -27,6 +27,7 @@ import java.util.SortedMap;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.io.ObjectWritable;
 /**
@@ -124,7 +125,7 @@ public class ColumnValueFilter implements RowFilterInterface {
   }
   /** {@inheritDoc} */
-  public boolean filterRow(final SortedMap<byte[], byte[]> columns) {
+  public boolean filterRow(final SortedMap<byte[], Cell> columns) {
     // Don't let rows through if they don't have the column we are checking
     return !columns.containsKey(columnName);
   }

PageRowFilter.java

@@ -24,6 +24,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.SortedMap;
+import org.apache.hadoop.hbase.io.Cell;
 /**
  * Implementation of RowFilterInterface that limits results to a specific page
@@ -123,7 +124,7 @@ public class PageRowFilter implements RowFilterInterface {
    * {@inheritDoc}
    */
   public boolean filterRow(@SuppressWarnings("unused")
-    final SortedMap<byte [], byte[]> columns) {
+    final SortedMap<byte [], Cell> columns) {
     return filterAllRemaining();
   }

RegExpRowFilter.java

@@ -32,6 +32,7 @@ import java.util.Map.Entry;
 import java.util.regex.Pattern;
 import org.apache.hadoop.hbase.regionserver.HLogEdit;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
 /**
@@ -80,7 +81,7 @@ public class RegExpRowFilter implements RowFilterInterface {
    */
   @Deprecated
   public RegExpRowFilter(final String rowKeyRegExp,
-    final Map<byte [], byte[]> columnFilter) {
+    final Map<byte [], Cell> columnFilter) {
     this.rowKeyRegExp = rowKeyRegExp;
     this.setColumnFilters(columnFilter);
   }
@@ -122,13 +123,13 @@ public class RegExpRowFilter implements RowFilterInterface {
    * Map of columns with value criteria.
    */
   @Deprecated
-  public void setColumnFilters(final Map<byte [], byte[]> columnFilter) {
+  public void setColumnFilters(final Map<byte [], Cell> columnFilter) {
     if (null == columnFilter) {
       nullColumns.clear();
       equalsMap.clear();
     } else {
-      for (Entry<byte [], byte[]> entry : columnFilter.entrySet()) {
-        setColumnFilter(entry.getKey(), entry.getValue());
+      for (Entry<byte [], Cell> entry : columnFilter.entrySet()) {
+        setColumnFilter(entry.getKey(), entry.getValue().getValue());
       }
     }
   }
@@ -186,10 +187,10 @@ public class RegExpRowFilter implements RowFilterInterface {
    *
    * {@inheritDoc}
    */
-  public boolean filterRow(final SortedMap<byte [], byte[]> columns) {
-    for (Entry<byte [], byte[]> col : columns.entrySet()) {
+  public boolean filterRow(final SortedMap<byte [], Cell> columns) {
+    for (Entry<byte [], Cell> col : columns.entrySet()) {
       if (nullColumns.contains(col.getKey())
-        && !HLogEdit.isDeleted(col.getValue())) {
+        && !HLogEdit.isDeleted(col.getValue().getValue())) {
         return true;
       }
     }

RowFilterInterface.java

@@ -23,6 +23,8 @@ import java.util.SortedMap;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.hbase.io.Cell;
 /**
  *
  * Interface used for row-level filters applied to HRegion.HScanner scan
@@ -98,7 +100,7 @@ public interface RowFilterInterface extends Writable {
   * @param columns
   * @return true if row filtered and should not be processed.
   */
-  boolean filterRow(final SortedMap<byte [], byte[]> columns);
+  boolean filterRow(final SortedMap<byte [], Cell> columns);
 /**
  * Validates that this filter applies only to a subset of the given columns.

RowFilterSet.java

@@ -28,6 +28,7 @@ import java.util.SortedMap;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.io.ObjectWritable;
 /**
@@ -179,7 +180,7 @@ public class RowFilterSet implements RowFilterInterface {
   }
   /** {@inheritDoc} */
-  public boolean filterRow(final SortedMap<byte [], byte[]> columns) {
+  public boolean filterRow(final SortedMap<byte [], Cell> columns) {
     boolean resultFound = false;
     boolean result = operator == Operator.MUST_PASS_ONE;
     for (RowFilterInterface filter : filters) {

StopRowFilter.java

@@ -24,6 +24,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.SortedMap;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
 /**
@@ -123,7 +124,7 @@ public class StopRowFilter implements RowFilterInterface {
   * @param columns
   */
  public boolean filterRow(@SuppressWarnings("unused")
-    final SortedMap<byte [], byte[]> columns) {
+    final SortedMap<byte [], Cell> columns) {
    return filterAllRemaining();
  }

WhileMatchRowFilter.java

@@ -24,6 +24,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.SortedMap;
+import org.apache.hadoop.hbase.io.Cell;
 /**
  * WhileMatchRowFilter is a wrapper filter that filters everything after the
@@ -98,7 +99,7 @@ public class WhileMatchRowFilter implements RowFilterInterface {
   }
   /** {@inheritDoc} */
-  public boolean filterRow(final SortedMap<byte [], byte[]> columns) {
+  public boolean filterRow(final SortedMap<byte [], Cell> columns) {
     changeFAR(this.filter.filterRow(columns));
     return filterAllRemaining();
   }

Flusher.java

@@ -27,8 +27,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.HashSet;
 import java.util.Set;
 import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.Comparator;
 import java.util.ConcurrentModificationException;
 import org.apache.commons.logging.Log;
@@ -169,8 +167,9 @@ class Flusher extends Thread implements FlushRequester {
     }
     lock.lock();
     try {
-      // See javadoc comment above for removeFromQueue on why we do not
-      // compact if removeFromQueue is true.
+      // See comment above for removeFromQueue on why we do not
+      // compact if removeFromQueue is true. Note that region.flushCache()
+      // only returns true if a flush is done and if a compaction is needed.
       if (region.flushcache() && !removeFromQueue) {
         server.compactSplitThread.compactionRequested(region);
       }

HAbstractScanner.java

@@ -29,6 +29,7 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
 /**
@@ -182,8 +183,7 @@ public abstract class HAbstractScanner implements InternalScanner {
   }
   /** {@inheritDoc} */
-  public abstract boolean next(HStoreKey key,
-    SortedMap<byte [], byte []> results)
+  public abstract boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
   throws IOException;
 }

HRegion.java

@@ -977,7 +977,7 @@ public class HRegion implements HConstants {
   *
   * <p> This method may block for some time.
   *
-  * @return true if the cache was flushed
+  * @return true if the region needs compacting
   *
   * @throws IOException
   * @throws DroppedSnapshotException Thrown when replay of hlog is required
@@ -1021,13 +1021,16 @@ public class HRegion implements HConstants {
     // restart so hlog content can be replayed and put back into the memcache.
     // Otherwise, the snapshot content while backed up in the hlog, it will not
     // be part of the current running servers state.
-    long flushed = 0;
+    boolean compactionRequested = false;
     try {
       // A. Flush memcache to all the HStores.
       // Keep running vector of all store files that includes both old and the
       // just-made new flush store file.
       for (HStore hstore: stores.values()) {
-        flushed += hstore.flushCache(sequenceId);
+        boolean needsCompaction = hstore.flushCache(sequenceId);
+        if (needsCompaction) {
+          compactionRequested = true;
+        }
       }
     } catch (Throwable t) {
       // An exception here means that the snapshot was not persisted.
@@ -1037,7 +1040,8 @@ public class HRegion implements HConstants {
       // exceptions -- e.g. HBASE-659 was about an NPE -- so now we catch
       // all and sundry.
       this.log.abortCacheFlush();
-      DroppedSnapshotException dse = new DroppedSnapshotException();
+      DroppedSnapshotException dse = new DroppedSnapshotException("region: " +
+        Bytes.toString(getRegionName()));
       dse.initCause(t);
       throw dse;
     }
@@ -1064,13 +1068,12 @@ public class HRegion implements HConstants {
       LOG.debug("Finished memcache flush for region " + this +
         " in " +
         (System.currentTimeMillis() - startTime) + "ms, sequence id=" +
-        sequenceId + ", " +
-        StringUtils.humanReadableInt(flushed));
+        sequenceId + ", compaction requested=" + compactionRequested);
       if (!regionInfo.isMetaRegion()) {
         this.historian.addRegionFlush(regionInfo, timeTaken);
       }
     }
-    return true;
+    return compactionRequested;
   }
   //////////////////////////////////////////////////////////////////////////////
@@ -1733,7 +1736,7 @@ public class HRegion implements HConstants {
   */
  private class HScanner implements InternalScanner {
    private InternalScanner[] scanners;
-   private TreeMap<byte [], byte []>[] resultSets;
+   private TreeMap<byte [], Cell>[] resultSets;
    private HStoreKey[] keys;
    private RowFilterInterface filter;
@@ -1782,7 +1785,7 @@ public class HRegion implements HConstants {
      this.keys = new HStoreKey[scanners.length];
      for (int i = 0; i < scanners.length; i++) {
        keys[i] = new HStoreKey();
-       resultSets[i] = new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+       resultSets[i] = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
        if(scanners[i] != null && !scanners[i].next(keys[i], resultSets[i])) {
          closeScanner(i);
        }
@@ -1795,7 +1798,7 @@ public class HRegion implements HConstants {
     /** {@inheritDoc} */
     @SuppressWarnings("null")
-    public boolean next(HStoreKey key, SortedMap<byte [], byte[]> results)
+    public boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
     throws IOException {
       boolean moreToFollow = false;
       boolean filtered = false;
@@ -1830,7 +1833,7 @@ public class HRegion implements HConstants {
         // but this had the effect of overwriting newer
         // values with older ones. So now we only insert
         // a result if the map does not contain the key.
-        for (Map.Entry<byte [], byte[]> e : resultSets[i].entrySet()) {
+        for (Map.Entry<byte [], Cell> e : resultSets[i].entrySet()) {
           if (!results.containsKey(e.getKey())) {
             results.put(e.getKey(), e.getValue());
           }

HRegionServer.java

@@ -1120,12 +1120,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       HbaseMapWritable<byte [], Cell> values
         = new HbaseMapWritable<byte [], Cell>();
       HStoreKey key = new HStoreKey();
-      TreeMap<byte [], byte []> results =
-        new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell> results =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       while (s.next(key, results)) {
-        for (Map.Entry<byte [], byte []> e: results.entrySet()) {
-          values.put(e.getKey(), new Cell(e.getValue(), key.getTimestamp()));
-        }
+        values.putAll(results);
         if (values.size() > 0) {
           // Row has something in it. Return the value.
           break;

HStore.java

@@ -90,7 +90,7 @@ public class HStore implements HConstants {
   private final Integer flushLock = new Integer(0);
-  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+  final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   final byte [] storeName;
   private final String storeNameStr;
@@ -550,27 +550,27 @@ public class HStore implements HConstants {
   * Write out current snapshot. Presumes {@link #snapshot()} has been called
   * previously.
   * @param logCacheFlushId flush sequence number
-  * @return count of bytes flushed
+  * @return true if a compaction is needed
   * @throws IOException
   */
-  long flushCache(final long logCacheFlushId) throws IOException {
+  boolean flushCache(final long logCacheFlushId) throws IOException {
    // Get the snapshot to flush. Presumes that a call to
    // this.memcache.snapshot() has happened earlier up in the chain.
    SortedMap<HStoreKey, byte []> cache = this.memcache.getSnapshot();
-   long flushed = internalFlushCache(cache, logCacheFlushId);
+   boolean compactionNeeded = internalFlushCache(cache, logCacheFlushId);
    // If an exception happens flushing, we let it out without clearing
    // the memcache snapshot. The old snapshot will be returned when we say
    // 'snapshot', the next time flush comes around.
    this.memcache.clearSnapshot(cache);
-   return flushed;
+   return compactionNeeded;
  }
- private long internalFlushCache(SortedMap<HStoreKey, byte []> cache,
+ private boolean internalFlushCache(SortedMap<HStoreKey, byte []> cache,
    long logCacheFlushId) throws IOException {
    long flushed = 0;
    // Don't flush if there are no entries.
    if (cache.size() == 0) {
-     return flushed;
+     return false;
    }
    // TODO: We can fail in the below block before we complete adding this
@@ -634,7 +634,7 @@ public class HStore implements HConstants {
         StringUtils.humanReadableInt(newStoreSize));
     }
   }
-  return flushed;
+  return storefiles.size() >= compactionThreshold;
 }
 /*
@@ -744,8 +744,8 @@ public class HStore implements HConstants {
     List<MapFile.Reader> readers = new ArrayList<MapFile.Reader>();
     for (HStoreFile file: filesToCompact) {
       try {
-        HStoreFile.BloomFilterMapFile.Reader reader = file.getReader(fs,
-          this.family.isBloomFilterEnabled(), false);
+        HStoreFile.BloomFilterMapFile.Reader reader =
+          file.getReader(fs, false, false);
         readers.add(reader);
         // Compute the size of the new bloomfilter if needed

HStoreScanner.java

@@ -22,7 +22,6 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.SortedMap;
@@ -33,6 +32,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
 /**
@@ -42,7 +42,7 @@ class HStoreScanner implements InternalScanner {
   static final Log LOG = LogFactory.getLog(HStoreScanner.class);
   private InternalScanner[] scanners;
-  private TreeMap<byte [], byte []>[] resultSets;
+  private TreeMap<byte [], Cell>[] resultSets;
   private HStoreKey[] keys;
   private boolean wildcardMatch = false;
   private boolean multipleMatchers = false;
@@ -87,7 +87,7 @@ class HStoreScanner implements InternalScanner {
     // All results will match the required column-set and scanTime.
     for (int i = 0; i < scanners.length; i++) {
       keys[i] = new HStoreKey();
-      resultSets[i] = new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+      resultSets[i] = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       if(scanners[i] != null && !scanners[i].next(keys[i], resultSets[i])) {
         closeScanner(i);
       }
@@ -105,7 +105,7 @@ class HStoreScanner implements InternalScanner {
   }
   /** {@inheritDoc} */
-  public boolean next(HStoreKey key, SortedMap<byte [], byte[]> results)
+  public boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
   throws IOException {
     // Filtered flag is set by filters. If a cell has been 'filtered out'
@@ -166,9 +166,9 @@ class HStoreScanner implements InternalScanner {
       // a result if the map does not contain the key.
       HStoreKey hsk = new HStoreKey(key.getRow(), HConstants.EMPTY_BYTE_ARRAY,
         key.getTimestamp());
-      for (Map.Entry<byte [], byte[]> e : resultSets[i].entrySet()) {
+      for (Map.Entry<byte [], Cell> e : resultSets[i].entrySet()) {
         hsk.setColumn(e.getKey());
-        if (HLogEdit.isDeleted(e.getValue())) {
+        if (HLogEdit.isDeleted(e.getValue().getValue())) {
           if (!deletes.contains(hsk)) {
             // Key changes as we cycle the for loop so add a copy to
             // the set of deletes.
@@ -180,8 +180,8 @@ class HStoreScanner implements InternalScanner {
           !results.containsKey(e.getKey())) {
           if (dataFilter != null) {
             // Filter whole row by column data?
-            filtered =
-              dataFilter.filterColumn(chosenRow, e.getKey(), e.getValue());
+            filtered = dataFilter.filterColumn(chosenRow, e.getKey(),
+              e.getValue().getValue());
             if (filtered) {
               results.clear();
               break;
@@ -265,9 +265,4 @@ class HStoreScanner implements InternalScanner {
       }
     }
   }
-  public Iterator<Map.Entry<HStoreKey, SortedMap<byte [], byte[]>>> iterator() {
-    throw new UnsupportedOperationException("Unimplemented serverside. " +
-      "next(HStoreKey, StortedMap(...) is more efficient");
-  }
 }

InternalScanner.java

@@ -23,6 +23,7 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.SortedMap;
 import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.io.Cell;
 /**
  * Internal scanners differ from client-side scanners in that they operate on
@@ -49,7 +50,7 @@ public interface InternalScanner extends Closeable {
   * @return true if data was returned
   * @throws IOException
   */
-  public boolean next(HStoreKey key, SortedMap<byte [], byte[]> results)
+  public boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
   throws IOException;
 /**

Memcache.java

@@ -685,7 +685,7 @@ class Memcache {
     /** {@inheritDoc} */
     @Override
-    public boolean next(HStoreKey key, SortedMap<byte [], byte []> results)
+    public boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
     throws IOException {
       if (this.scannerClosed) {
         return false;
@@ -735,7 +735,7 @@ class Memcache {
           c.getTimestamp() > latestTimestamp) {
           latestTimestamp = c.getTimestamp();
         }
-        results.put(column, c.getValue());
+        results.put(column, c);
       }
       this.currentRow = getNextRow(this.currentRow);

StoreFileScanner.java

@@ -27,6 +27,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.MapFile;
@@ -36,12 +37,12 @@ import org.apache.hadoop.io.MapFile;
 class StoreFileScanner extends HAbstractScanner
 implements ChangedReadersObserver {
   // Keys retrieved from the sources
-  private HStoreKey keys[];
+  private volatile HStoreKey keys[];
   // Values that correspond to those keys
-  private byte [][] vals;
+  private volatile byte [][] vals;
   // Readers we go against.
-  private MapFile.Reader[] readers;
+  private volatile MapFile.Reader[] readers;
   // Store this scanner came out of.
   private final HStore store;
@@ -62,6 +63,7 @@ implements ChangedReadersObserver {
     super(timestamp, targetCols);
     this.store = store;
     this.store.addChangedReaderObserver(this);
+    this.store.lock.readLock().lock();
     try {
       openReaders(firstRow);
     } catch (Exception ex) {
@@ -69,6 +71,8 @@ implements ChangedReadersObserver {
       IOException e = new IOException("HStoreScanner failed construction");
       e.initCause(ex);
       throw e;
+    } finally {
+      this.store.lock.readLock().unlock();
     }
   }
@@ -92,8 +96,7 @@ implements ChangedReadersObserver {
     // Most recent map file should be first
     int i = readers.length - 1;
     for(HStoreFile curHSF: store.getStorefiles().values()) {
-      readers[i--] = curHSF.getReader(store.fs,
-        store.getFamily().isBloomFilterEnabled(), false);
+      readers[i--] = curHSF.getReader(store.fs, false, false);
     }
     this.keys = new HStoreKey[readers.length];
@@ -140,7 +143,7 @@ implements ChangedReadersObserver {
   * @see org.apache.hadoop.hbase.regionserver.InternalScanner#next(org.apache.hadoop.hbase.HStoreKey, java.util.SortedMap)
   */
  @Override
-  public boolean next(HStoreKey key, SortedMap<byte [], byte []> results)
+  public boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
  throws IOException {
    if (this.scannerClosed) {
      return false;
@@ -173,7 +176,8 @@ implements ChangedReadersObserver {
     if(columnMatch(i)) {
       // We only want the first result for any specific family member
       if(!results.containsKey(keys[i].getColumn())) {
-        results.put(keys[i].getColumn(), vals[i]);
+        results.put(keys[i].getColumn(),
+          new Cell(vals[i], keys[i].getTimestamp()));
         insertedItem = true;
       }
     }

MetaUtils.java

@@ -201,11 +201,11 @@ public class MetaUtils {
     try {
       HStoreKey key = new HStoreKey();
-      SortedMap<byte [], byte[]> results =
-        new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
+      SortedMap<byte [], Cell> results =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       while (rootScanner.next(key, results)) {
-        HRegionInfo info = Writables.getHRegionInfoOrNull(
-          results.get(HConstants.COL_REGIONINFO));
+        HRegionInfo info = Writables.getHRegionInfoOrNull(
+          results.get(HConstants.COL_REGIONINFO).getValue());
         if (info == null) {
           LOG.warn("region info is null for row " + key.getRow() +
             " in table " + HConstants.ROOT_TABLE_NAME);
@@ -253,11 +253,11 @@ public class MetaUtils {
       HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
     try {
       HStoreKey key = new HStoreKey();
-      SortedMap<byte[], byte[]> results =
-        new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
+      SortedMap<byte[], Cell> results =
+        new TreeMap<byte[], Cell>(Bytes.BYTES_COMPARATOR);
       while (metaScanner.next(key, results)) {
-        HRegionInfo info =
-          Writables.getHRegionInfoOrNull(results.get(HConstants.COL_REGIONINFO));
+        HRegionInfo info = Writables.getHRegionInfoOrNull(
+          results.get(HConstants.COL_REGIONINFO).getValue());
         if (info == null) {
           LOG.warn("regioninfo null for row " + key.getRow() + " in table " +
             Bytes.toString(m.getTableDesc().getName()));

HBaseTestCase.java

@@ -509,8 +509,8 @@ public abstract class HBaseTestCase extends TestCase {
   }
   public interface ScannerIncommon
-  extends Iterable<Map.Entry<HStoreKey, SortedMap<byte [], byte[]>>> {
-    public boolean next(HStoreKey key, SortedMap<byte [], byte[]> values)
+  extends Iterable<Map.Entry<HStoreKey, SortedMap<byte [], Cell>>> {
+    public boolean next(HStoreKey key, SortedMap<byte [], Cell> values)
     throws IOException;
     public void close() throws IOException;
@@ -522,7 +522,7 @@ public abstract class HBaseTestCase extends TestCase {
       this.scanner = scanner;
     }
-    public boolean next(HStoreKey key, SortedMap<byte [], byte[]> values)
+    public boolean next(HStoreKey key, SortedMap<byte [], Cell> values)
     throws IOException {
       RowResult results = scanner.next();
       if (results == null) {
@@ -531,7 +531,7 @@ public abstract class HBaseTestCase extends TestCase {
       key.setRow(results.getRow());
       values.clear();
       for (Map.Entry<byte [], Cell> entry : results.entrySet()) {
-        values.put(entry.getKey(), entry.getValue().getValue());
+        values.put(entry.getKey(), entry.getValue());
       }
       return true;
     }
@@ -552,7 +552,7 @@ public abstract class HBaseTestCase extends TestCase {
       this.scanner = scanner;
     }
-    public boolean next(HStoreKey key, SortedMap<byte [], byte[]> values)
+    public boolean next(HStoreKey key, SortedMap<byte [], Cell> values)
    throws IOException {
      return scanner.next(key, values);
    }

TestBloomFilters.java (deleted)

@@ -1,205 +0,0 @@
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.util.Bytes;
/** Tests per-column bloom filters */
public class TestBloomFilters extends HBaseClusterTestCase {
static final Log LOG = LogFactory.getLog(TestBloomFilters.class);
private static final byte [] CONTENTS = Bytes.toBytes("contents:");
private static final byte [][] rows = {
Bytes.toBytes("wmjwjzyv"),
Bytes.toBytes("baietibz"),
Bytes.toBytes("guhsgxnv"),
Bytes.toBytes("mhnqycto"),
Bytes.toBytes("xcyqafgz"),
Bytes.toBytes("zidoamgb"),
Bytes.toBytes("tftfirzd"),
Bytes.toBytes("okapqlrg"),
Bytes.toBytes("yccwzwsq"),
Bytes.toBytes("qmonufqu"),
Bytes.toBytes("wlsctews"),
Bytes.toBytes("mksdhqri"),
Bytes.toBytes("wxxllokj"),
Bytes.toBytes("eviuqpls"),
Bytes.toBytes("bavotqmj"),
Bytes.toBytes("yibqzhdl"),
Bytes.toBytes("csfqmsyr"),
Bytes.toBytes("guxliyuh"),
Bytes.toBytes("pzicietj"),
Bytes.toBytes("qdwgrqwo"),
Bytes.toBytes("ujfzecmi"),
Bytes.toBytes("dzeqfvfi"),
Bytes.toBytes("phoegsij"),
Bytes.toBytes("bvudfcou"),
Bytes.toBytes("dowzmciz"),
Bytes.toBytes("etvhkizp"),
Bytes.toBytes("rzurqycg"),
Bytes.toBytes("krqfxuge"),
Bytes.toBytes("gflcohtd"),
Bytes.toBytes("fcrcxtps"),
Bytes.toBytes("qrtovxdq"),
Bytes.toBytes("aypxwrwi"),
Bytes.toBytes("dckpyznr"),
Bytes.toBytes("mdaawnpz"),
Bytes.toBytes("pakdfvca"),
Bytes.toBytes("xjglfbez"),
Bytes.toBytes("xdsecofi"),
Bytes.toBytes("sjlrfcab"),
Bytes.toBytes("ebcjawxv"),
Bytes.toBytes("hkafkjmy"),
Bytes.toBytes("oimmwaxo"),
Bytes.toBytes("qcuzrazo"),
Bytes.toBytes("nqydfkwk"),
Bytes.toBytes("frybvmlb"),
Bytes.toBytes("amxmaqws"),
Bytes.toBytes("gtkovkgx"),
Bytes.toBytes("vgwxrwss"),
Bytes.toBytes("xrhzmcep"),
Bytes.toBytes("tafwziil"),
Bytes.toBytes("erjmncnv"),
Bytes.toBytes("heyzqzrn"),
Bytes.toBytes("sowvyhtu"),
Bytes.toBytes("heeixgzy"),
Bytes.toBytes("ktcahcob"),
Bytes.toBytes("ljhbybgg"),
Bytes.toBytes("jiqfcksl"),
Bytes.toBytes("anjdkjhm"),
Bytes.toBytes("uzcgcuxp"),
Bytes.toBytes("vzdhjqla"),
Bytes.toBytes("svhgwwzq"),
Bytes.toBytes("zhswvhbp"),
Bytes.toBytes("ueceybwy"),
Bytes.toBytes("czkqykcw"),
Bytes.toBytes("ctisayir"),
Bytes.toBytes("hppbgciu"),
Bytes.toBytes("nhzgljfk"),
Bytes.toBytes("vaziqllf"),
Bytes.toBytes("narvrrij"),
Bytes.toBytes("kcevbbqi"),
Bytes.toBytes("qymuaqnp"),
Bytes.toBytes("pwqpfhsr"),
Bytes.toBytes("peyeicuk"),
Bytes.toBytes("kudlwihi"),
Bytes.toBytes("pkmqejlm"),
Bytes.toBytes("ylwzjftl"),
Bytes.toBytes("rhqrlqar"),
Bytes.toBytes("xmftvzsp"),
Bytes.toBytes("iaemtihk"),
Bytes.toBytes("ymsbrqcu"),
Bytes.toBytes("yfnlcxto"),
Bytes.toBytes("nluqopqh"),
Bytes.toBytes("wmrzhtox"),
Bytes.toBytes("qnffhqbl"),
Bytes.toBytes("zypqpnbw"),
Bytes.toBytes("oiokhatd"),
Bytes.toBytes("mdraddiu"),
Bytes.toBytes("zqoatltt"),
Bytes.toBytes("ewhulbtm"),
Bytes.toBytes("nmswpsdf"),
Bytes.toBytes("xsjeteqe"),
Bytes.toBytes("ufubcbma"),
Bytes.toBytes("phyxvrds"),
Bytes.toBytes("vhnfldap"),
Bytes.toBytes("zrrlycmg"),
Bytes.toBytes("becotcjx"),
Bytes.toBytes("wvbubokn"),
Bytes.toBytes("avkgiopr"),
Bytes.toBytes("mbqqxmrv"),
Bytes.toBytes("ibplgvuu"),
Bytes.toBytes("dghvpkgc")
};
private static final byte [][] testKeys = {
Bytes.toBytes("abcdefgh"),
Bytes.toBytes("ijklmnop"),
Bytes.toBytes("qrstuvwx"),
Bytes.toBytes("yzabcdef")
};
/**
* Test that uses automatic bloom filter
* @throws IOException
*/
public void testComputedParameters() throws IOException {
HTable table = null;
// Setup
HTableDescriptor desc = new HTableDescriptor(getName());
desc.addFamily(
new HColumnDescriptor(CONTENTS, // Column name
1, // Max versions
HColumnDescriptor.CompressionType.NONE, // no compression
HColumnDescriptor.DEFAULT_IN_MEMORY, // not in memory
HColumnDescriptor.DEFAULT_BLOCKCACHE,
HColumnDescriptor.DEFAULT_LENGTH,
HColumnDescriptor.DEFAULT_TTL,
true
)
);
// Create the table
HBaseAdmin admin = new HBaseAdmin(conf);
admin.createTable(desc);
// Open table
table = new HTable(conf, desc.getName());
// Store some values
for(int i = 0; i < 100; i++) {
byte [] row = rows[i];
String value = row.toString();
BatchUpdate b = new BatchUpdate(row);
b.put(CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
table.commit(b);
}
try {
// Give cache flusher and log roller a chance to run
// Otherwise we'll never hit the bloom filter, just the memcache
Thread.sleep(conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000) * 2);
} catch (InterruptedException e) {
// ignore
}
for(int i = 0; i < testKeys.length; i++) {
Cell value = table.get(testKeys[i], CONTENTS);
if(value != null && value.getValue().length != 0) {
LOG.info("non existant key: " + testKeys[i] + " returned value: " +
new String(value.getValue(), HConstants.UTF8_ENCODING));
}
}
}
}

TestScannerAPI.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Scanner;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -44,8 +45,8 @@ public class TestScannerAPI extends HBaseClusterTestCase {
   });
   private final byte [] startRow = Bytes.toBytes("0");
-  private final TreeMap<byte [], SortedMap<byte [], byte[]>> values =
-    new TreeMap<byte [], SortedMap<byte [], byte[]>>(Bytes.BYTES_COMPARATOR);
+  private final TreeMap<byte [], SortedMap<byte [], Cell>> values =
+    new TreeMap<byte [], SortedMap<byte [], Cell>>(Bytes.BYTES_COMPARATOR);
   /**
    * @throws Exception
@@ -53,13 +54,16 @@ public class TestScannerAPI extends HBaseClusterTestCase {
   public TestScannerAPI() throws Exception {
     super();
     try {
-      TreeMap<byte [], byte[]> columns =
-        new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
-      columns.put(Bytes.toBytes("a:1"), Bytes.toBytes("1"));
+      TreeMap<byte [], Cell> columns =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
+      columns.put(Bytes.toBytes("a:1"),
+        new Cell(Bytes.toBytes("1"), HConstants.LATEST_TIMESTAMP));
       values.put(Bytes.toBytes("1"), columns);
-      columns = new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
-      columns.put(Bytes.toBytes("a:2"), Bytes.toBytes("2"));
-      columns.put(Bytes.toBytes("b:2"), Bytes.toBytes("2"));
+      columns = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
+      columns.put(Bytes.toBytes("a:2"),
+        new Cell(Bytes.toBytes("2"), HConstants.LATEST_TIMESTAMP));
+      columns.put(Bytes.toBytes("b:2"),
+        new Cell(Bytes.toBytes("2"), HConstants.LATEST_TIMESTAMP));
     } catch (Exception e) {
       e.printStackTrace();
       throw e;
@@ -85,10 +89,10 @@ public class TestScannerAPI extends HBaseClusterTestCase {
     HTable table = new HTable(conf, getName());
-    for (Map.Entry<byte [], SortedMap<byte [], byte[]>> row: values.entrySet()) {
+    for (Map.Entry<byte [], SortedMap<byte [], Cell>> row: values.entrySet()) {
       BatchUpdate b = new BatchUpdate(row.getKey());
-      for (Map.Entry<byte [], byte[]> val: row.getValue().entrySet()) {
-        b.put(val.getKey(), val.getValue());
+      for (Map.Entry<byte [], Cell> val: row.getValue().entrySet()) {
+        b.put(val.getKey(), val.getValue().getValue());
       }
       table.commit(b);
     }
@@ -128,12 +132,12 @@ public class TestScannerAPI extends HBaseClusterTestCase {
     for (RowResult r : scanner2) {
       assertTrue("row key", values.containsKey(r.getRow()));
-      SortedMap<byte [], byte[]> columnValues = values.get(r.getRow());
+      SortedMap<byte [], Cell> columnValues = values.get(r.getRow());
       assertEquals(columnValues.size(), r.size());
-      for (Map.Entry<byte [], byte[]> e: columnValues.entrySet()) {
+      for (Map.Entry<byte [], Cell> e: columnValues.entrySet()) {
         byte [] column = e.getKey();
         assertTrue("column", r.containsKey(column));
-        assertTrue("value", Arrays.equals(columnValues.get(column),
+        assertTrue("value", Arrays.equals(columnValues.get(column).getValue(),
           r.get(column).getValue()));
       }
     }
@@ -144,19 +148,19 @@ public class TestScannerAPI extends HBaseClusterTestCase {
   private void verify(ScannerIncommon scanner) throws IOException {
     HStoreKey key = new HStoreKey();
-    SortedMap<byte [], byte[]> results =
-      new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
+    SortedMap<byte [], Cell> results =
+      new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
     while (scanner.next(key, results)) {
       byte [] row = key.getRow();
       assertTrue("row key", values.containsKey(row));
-      SortedMap<byte [], byte[]> columnValues = values.get(row);
+      SortedMap<byte [], Cell> columnValues = values.get(row);
       assertEquals(columnValues.size(), results.size());
-      for (Map.Entry<byte [], byte[]> e: columnValues.entrySet()) {
+      for (Map.Entry<byte [], Cell> e: columnValues.entrySet()) {
         byte [] column = e.getKey();
         assertTrue("column", results.containsKey(column));
-        assertTrue("value", Arrays.equals(columnValues.get(column),
-          results.get(column)));
+        assertTrue("value", Arrays.equals(columnValues.get(column).getValue(),
+          results.get(column).getValue()));
       }
       results.clear();
     }

TimestampTestBase.java

@@ -177,13 +177,13 @@ public class TimestampTestBase extends HBaseTestCase {
     int count = 0;
     try {
       HStoreKey key = new HStoreKey();
-      TreeMap<byte [], byte []>value =
-        new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell>value =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       while (scanner.next(key, value)) {
         assertTrue(key.getTimestamp() <= ts);
         // Content matches the key or HConstants.LATEST_TIMESTAMP.
         // (Key does not match content if we 'put' with LATEST_TIMESTAMP).
-        long l = Bytes.toLong(value.get(COLUMN));
+        long l = Bytes.toLong(value.get(COLUMN).getValue());
         assertTrue(key.getTimestamp() == l ||
           HConstants.LATEST_TIMESTAMP == l);
         count++;

TestScannerTimes.java (new file)

@@ -0,0 +1,139 @@
/**
* Copyright 2008 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
/**
* Test that verifies that scanners return a different timestamp for values that
* are not stored at the same time. (HBASE-737)
*/
public class TestScannerTimes extends HBaseClusterTestCase {
private static final String TABLE_NAME = "hbase737";
private static final String FAM1 = "fam1:";
private static final String FAM2 = "fam2:";
private static final String ROW = "row";
/**
* test for HBASE-737
* @throws IOException
*/
public void testHBase737 () throws IOException {
HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
desc.addFamily(new HColumnDescriptor(FAM1));
desc.addFamily(new HColumnDescriptor(FAM2));
// Create table
HBaseAdmin admin = new HBaseAdmin(conf);
admin.createTable(desc);
// Open table
HTable table = new HTable(conf, TABLE_NAME);
// Insert some values
BatchUpdate b = new BatchUpdate(ROW);
b.put(FAM1 + "letters", "abcdefg".getBytes(HConstants.UTF8_ENCODING));
table.commit(b);
try {
Thread.sleep(1000);
} catch (InterruptedException i) {
//ignore
}
b = new BatchUpdate(ROW);
b.put(FAM1 + "numbers", "123456".getBytes(HConstants.UTF8_ENCODING));
table.commit(b);
try {
Thread.sleep(1000);
} catch (InterruptedException i) {
//ignore
}
b = new BatchUpdate(ROW);
b.put(FAM2 + "letters", "hijklmnop".getBytes(HConstants.UTF8_ENCODING));
table.commit(b);
long times[] = new long[3];
byte[][] columns = new byte[][] {
FAM1.getBytes(HConstants.UTF8_ENCODING),
FAM2.getBytes(HConstants.UTF8_ENCODING)
};
// First scan the memcache
Scanner s = table.getScanner(columns);
try {
int index = 0;
RowResult r = null;
while ((r = s.next()) != null) {
for (Cell c: r.values()) {
times[index++] = c.getTimestamp();
}
}
} finally {
s.close();
}
for (int i = 0; i < times.length - 1; i++) {
for (int j = i + 1; j < times.length; j++) {
assertTrue(times[j] > times[i]);
}
}
// Flush data to disk and try again
cluster.flushcache();
try {
Thread.sleep(1000);
} catch (InterruptedException i) {
//ignore
}
s = table.getScanner(columns);
try {
int index = 0;
RowResult r = null;
while ((r = s.next()) != null) {
for (Cell c: r.values()) {
times[index++] = c.getTimestamp();
}
}
} finally {
s.close();
}
for (int i = 0; i < times.length - 1; i++) {
for (int j = i + 1; j < times.length; j++) {
assertTrue(times[j] > times[i]);
}
}
}
}

TestRegExpRowFilter.java

@@ -30,6 +30,7 @@ import java.util.TreeMap;
 import junit.framework.TestCase;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.regionserver.HLogEdit;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  * Tests for regular expression row filter
  */
 public class TestRegExpRowFilter extends TestCase {
-  TreeMap<byte [], byte []> colvalues;
+  TreeMap<byte [], Cell> colvalues;
   RowFilterInterface mainFilter;
   final char FIRST_CHAR = 'a';
   final char LAST_CHAR = 'e';
@@ -55,9 +56,10 @@ public class TestRegExpRowFilter extends TestCase {
   @Override
   protected void setUp() throws Exception {
     super.setUp();
-    this.colvalues = new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
+    this.colvalues = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
     for (char c = FIRST_CHAR; c < LAST_CHAR; c++) {
-      colvalues.put(Bytes.toBytes(new String(new char [] {c})), GOOD_BYTES);
+      colvalues.put(Bytes.toBytes(new String(new char [] {c})),
+        new Cell(GOOD_BYTES, HConstants.LATEST_TIMESTAMP));
     }
     this.mainFilter = new RegExpRowFilter(HOST_PREFIX + ".*", colvalues);
   }
@@ -126,9 +128,9 @@ public class TestRegExpRowFilter extends TestCase {
     for (char c = FIRST_CHAR; c <= LAST_CHAR; c++) {
       byte [] t = createRow(c);
-      for (Map.Entry<byte [], byte []> e: this.colvalues.entrySet()) {
+      for (Map.Entry<byte [], Cell> e: this.colvalues.entrySet()) {
         assertFalse("Failed on " + c,
-          filter.filterColumn(t, e.getKey(), e.getValue()));
+          filter.filterColumn(t, e.getKey(), e.getValue().getValue()));
       }
     }
     // Try a row and column I know will pass.
@@ -171,13 +173,15 @@ public class TestRegExpRowFilter extends TestCase {
     // Try a row that has all expected columnKeys, and NO null-expected
     // columnKeys.
     // Testing row with columnKeys: a-d
-    colvalues.put(new byte [] {(byte)secondToLast}, GOOD_BYTES);
+    colvalues.put(new byte [] {(byte)secondToLast},
+      new Cell(GOOD_BYTES, HConstants.LATEST_TIMESTAMP));
     assertFalse("Failed with last columnKey " + secondToLast, filter.
       filterRow(colvalues));
     // Try a row that has all expected columnKeys AND a null-expected columnKey.
     // Testing row with columnKeys: a-e
-    colvalues.put(new byte [] {LAST_CHAR}, GOOD_BYTES);
+    colvalues.put(new byte [] {LAST_CHAR},
+      new Cell(GOOD_BYTES, HConstants.LATEST_TIMESTAMP));
     assertTrue("Failed with last columnKey " + LAST_CHAR, filter.
       filterRow(colvalues));
@@ -185,7 +189,7 @@ public class TestRegExpRowFilter extends TestCase {
     // that maps to a null value.
     // Testing row with columnKeys: a-e, e maps to null
     colvalues.put(new byte [] {LAST_CHAR},
-      HLogEdit.deleteBytes.get());
+      new Cell(HLogEdit.deleteBytes.get(), HConstants.LATEST_TIMESTAMP));
     assertFalse("Failed with last columnKey " + LAST_CHAR + " mapping to null.",
       filter.filterRow(colvalues));
   }

TestRowFilterAfterWrite.java

@@ -172,8 +172,9 @@ public class TestRowFilterAfterWrite extends HBaseClusterTestCase {
   private void scanTableWithRowFilter(final String tableName, final boolean printValues) throws IOException {
     HTable table = new HTable(conf, tableName);
-    Map<byte [], byte[]> columnMap = new HashMap<byte [], byte[]>();
-    columnMap.put(TEXT_COLUMN1, VALUE);
+    Map<byte [], Cell> columnMap = new HashMap<byte [], Cell>();
+    columnMap.put(TEXT_COLUMN1,
+      new Cell(VALUE, HConstants.LATEST_TIMESTAMP));
     RegExpRowFilter filter = new RegExpRowFilter(null, columnMap);
     Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW, filter);
     int numFound = doScan(scanner, printValues);

TestRowFilterOnMultipleFamilies.java

@@ -92,8 +92,9 @@ public class TestRowFilterOnMultipleFamilies extends HBaseClusterTestCase {
   private void scanTableWithRowFilter(final String tableName, final boolean printValues) throws IOException {
     HTable table = new HTable(conf, tableName);
-    Map<byte [], byte[]> columnMap = new HashMap<byte [], byte[]>();
-    columnMap.put(TEXT_COLUMN1, VALUE);
+    Map<byte [], Cell> columnMap = new HashMap<byte [], Cell>();
+    columnMap.put(TEXT_COLUMN1,
+      new Cell(VALUE, HConstants.LATEST_TIMESTAMP));
     RegExpRowFilter filter = new RegExpRowFilter(null, columnMap);
     Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW, filter);
     int numFound = doScan(scanner, printValues);

TestRowFilterSet.java

@@ -29,6 +29,7 @@ import java.util.Set;
 import java.util.TreeMap;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -44,7 +45,7 @@ public class TestRowFilterSet extends TestCase {
   static final int MAX_PAGES = 5;
   final char FIRST_CHAR = 'a';
   final char LAST_CHAR = 'e';
-  TreeMap<byte [], byte[]> colvalues;
+  TreeMap<byte [], Cell> colvalues;
   static byte[] GOOD_BYTES = null;
   static byte[] BAD_BYTES = null;
@@ -62,9 +63,10 @@ public class TestRowFilterSet extends TestCase {
   protected void setUp() throws Exception {
     super.setUp();
-    colvalues = new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
+    colvalues = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
     for (char c = FIRST_CHAR; c < LAST_CHAR; c++) {
-      colvalues.put(new byte [] {(byte)c}, GOOD_BYTES);
+      colvalues.put(new byte [] {(byte)c},
+        new Cell(GOOD_BYTES, HConstants.LATEST_TIMESTAMP));
     }
     Set<RowFilterInterface> filters = new HashSet<RowFilterInterface>();


@@ -76,8 +76,8 @@ public class TestGet2 extends HBaseTestCase implements HConstants {
         arbitraryStartRow, HConstants.LATEST_TIMESTAMP,
         new WhileMatchRowFilter(new StopRowFilter(arbitraryStopRow)));
       HStoreKey key = new HStoreKey();
-      TreeMap<byte [], byte[]> value =
-        new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell> value =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       while (scanner.next(key, value)) {
         if (actualStartRow == null) {
           actualStartRow = key.getRow();


@@ -151,7 +151,8 @@ public class TestHMemcache extends TestCase {
       }
     }

-  private void isExpectedRowWithoutTimestamps(final int rowIndex, TreeMap<byte [], byte[]> row) {
+  private void isExpectedRowWithoutTimestamps(final int rowIndex,
+    TreeMap<byte [], Cell> row) {
     int i = 0;
     for (byte [] colname: row.keySet()) {
       String expectedColname = Bytes.toString(getColumnName(rowIndex, i++));
@@ -161,18 +162,19 @@ public class TestHMemcache extends TestCase {
       // 100 bytes in size at least. This is the default size
      // for BytesWritable. For comparison, convert bytes to
      // String and trim to remove trailing null bytes.
-      byte [] value = row.get(colname);
+      byte [] value = row.get(colname).getValue();
       String colvalueStr = Bytes.toString(value).trim();
       assertEquals("Content", colnameStr, colvalueStr);
     }
   }
   private void isExpectedRow(final int rowIndex, TreeMap<byte [], Cell> row) {
-    TreeMap<byte [], byte[]> converted =
-      new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
+    TreeMap<byte [], Cell> converted =
+      new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
     for (Map.Entry<byte [], Cell> entry : row.entrySet()) {
       converted.put(entry.getKey(),
-        entry.getValue() == null ? null : entry.getValue().getValue());
+        new Cell(entry.getValue() == null ? null : entry.getValue().getValue(),
+          HConstants.LATEST_TIMESTAMP));
     }
     isExpectedRowWithoutTimestamps(rowIndex, converted);
   }
@@ -241,16 +243,16 @@ public class TestHMemcache extends TestCase {
     InternalScanner scanner =
       this.hmemcache.getScanner(timestamp, cols, HConstants.EMPTY_START_ROW);
     HStoreKey key = new HStoreKey();
-    TreeMap<byte [], byte []> results =
-      new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+    TreeMap<byte [], Cell> results =
+      new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
     for (int i = 0; scanner.next(key, results); i++) {
       assertTrue("Row name",
         key.toString().startsWith(Bytes.toString(getRowName(i))));
       assertEquals("Count of columns", COLUMNS_COUNT,
         results.size());
-      TreeMap<byte [], byte []> row =
-        new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
-      for(Map.Entry<byte [], byte []> e: results.entrySet() ) {
+      TreeMap<byte [], Cell> row =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
+      for(Map.Entry<byte [], Cell> e: results.entrySet() ) {
         row.put(e.getKey(), e.getValue());
       }
       isExpectedRowWithoutTimestamps(i, row);
@@ -323,8 +325,8 @@ public class TestHMemcache extends TestCase {
     InternalScanner scanner = this.hmemcache.getScanner(timestamp,
       cols, getRowName(startRowId));
     HStoreKey key = new HStoreKey();
-    TreeMap<byte[], byte[]> results =
-      new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
+    TreeMap<byte[], Cell> results =
+      new TreeMap<byte[], Cell>(Bytes.BYTES_COMPARATOR);
     for (int i = 0; scanner.next(key, results); i++) {
       int rowId = startRowId + i;
       assertTrue("Row name",
@@ -332,9 +334,8 @@ public class TestHMemcache extends TestCase {
       assertEquals("Count of columns", COLUMNS_COUNT, results.size());
       TreeMap<byte[], Cell> row =
         new TreeMap<byte[], Cell>(Bytes.BYTES_COMPARATOR);
-      for (Map.Entry<byte[], byte[]> e : results.entrySet()) {
-        row.put(e.getKey(),
-          new Cell(e.getValue(), HConstants.LATEST_TIMESTAMP));
+      for (Map.Entry<byte[], Cell> e : results.entrySet()) {
+        row.put(e.getKey(),e.getValue());
       }
       isExpectedRow(rowId, row);
       // Clear out set. Otherwise row results accumulate.
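
The TestHMemcache hunks show the reading side of the API change: InternalScanner.next(HStoreKey, SortedMap<byte [], Cell>) fills a Cell-valued map, and callers unwrap with Cell.getValue(). A sketch of the scan loop, assuming the 0.2-era regionserver package, that HStoreKey.getRow() yields byte [], and that Cell exposes getTimestamp() alongside getValue():

{code}
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class CellScanSketch {
  // Drains a scanner, printing each cell with its own timestamp. Before
  // HBASE-737, every cell in a row reported the same timestamp.
  static void dump(InternalScanner scanner) throws java.io.IOException {
    HStoreKey key = new HStoreKey();
    TreeMap<byte [], Cell> results =
      new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
    while (scanner.next(key, results)) {
      for (Map.Entry<byte [], Cell> e : results.entrySet()) {
        Cell cell = e.getValue();
        System.out.println(Bytes.toString(key.getRow()) + "/" +
          Bytes.toString(e.getKey()) + " @ " + cell.getTimestamp() +
          " = " + Bytes.toString(cell.getValue()));
      }
      results.clear(); // clear between rows so results do not accumulate
    }
  }
}
{code}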


@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;

 /**
@@ -284,13 +285,13 @@ public class TestHRegion extends HBaseTestCase {
     int numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<byte [], byte []> curVals =
-        new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell> curVals =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
           byte [] col = it.next();
-          byte [] val = curVals.get(col);
+          byte [] val = curVals.get(col).getValue();
           int curval =
             Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
           for(int j = 0; j < cols.length; j++) {
@@ -333,13 +334,13 @@ public class TestHRegion extends HBaseTestCase {
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<byte [], byte []> curVals =
-        new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell> curVals =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
           byte [] col = it.next();
-          byte [] val = curVals.get(col);
+          byte [] val = curVals.get(col).getValue();
           int curval =
             Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
           for(int j = 0; j < cols.length; j++) {
@@ -392,13 +393,13 @@ public class TestHRegion extends HBaseTestCase {
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<byte [], byte []> curVals =
-        new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell> curVals =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
           byte [] col = it.next();
-          byte [] val = curVals.get(col);
+          byte [] val = curVals.get(col).getValue();
           int curval =
             Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
           for(int j = 0; j < cols.length; j++) {
@@ -440,13 +441,13 @@ public class TestHRegion extends HBaseTestCase {
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<byte [], byte []> curVals =
-        new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell> curVals =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
           byte [] col = it.next();
-          byte [] val = curVals.get(col);
+          byte [] val = curVals.get(col).getValue();
           int curval =
             Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
           for (int j = 0; j < cols.length; j++) {
@@ -479,13 +480,13 @@ public class TestHRegion extends HBaseTestCase {
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<byte [], byte []> curVals =
-        new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell> curVals =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       int k = 500;
       while(s.next(curKey, curVals)) {
         for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
           byte [] col = it.next();
-          byte [] val = curVals.get(col);
+          byte [] val = curVals.get(col).getValue();
           int curval =
             Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
           for (int j = 0; j < cols.length; j++) {
@@ -567,13 +568,13 @@ public class TestHRegion extends HBaseTestCase {
       int contentsFetched = 0;
       int anchorFetched = 0;
       HStoreKey curKey = new HStoreKey();
-      TreeMap<byte [], byte []> curVals =
-        new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell> curVals =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
           byte [] col = it.next();
-          byte [] val = curVals.get(col);
+          byte [] val = curVals.get(col).getValue();
           String curval = Bytes.toString(val);
           if(Bytes.compareTo(col, CONTENTS_BASIC) == 0) {
             assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
@@ -619,13 +620,13 @@ public class TestHRegion extends HBaseTestCase {
     try {
       int numFetched = 0;
       HStoreKey curKey = new HStoreKey();
-      TreeMap<byte [], byte []> curVals =
-        new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell> curVals =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       int k = 0;
       while(s.next(curKey, curVals)) {
         for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
           byte [] col = it.next();
-          byte [] val = curVals.get(col);
+          byte [] val = curVals.get(col).getValue();
           int curval =
             Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
@@ -663,8 +664,8 @@ public class TestHRegion extends HBaseTestCase {
     try {
       int fetched = 0;
       HStoreKey curKey = new HStoreKey();
-      TreeMap<byte [], byte []> curVals =
-        new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell> curVals =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       while(s.next(curKey, curVals)) {
         for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
           it.next();
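
Every TestHRegion hunk above makes the same two changes: the map's value type, and one extra getValue() before decoding. Consolidated as a sketch (the decode helper is invented; UTF8_ENCODING and the trim-then-parse idiom come straight from the hunks):

{code}
import java.io.UnsupportedEncodingException;
import java.util.Map;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.Cell;

public class CellDecodeSketch {
  // Unwrap the Cell, then decode the stringified integer, as the updated
  // TestHRegion scan loops now do.
  static int decode(Map<byte [], Cell> curVals, byte [] col)
  throws UnsupportedEncodingException {
    byte [] val = curVals.get(col).getValue(); // was: curVals.get(col)
    return Integer.parseInt(
      new String(val, HConstants.UTF8_ENCODING).trim());
  }
}
{code}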


@@ -27,7 +27,6 @@ import java.util.TreeMap;
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;

 /**
  * Test of a long-lived scanner validating as we go.
@@ -88,8 +88,8 @@ public class TestScanner extends HBaseTestCase {
   throws IOException {
     InternalScanner scanner = null;
-    TreeMap<byte [], byte []> results =
-      new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+    TreeMap<byte [], Cell> results =
+      new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
     HStoreKey key = new HStoreKey();

     byte [][][] scanColumns = {
@@ -104,11 +104,11 @@ public class TestScanner extends HBaseTestCase {
       while (scanner.next(key, results)) {
         assertTrue(results.containsKey(HConstants.COL_REGIONINFO));
-        byte [] val = results.get(HConstants.COL_REGIONINFO);
+        byte [] val = results.get(HConstants.COL_REGIONINFO).getValue();
         validateRegionInfo(val);
         if(validateStartcode) {
           assertTrue(results.containsKey(HConstants.COL_STARTCODE));
-          val = results.get(HConstants.COL_STARTCODE);
+          val = results.get(HConstants.COL_STARTCODE).getValue();
           assertNotNull(val);
           assertFalse(val.length == 0);
           long startCode = Bytes.toLong(val);
@@ -117,7 +117,7 @@ public class TestScanner extends HBaseTestCase {
         if(serverName != null) {
           assertTrue(results.containsKey(HConstants.COL_SERVER));
-          val = results.get(HConstants.COL_SERVER);
+          val = results.get(HConstants.COL_SERVER).getValue();
           assertNotNull(val);
           assertFalse(val.length == 0);
           String server = Bytes.toString(val);
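
TestScanner reads the catalog columns through the same Cell unwrap. A hedged sketch of those meta-row reads; the method wrapper and println output are invented, while the COL_* constants and decoding come from the hunks above (validateRegionInfo's body is not shown in this diff):

{code}
import java.util.TreeMap;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaScanSketch {
  static void scanMeta(InternalScanner scanner) throws java.io.IOException {
    HStoreKey key = new HStoreKey();
    TreeMap<byte [], Cell> results =
      new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
    while (scanner.next(key, results)) {
      // Serialized HRegionInfo bytes; validate/deserialize as the test
      // does in validateRegionInfo.
      byte [] regioninfo = results.get(HConstants.COL_REGIONINFO).getValue();
      System.out.println("regioninfo bytes: " + regioninfo.length);
      if (results.containsKey(HConstants.COL_STARTCODE)) {
        System.out.println("start code: " +
          Bytes.toLong(results.get(HConstants.COL_STARTCODE).getValue()));
      }
      if (results.containsKey(HConstants.COL_SERVER)) {
        System.out.println("server: " +
          Bytes.toString(results.get(HConstants.COL_SERVER).getValue()));
      }
      results.clear();
    }
  }
}
{code}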


@@ -182,12 +182,12 @@ public class TestSplit extends HBaseClusterTestCase {
       HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null);
     try {
       HStoreKey curKey = new HStoreKey();
-      TreeMap<byte [], byte []> curVals =
-        new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+      TreeMap<byte [], Cell> curVals =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
       boolean first = true;
       OUTER_LOOP: while(s.next(curKey, curVals)) {
         for(byte [] col: curVals.keySet()) {
-          byte [] val = curVals.get(col);
+          byte [] val = curVals.get(col).getValue();
           byte [] curval = val;
           if (first) {
             first = false;