HBASE-737 Scanner: every cell in a row has the same timestamp

Changed InternalScanner API:

from
{code}
boolean next(HStoreKey, SortedMap<byte[], byte[]>)
{code}
to
{code}
boolean next(HStoreKey, SortedMap<byte[], Cell>)
{code}
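
For illustration, a minimal sketch of a caller driving the new signature (hypothetical; assumes an open InternalScanner named scanner and the usual TreeMap/Bytes imports in scope). The point of the change is that each Cell carries its own timestamp:
{code}
HStoreKey key = new HStoreKey();
SortedMap<byte [], Cell> results =
  new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
while (scanner.next(key, results)) {
  for (Map.Entry<byte [], Cell> e : results.entrySet()) {
    byte [] value = e.getValue().getValue();  // cell value
    long ts = e.getValue().getTimestamp();    // per-cell timestamp (HBASE-737)
  }
  results.clear();  // clear between rows, as the tests in this commit do
}
{code}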

Files changed as a result of the API change:

HMerge.java, ColumnValueFilter, PageRowFilter, RegExpRowFilter, RowFilterInterface, RowFilterSet, StopRowFilter, WhileMatchRowFilter, HAbstractScanner, HRegion$HScanner, HRegionServer, HStoreScanner, InternalScanner, Memcache, StoreFileScanner, MetaUtils, HBaseTestCase, TestScannerAPI, TimestampTestBase, TestRegExpRowFilter, TestRowFilterAfterWrite, TestRowFilterOnMultipleFamilies, TestRowFilterSet, TestGet2, TestHMemcache, TestHRegion, TestScanner, TestSplit

Updated comments in Flusher

HRegion.internalFlushCache, HStore.flushCache, and HStore.internalFlushCache now return true only if a flush was completed and a compaction is needed.
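
The resulting caller contract, as used by Flusher in this commit (removeFromQueue is Flusher's local flag):
{code}
// flushcache() now answers "was a flush done and is a compaction needed?"
if (region.flushcache() && !removeFromQueue) {
  server.compactSplitThread.compactionRequested(region);
}
{code}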

HRegion.internalFlushCache now includes the region name in the DroppedSnapshotException message

When creating a reader during compaction, don't load the bloom filter since it won't be used.

StoreFileScanner now holds the store's read lock for the duration of its constructor. It also does not load the bloom filter when opening readers on the store files.
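
Roughly, the constructor now has this shape (a condensed sketch of the diff below; exception wrapping elided):
{code}
this.store.lock.readLock().lock();   // pin the set of store files
try {
  openReaders(firstRow);             // readers opened with the bloom filter disabled
} finally {
  this.store.lock.readLock().unlock();
}
{code}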

TestScannerTimes - new regression test for HBASE-737


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@677113 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2008-07-16 00:03:26 +00:00
parent 422c3ff605
commit 90c9f71aa1
35 changed files with 333 additions and 378 deletions


@ -185,6 +185,7 @@ Trunk (unreleased changes)
(Izaak Rubin via Stack)
HBASE-744 BloomFilter serialization/deserialization broken
HBASE-742 Column length limit is not enforced (Jean-Daniel Cryans via Stack)
HBASE-737 Scanner: every cell in a row has the same timestamp
IMPROVEMENTS
HBASE-559 MR example job to count table rows


@ -330,11 +330,11 @@ class HMerge implements HConstants {
try {
HStoreKey key = new HStoreKey();
TreeMap<byte [], byte[]> results =
new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> results =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
while(rootScanner.next(key, results)) {
for(byte [] b: results.values()) {
HRegionInfo info = Writables.getHRegionInfoOrNull(b);
for(Cell c: results.values()) {
HRegionInfo info = Writables.getHRegionInfoOrNull(c.getValue());
if (info != null) {
metaRegions.add(info);
}


@ -21,7 +21,6 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
class UnmodifyableHRegionInfo extends HRegionInfo {
/*


@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
/**
* Read-only table descriptor.
* Returned out of {@link HTable.getTableDescriptor}.
*/
public class UnmodifyableHTableDescriptor extends HTableDescriptor {


@ -27,6 +27,7 @@ import java.util.SortedMap;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.io.ObjectWritable;
/**
@ -124,7 +125,7 @@ public class ColumnValueFilter implements RowFilterInterface {
}
/** {@inheritDoc} */
public boolean filterRow(final SortedMap<byte[], byte[]> columns) {
public boolean filterRow(final SortedMap<byte[], Cell> columns) {
// Don't let rows through if they don't have the column we are checking
return !columns.containsKey(columnName);
}


@ -24,6 +24,7 @@ import java.io.DataOutput;
import java.io.IOException;
import java.util.SortedMap;
import org.apache.hadoop.hbase.io.Cell;
/**
* Implementation of RowFilterInterface that limits results to a specific page
@ -123,7 +124,7 @@ public class PageRowFilter implements RowFilterInterface {
* {@inheritDoc}
*/
public boolean filterRow(@SuppressWarnings("unused")
final SortedMap<byte [], byte[]> columns) {
final SortedMap<byte [], Cell> columns) {
return filterAllRemaining();
}


@ -32,6 +32,7 @@ import java.util.Map.Entry;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.regionserver.HLogEdit;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.util.Bytes;
/**
@ -80,7 +81,7 @@ public class RegExpRowFilter implements RowFilterInterface {
*/
@Deprecated
public RegExpRowFilter(final String rowKeyRegExp,
final Map<byte [], byte[]> columnFilter) {
final Map<byte [], Cell> columnFilter) {
this.rowKeyRegExp = rowKeyRegExp;
this.setColumnFilters(columnFilter);
}
@ -122,13 +123,13 @@ public class RegExpRowFilter implements RowFilterInterface {
* Map of columns with value criteria.
*/
@Deprecated
public void setColumnFilters(final Map<byte [], byte[]> columnFilter) {
public void setColumnFilters(final Map<byte [], Cell> columnFilter) {
if (null == columnFilter) {
nullColumns.clear();
equalsMap.clear();
} else {
for (Entry<byte [], byte[]> entry : columnFilter.entrySet()) {
setColumnFilter(entry.getKey(), entry.getValue());
for (Entry<byte [], Cell> entry : columnFilter.entrySet()) {
setColumnFilter(entry.getKey(), entry.getValue().getValue());
}
}
}
@ -186,10 +187,10 @@ public class RegExpRowFilter implements RowFilterInterface {
*
* {@inheritDoc}
*/
public boolean filterRow(final SortedMap<byte [], byte[]> columns) {
for (Entry<byte [], byte[]> col : columns.entrySet()) {
public boolean filterRow(final SortedMap<byte [], Cell> columns) {
for (Entry<byte [], Cell> col : columns.entrySet()) {
if (nullColumns.contains(col.getKey())
&& !HLogEdit.isDeleted(col.getValue())) {
&& !HLogEdit.isDeleted(col.getValue().getValue())) {
return true;
}
}


@ -23,6 +23,8 @@ import java.util.SortedMap;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.hbase.io.Cell;
/**
*
* Interface used for row-level filters applied to HRegion.HScanner scan
@ -98,7 +100,7 @@ public interface RowFilterInterface extends Writable {
* @param columns
* @return true if row filtered and should not be processed.
*/
boolean filterRow(final SortedMap<byte [], byte[]> columns);
boolean filterRow(final SortedMap<byte [], Cell> columns);
/**
* Validates that this filter applies only to a subset of the given columns.


@ -28,6 +28,7 @@ import java.util.SortedMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.io.ObjectWritable;
/**
@ -179,7 +180,7 @@ public class RowFilterSet implements RowFilterInterface {
}
/** {@inheritDoc} */
public boolean filterRow(final SortedMap<byte [], byte[]> columns) {
public boolean filterRow(final SortedMap<byte [], Cell> columns) {
boolean resultFound = false;
boolean result = operator == Operator.MUST_PASS_ONE;
for (RowFilterInterface filter : filters) {


@ -24,6 +24,7 @@ import java.io.DataOutput;
import java.io.IOException;
import java.util.SortedMap;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.util.Bytes;
/**
@ -123,7 +124,7 @@ public class StopRowFilter implements RowFilterInterface {
* @param columns
*/
public boolean filterRow(@SuppressWarnings("unused")
final SortedMap<byte [], byte[]> columns) {
final SortedMap<byte [], Cell> columns) {
return filterAllRemaining();
}


@ -24,6 +24,7 @@ import java.io.DataOutput;
import java.io.IOException;
import java.util.SortedMap;
import org.apache.hadoop.hbase.io.Cell;
/**
* WhileMatchRowFilter is a wrapper filter that filters everything after the
@ -98,7 +99,7 @@ public class WhileMatchRowFilter implements RowFilterInterface {
}
/** {@inheritDoc} */
public boolean filterRow(final SortedMap<byte [], byte[]> columns) {
public boolean filterRow(final SortedMap<byte [], Cell> columns) {
changeFAR(this.filter.filterRow(columns));
return filterAllRemaining();
}


@ -27,8 +27,6 @@ import java.util.concurrent.TimeUnit;
import java.util.HashSet;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.Comparator;
import java.util.ConcurrentModificationException;
import org.apache.commons.logging.Log;
@ -169,8 +167,9 @@ class Flusher extends Thread implements FlushRequester {
}
lock.lock();
try {
// See javadoc comment above for removeFromQueue on why we do not
// compact if removeFromQueue is true.
// See comment above for removeFromQueue on why we do not
// compact if removeFromQueue is true. Note that region.flushCache()
// only returns true if a flush is done and if a compaction is needed.
if (region.flushcache() && !removeFromQueue) {
server.compactSplitThread.compactionRequested(region);
}


@ -29,6 +29,7 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.util.Bytes;
/**
@ -182,8 +183,7 @@ public abstract class HAbstractScanner implements InternalScanner {
}
/** {@inheritDoc} */
public abstract boolean next(HStoreKey key,
SortedMap<byte [], byte []> results)
public abstract boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
throws IOException;
}


@ -977,7 +977,7 @@ public class HRegion implements HConstants {
*
* <p> This method may block for some time.
*
* @return true if the cache was flushed
* @return true if the region needs compacting
*
* @throws IOException
* @throws DroppedSnapshotException Thrown when replay of hlog is required
@ -1021,13 +1021,16 @@ public class HRegion implements HConstants {
// restart so hlog content can be replayed and put back into the memcache.
// Otherwise, the snapshot content, while backed up in the hlog, will not
// be part of the current running server's state.
long flushed = 0;
boolean compactionRequested = false;
try {
// A. Flush memcache to all the HStores.
// Keep running vector of all store files that includes both old and the
// just-made new flush store file.
for (HStore hstore: stores.values()) {
flushed += hstore.flushCache(sequenceId);
boolean needsCompaction = hstore.flushCache(sequenceId);
if (needsCompaction) {
compactionRequested = true;
}
}
} catch (Throwable t) {
// An exception here means that the snapshot was not persisted.
@ -1037,7 +1040,8 @@ public class HRegion implements HConstants {
// exceptions -- e.g. HBASE-659 was about an NPE -- so now we catch
// all and sundry.
this.log.abortCacheFlush();
DroppedSnapshotException dse = new DroppedSnapshotException();
DroppedSnapshotException dse = new DroppedSnapshotException("region: " +
Bytes.toString(getRegionName()));
dse.initCause(t);
throw dse;
}
@ -1064,13 +1068,12 @@ public class HRegion implements HConstants {
LOG.debug("Finished memcache flush for region " + this +
" in " +
(System.currentTimeMillis() - startTime) + "ms, sequence id=" +
sequenceId + ", " +
StringUtils.humanReadableInt(flushed));
sequenceId + ", compaction requested=" + compactionRequested);
if (!regionInfo.isMetaRegion()) {
this.historian.addRegionFlush(regionInfo, timeTaken);
}
}
return true;
return compactionRequested;
}
//////////////////////////////////////////////////////////////////////////////
@ -1733,7 +1736,7 @@ public class HRegion implements HConstants {
*/
private class HScanner implements InternalScanner {
private InternalScanner[] scanners;
private TreeMap<byte [], byte []>[] resultSets;
private TreeMap<byte [], Cell>[] resultSets;
private HStoreKey[] keys;
private RowFilterInterface filter;
@ -1782,7 +1785,7 @@ public class HRegion implements HConstants {
this.keys = new HStoreKey[scanners.length];
for (int i = 0; i < scanners.length; i++) {
keys[i] = new HStoreKey();
resultSets[i] = new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
resultSets[i] = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
if(scanners[i] != null && !scanners[i].next(keys[i], resultSets[i])) {
closeScanner(i);
}
@ -1795,7 +1798,7 @@ public class HRegion implements HConstants {
/** {@inheritDoc} */
@SuppressWarnings("null")
public boolean next(HStoreKey key, SortedMap<byte [], byte[]> results)
public boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
throws IOException {
boolean moreToFollow = false;
boolean filtered = false;
@ -1830,7 +1833,7 @@ public class HRegion implements HConstants {
// but this had the effect of overwriting newer
// values with older ones. So now we only insert
// a result if the map does not contain the key.
for (Map.Entry<byte [], byte[]> e : resultSets[i].entrySet()) {
for (Map.Entry<byte [], Cell> e : resultSets[i].entrySet()) {
if (!results.containsKey(e.getKey())) {
results.put(e.getKey(), e.getValue());
}


@ -1120,12 +1120,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
HbaseMapWritable<byte [], Cell> values
= new HbaseMapWritable<byte [], Cell>();
HStoreKey key = new HStoreKey();
TreeMap<byte [], byte []> results =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> results =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
while (s.next(key, results)) {
for (Map.Entry<byte [], byte []> e: results.entrySet()) {
values.put(e.getKey(), new Cell(e.getValue(), key.getTimestamp()));
}
values.putAll(results);
if (values.size() > 0) {
// Row has something in it. Return the value.
break;


@ -90,7 +90,7 @@ public class HStore implements HConstants {
private final Integer flushLock = new Integer(0);
private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
final byte [] storeName;
private final String storeNameStr;
@ -550,27 +550,27 @@ public class HStore implements HConstants {
* Write out current snapshot. Presumes {@link #snapshot()} has been called
* previously.
* @param logCacheFlushId flush sequence number
* @return count of bytes flushed
* @return true if a compaction is needed
* @throws IOException
*/
long flushCache(final long logCacheFlushId) throws IOException {
boolean flushCache(final long logCacheFlushId) throws IOException {
// Get the snapshot to flush. Presumes that a call to
// this.memcache.snapshot() has happened earlier up in the chain.
SortedMap<HStoreKey, byte []> cache = this.memcache.getSnapshot();
long flushed = internalFlushCache(cache, logCacheFlushId);
boolean compactionNeeded = internalFlushCache(cache, logCacheFlushId);
// If an exception happens flushing, we let it out without clearing
// the memcache snapshot. The old snapshot will be returned when we say
// 'snapshot', the next time flush comes around.
this.memcache.clearSnapshot(cache);
return flushed;
return compactionNeeded;
}
private long internalFlushCache(SortedMap<HStoreKey, byte []> cache,
private boolean internalFlushCache(SortedMap<HStoreKey, byte []> cache,
long logCacheFlushId) throws IOException {
long flushed = 0;
// Don't flush if there are no entries.
if (cache.size() == 0) {
return flushed;
return false;
}
// TODO: We can fail in the below block before we complete adding this
@ -634,7 +634,7 @@ public class HStore implements HConstants {
StringUtils.humanReadableInt(newStoreSize));
}
}
return flushed;
return storefiles.size() >= compactionThreshold;
}
/*
@ -744,8 +744,8 @@ public class HStore implements HConstants {
List<MapFile.Reader> readers = new ArrayList<MapFile.Reader>();
for (HStoreFile file: filesToCompact) {
try {
HStoreFile.BloomFilterMapFile.Reader reader = file.getReader(fs,
this.family.isBloomFilterEnabled(), false);
HStoreFile.BloomFilterMapFile.Reader reader =
file.getReader(fs, false, false);
readers.add(reader);
// Compute the size of the new bloomfilter if needed


@ -22,7 +22,6 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
@ -33,6 +32,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.util.Bytes;
/**
@ -42,7 +42,7 @@ class HStoreScanner implements InternalScanner {
static final Log LOG = LogFactory.getLog(HStoreScanner.class);
private InternalScanner[] scanners;
private TreeMap<byte [], byte []>[] resultSets;
private TreeMap<byte [], Cell>[] resultSets;
private HStoreKey[] keys;
private boolean wildcardMatch = false;
private boolean multipleMatchers = false;
@ -87,7 +87,7 @@ class HStoreScanner implements InternalScanner {
// All results will match the required column-set and scanTime.
for (int i = 0; i < scanners.length; i++) {
keys[i] = new HStoreKey();
resultSets[i] = new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
resultSets[i] = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
if(scanners[i] != null && !scanners[i].next(keys[i], resultSets[i])) {
closeScanner(i);
}
@ -105,7 +105,7 @@ class HStoreScanner implements InternalScanner {
}
/** {@inheritDoc} */
public boolean next(HStoreKey key, SortedMap<byte [], byte[]> results)
public boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
throws IOException {
// Filtered flag is set by filters. If a cell has been 'filtered out'
@ -166,9 +166,9 @@ class HStoreScanner implements InternalScanner {
// a result if the map does not contain the key.
HStoreKey hsk = new HStoreKey(key.getRow(), HConstants.EMPTY_BYTE_ARRAY,
key.getTimestamp());
for (Map.Entry<byte [], byte[]> e : resultSets[i].entrySet()) {
for (Map.Entry<byte [], Cell> e : resultSets[i].entrySet()) {
hsk.setColumn(e.getKey());
if (HLogEdit.isDeleted(e.getValue())) {
if (HLogEdit.isDeleted(e.getValue().getValue())) {
if (!deletes.contains(hsk)) {
// Key changes as we cycle the for loop so add a copy to
// the set of deletes.
@ -180,8 +180,8 @@ class HStoreScanner implements InternalScanner {
!results.containsKey(e.getKey())) {
if (dataFilter != null) {
// Filter whole row by column data?
filtered =
dataFilter.filterColumn(chosenRow, e.getKey(), e.getValue());
filtered = dataFilter.filterColumn(chosenRow, e.getKey(),
e.getValue().getValue());
if (filtered) {
results.clear();
break;
@ -265,9 +265,4 @@ class HStoreScanner implements InternalScanner {
}
}
}
public Iterator<Map.Entry<HStoreKey, SortedMap<byte [], byte[]>>> iterator() {
throw new UnsupportedOperationException("Unimplemented serverside. " +
"next(HStoreKey, StortedMap(...) is more efficient");
}
}


@ -23,6 +23,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.util.SortedMap;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.Cell;
/**
* Internal scanners differ from client-side scanners in that they operate on
@ -49,7 +50,7 @@ public interface InternalScanner extends Closeable {
* @return true if data was returned
* @throws IOException
*/
public boolean next(HStoreKey key, SortedMap<byte [], byte[]> results)
public boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
throws IOException;
/**


@ -685,7 +685,7 @@ class Memcache {
/** {@inheritDoc} */
@Override
public boolean next(HStoreKey key, SortedMap<byte [], byte []> results)
public boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
throws IOException {
if (this.scannerClosed) {
return false;
@ -735,7 +735,7 @@ class Memcache {
c.getTimestamp() > latestTimestamp) {
latestTimestamp = c.getTimestamp();
}
results.put(column, c.getValue());
results.put(column, c);
}
this.currentRow = getNextRow(this.currentRow);


@ -27,6 +27,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.MapFile;
@ -36,12 +37,12 @@ import org.apache.hadoop.io.MapFile;
class StoreFileScanner extends HAbstractScanner
implements ChangedReadersObserver {
// Keys retrieved from the sources
private HStoreKey keys[];
private volatile HStoreKey keys[];
// Values that correspond to those keys
private byte [][] vals;
private volatile byte [][] vals;
// Readers we go against.
private MapFile.Reader[] readers;
private volatile MapFile.Reader[] readers;
// Store this scanner came out of.
private final HStore store;
@ -62,6 +63,7 @@ implements ChangedReadersObserver {
super(timestamp, targetCols);
this.store = store;
this.store.addChangedReaderObserver(this);
this.store.lock.readLock().lock();
try {
openReaders(firstRow);
} catch (Exception ex) {
@ -69,6 +71,8 @@ implements ChangedReadersObserver {
IOException e = new IOException("HStoreScanner failed construction");
e.initCause(ex);
throw e;
} finally {
this.store.lock.readLock().unlock();
}
}
@ -92,8 +96,7 @@ implements ChangedReadersObserver {
// Most recent map file should be first
int i = readers.length - 1;
for(HStoreFile curHSF: store.getStorefiles().values()) {
readers[i--] = curHSF.getReader(store.fs,
store.getFamily().isBloomFilterEnabled(), false);
readers[i--] = curHSF.getReader(store.fs, false, false);
}
this.keys = new HStoreKey[readers.length];
@ -140,7 +143,7 @@ implements ChangedReadersObserver {
* @see org.apache.hadoop.hbase.regionserver.InternalScanner#next(org.apache.hadoop.hbase.HStoreKey, java.util.SortedMap)
*/
@Override
public boolean next(HStoreKey key, SortedMap<byte [], byte []> results)
public boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
throws IOException {
if (this.scannerClosed) {
return false;
@ -173,7 +176,8 @@ implements ChangedReadersObserver {
if(columnMatch(i)) {
// We only want the first result for any specific family member
if(!results.containsKey(keys[i].getColumn())) {
results.put(keys[i].getColumn(), vals[i]);
results.put(keys[i].getColumn(),
new Cell(vals[i], keys[i].getTimestamp()));
insertedItem = true;
}
}


@ -201,11 +201,11 @@ public class MetaUtils {
try {
HStoreKey key = new HStoreKey();
SortedMap<byte [], byte[]> results =
new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
SortedMap<byte [], Cell> results =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
while (rootScanner.next(key, results)) {
HRegionInfo info = Writables.getHRegionInfoOrNull(
results.get(HConstants.COL_REGIONINFO));
results.get(HConstants.COL_REGIONINFO).getValue());
if (info == null) {
LOG.warn("region info is null for row " + key.getRow() +
" in table " + HConstants.ROOT_TABLE_NAME);
@ -253,11 +253,11 @@ public class MetaUtils {
HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
try {
HStoreKey key = new HStoreKey();
SortedMap<byte[], byte[]> results =
new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
SortedMap<byte[], Cell> results =
new TreeMap<byte[], Cell>(Bytes.BYTES_COMPARATOR);
while (metaScanner.next(key, results)) {
HRegionInfo info =
Writables.getHRegionInfoOrNull(results.get(HConstants.COL_REGIONINFO));
HRegionInfo info = Writables.getHRegionInfoOrNull(
results.get(HConstants.COL_REGIONINFO).getValue());
if (info == null) {
LOG.warn("regioninfo null for row " + key.getRow() + " in table " +
Bytes.toString(m.getTableDesc().getName()));


@ -509,8 +509,8 @@ public abstract class HBaseTestCase extends TestCase {
}
public interface ScannerIncommon
extends Iterable<Map.Entry<HStoreKey, SortedMap<byte [], byte[]>>> {
public boolean next(HStoreKey key, SortedMap<byte [], byte[]> values)
extends Iterable<Map.Entry<HStoreKey, SortedMap<byte [], Cell>>> {
public boolean next(HStoreKey key, SortedMap<byte [], Cell> values)
throws IOException;
public void close() throws IOException;
@ -522,7 +522,7 @@ public abstract class HBaseTestCase extends TestCase {
this.scanner = scanner;
}
public boolean next(HStoreKey key, SortedMap<byte [], byte[]> values)
public boolean next(HStoreKey key, SortedMap<byte [], Cell> values)
throws IOException {
RowResult results = scanner.next();
if (results == null) {
@ -531,7 +531,7 @@ public abstract class HBaseTestCase extends TestCase {
key.setRow(results.getRow());
values.clear();
for (Map.Entry<byte [], Cell> entry : results.entrySet()) {
values.put(entry.getKey(), entry.getValue().getValue());
values.put(entry.getKey(), entry.getValue());
}
return true;
}
@ -552,7 +552,7 @@ public abstract class HBaseTestCase extends TestCase {
this.scanner = scanner;
}
public boolean next(HStoreKey key, SortedMap<byte [], byte[]> values)
public boolean next(HStoreKey key, SortedMap<byte [], Cell> values)
throws IOException {
return scanner.next(key, values);
}


@ -1,205 +0,0 @@
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.util.Bytes;
/** Tests per-column bloom filters */
public class TestBloomFilters extends HBaseClusterTestCase {
static final Log LOG = LogFactory.getLog(TestBloomFilters.class);
private static final byte [] CONTENTS = Bytes.toBytes("contents:");
private static final byte [][] rows = {
Bytes.toBytes("wmjwjzyv"),
Bytes.toBytes("baietibz"),
Bytes.toBytes("guhsgxnv"),
Bytes.toBytes("mhnqycto"),
Bytes.toBytes("xcyqafgz"),
Bytes.toBytes("zidoamgb"),
Bytes.toBytes("tftfirzd"),
Bytes.toBytes("okapqlrg"),
Bytes.toBytes("yccwzwsq"),
Bytes.toBytes("qmonufqu"),
Bytes.toBytes("wlsctews"),
Bytes.toBytes("mksdhqri"),
Bytes.toBytes("wxxllokj"),
Bytes.toBytes("eviuqpls"),
Bytes.toBytes("bavotqmj"),
Bytes.toBytes("yibqzhdl"),
Bytes.toBytes("csfqmsyr"),
Bytes.toBytes("guxliyuh"),
Bytes.toBytes("pzicietj"),
Bytes.toBytes("qdwgrqwo"),
Bytes.toBytes("ujfzecmi"),
Bytes.toBytes("dzeqfvfi"),
Bytes.toBytes("phoegsij"),
Bytes.toBytes("bvudfcou"),
Bytes.toBytes("dowzmciz"),
Bytes.toBytes("etvhkizp"),
Bytes.toBytes("rzurqycg"),
Bytes.toBytes("krqfxuge"),
Bytes.toBytes("gflcohtd"),
Bytes.toBytes("fcrcxtps"),
Bytes.toBytes("qrtovxdq"),
Bytes.toBytes("aypxwrwi"),
Bytes.toBytes("dckpyznr"),
Bytes.toBytes("mdaawnpz"),
Bytes.toBytes("pakdfvca"),
Bytes.toBytes("xjglfbez"),
Bytes.toBytes("xdsecofi"),
Bytes.toBytes("sjlrfcab"),
Bytes.toBytes("ebcjawxv"),
Bytes.toBytes("hkafkjmy"),
Bytes.toBytes("oimmwaxo"),
Bytes.toBytes("qcuzrazo"),
Bytes.toBytes("nqydfkwk"),
Bytes.toBytes("frybvmlb"),
Bytes.toBytes("amxmaqws"),
Bytes.toBytes("gtkovkgx"),
Bytes.toBytes("vgwxrwss"),
Bytes.toBytes("xrhzmcep"),
Bytes.toBytes("tafwziil"),
Bytes.toBytes("erjmncnv"),
Bytes.toBytes("heyzqzrn"),
Bytes.toBytes("sowvyhtu"),
Bytes.toBytes("heeixgzy"),
Bytes.toBytes("ktcahcob"),
Bytes.toBytes("ljhbybgg"),
Bytes.toBytes("jiqfcksl"),
Bytes.toBytes("anjdkjhm"),
Bytes.toBytes("uzcgcuxp"),
Bytes.toBytes("vzdhjqla"),
Bytes.toBytes("svhgwwzq"),
Bytes.toBytes("zhswvhbp"),
Bytes.toBytes("ueceybwy"),
Bytes.toBytes("czkqykcw"),
Bytes.toBytes("ctisayir"),
Bytes.toBytes("hppbgciu"),
Bytes.toBytes("nhzgljfk"),
Bytes.toBytes("vaziqllf"),
Bytes.toBytes("narvrrij"),
Bytes.toBytes("kcevbbqi"),
Bytes.toBytes("qymuaqnp"),
Bytes.toBytes("pwqpfhsr"),
Bytes.toBytes("peyeicuk"),
Bytes.toBytes("kudlwihi"),
Bytes.toBytes("pkmqejlm"),
Bytes.toBytes("ylwzjftl"),
Bytes.toBytes("rhqrlqar"),
Bytes.toBytes("xmftvzsp"),
Bytes.toBytes("iaemtihk"),
Bytes.toBytes("ymsbrqcu"),
Bytes.toBytes("yfnlcxto"),
Bytes.toBytes("nluqopqh"),
Bytes.toBytes("wmrzhtox"),
Bytes.toBytes("qnffhqbl"),
Bytes.toBytes("zypqpnbw"),
Bytes.toBytes("oiokhatd"),
Bytes.toBytes("mdraddiu"),
Bytes.toBytes("zqoatltt"),
Bytes.toBytes("ewhulbtm"),
Bytes.toBytes("nmswpsdf"),
Bytes.toBytes("xsjeteqe"),
Bytes.toBytes("ufubcbma"),
Bytes.toBytes("phyxvrds"),
Bytes.toBytes("vhnfldap"),
Bytes.toBytes("zrrlycmg"),
Bytes.toBytes("becotcjx"),
Bytes.toBytes("wvbubokn"),
Bytes.toBytes("avkgiopr"),
Bytes.toBytes("mbqqxmrv"),
Bytes.toBytes("ibplgvuu"),
Bytes.toBytes("dghvpkgc")
};
private static final byte [][] testKeys = {
Bytes.toBytes("abcdefgh"),
Bytes.toBytes("ijklmnop"),
Bytes.toBytes("qrstuvwx"),
Bytes.toBytes("yzabcdef")
};
/**
* Test that uses automatic bloom filter
* @throws IOException
*/
public void testComputedParameters() throws IOException {
HTable table = null;
// Setup
HTableDescriptor desc = new HTableDescriptor(getName());
desc.addFamily(
new HColumnDescriptor(CONTENTS, // Column name
1, // Max versions
HColumnDescriptor.CompressionType.NONE, // no compression
HColumnDescriptor.DEFAULT_IN_MEMORY, // not in memory
HColumnDescriptor.DEFAULT_BLOCKCACHE,
HColumnDescriptor.DEFAULT_LENGTH,
HColumnDescriptor.DEFAULT_TTL,
true
)
);
// Create the table
HBaseAdmin admin = new HBaseAdmin(conf);
admin.createTable(desc);
// Open table
table = new HTable(conf, desc.getName());
// Store some values
for(int i = 0; i < 100; i++) {
byte [] row = rows[i];
String value = row.toString();
BatchUpdate b = new BatchUpdate(row);
b.put(CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
table.commit(b);
}
try {
// Give cache flusher and log roller a chance to run
// Otherwise we'll never hit the bloom filter, just the memcache
Thread.sleep(conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000) * 2);
} catch (InterruptedException e) {
// ignore
}
for(int i = 0; i < testKeys.length; i++) {
Cell value = table.get(testKeys[i], CONTENTS);
if(value != null && value.getValue().length != 0) {
LOG.info("non existant key: " + testKeys[i] + " returned value: " +
new String(value.getValue(), HConstants.UTF8_ENCODING));
}
}
}
}


@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Scanner;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
@ -44,8 +45,8 @@ public class TestScannerAPI extends HBaseClusterTestCase {
});
private final byte [] startRow = Bytes.toBytes("0");
private final TreeMap<byte [], SortedMap<byte [], byte[]>> values =
new TreeMap<byte [], SortedMap<byte [], byte[]>>(Bytes.BYTES_COMPARATOR);
private final TreeMap<byte [], SortedMap<byte [], Cell>> values =
new TreeMap<byte [], SortedMap<byte [], Cell>>(Bytes.BYTES_COMPARATOR);
/**
* @throws Exception
@ -53,13 +54,16 @@ public class TestScannerAPI extends HBaseClusterTestCase {
public TestScannerAPI() throws Exception {
super();
try {
TreeMap<byte [], byte[]> columns =
new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("a:1"), Bytes.toBytes("1"));
TreeMap<byte [], Cell> columns =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("a:1"),
new Cell(Bytes.toBytes("1"), HConstants.LATEST_TIMESTAMP));
values.put(Bytes.toBytes("1"), columns);
columns = new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("a:2"), Bytes.toBytes("2"));
columns.put(Bytes.toBytes("b:2"), Bytes.toBytes("2"));
columns = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("a:2"),
new Cell(Bytes.toBytes("2"), HConstants.LATEST_TIMESTAMP));
columns.put(Bytes.toBytes("b:2"),
new Cell(Bytes.toBytes("2"), HConstants.LATEST_TIMESTAMP));
} catch (Exception e) {
e.printStackTrace();
throw e;
@ -85,10 +89,10 @@ public class TestScannerAPI extends HBaseClusterTestCase {
HTable table = new HTable(conf, getName());
for (Map.Entry<byte [], SortedMap<byte [], byte[]>> row: values.entrySet()) {
for (Map.Entry<byte [], SortedMap<byte [], Cell>> row: values.entrySet()) {
BatchUpdate b = new BatchUpdate(row.getKey());
for (Map.Entry<byte [], byte[]> val: row.getValue().entrySet()) {
b.put(val.getKey(), val.getValue());
for (Map.Entry<byte [], Cell> val: row.getValue().entrySet()) {
b.put(val.getKey(), val.getValue().getValue());
}
table.commit(b);
}
@ -128,12 +132,12 @@ public class TestScannerAPI extends HBaseClusterTestCase {
for (RowResult r : scanner2) {
assertTrue("row key", values.containsKey(r.getRow()));
SortedMap<byte [], byte[]> columnValues = values.get(r.getRow());
SortedMap<byte [], Cell> columnValues = values.get(r.getRow());
assertEquals(columnValues.size(), r.size());
for (Map.Entry<byte [], byte[]> e: columnValues.entrySet()) {
for (Map.Entry<byte [], Cell> e: columnValues.entrySet()) {
byte [] column = e.getKey();
assertTrue("column", r.containsKey(column));
assertTrue("value", Arrays.equals(columnValues.get(column),
assertTrue("value", Arrays.equals(columnValues.get(column).getValue(),
r.get(column).getValue()));
}
}
@ -144,19 +148,19 @@ public class TestScannerAPI extends HBaseClusterTestCase {
private void verify(ScannerIncommon scanner) throws IOException {
HStoreKey key = new HStoreKey();
SortedMap<byte [], byte[]> results =
new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
SortedMap<byte [], Cell> results =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
while (scanner.next(key, results)) {
byte [] row = key.getRow();
assertTrue("row key", values.containsKey(row));
SortedMap<byte [], byte[]> columnValues = values.get(row);
SortedMap<byte [], Cell> columnValues = values.get(row);
assertEquals(columnValues.size(), results.size());
for (Map.Entry<byte [], byte[]> e: columnValues.entrySet()) {
for (Map.Entry<byte [], Cell> e: columnValues.entrySet()) {
byte [] column = e.getKey();
assertTrue("column", results.containsKey(column));
assertTrue("value", Arrays.equals(columnValues.get(column),
results.get(column)));
assertTrue("value", Arrays.equals(columnValues.get(column).getValue(),
results.get(column).getValue()));
}
results.clear();
}


@ -177,13 +177,13 @@ public class TimestampTestBase extends HBaseTestCase {
int count = 0;
try {
HStoreKey key = new HStoreKey();
TreeMap<byte [], byte []>value =
new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell>value =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
while (scanner.next(key, value)) {
assertTrue(key.getTimestamp() <= ts);
// Content matches the key or HConstants.LATEST_TIMESTAMP.
// (Key does not match content if we 'put' with LATEST_TIMESTAMP).
long l = Bytes.toLong(value.get(COLUMN));
long l = Bytes.toLong(value.get(COLUMN).getValue());
assertTrue(key.getTimestamp() == l ||
HConstants.LATEST_TIMESTAMP == l);
count++;


@ -0,0 +1,139 @@
/**
* Copyright 2008 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
/**
* Test that verifies that scanners return a different timestamp for values that
* are not stored at the same time. (HBASE-737)
*/
public class TestScannerTimes extends HBaseClusterTestCase {
private static final String TABLE_NAME = "hbase737";
private static final String FAM1 = "fam1:";
private static final String FAM2 = "fam2:";
private static final String ROW = "row";
/**
* test for HBASE-737
* @throws IOException
*/
public void testHBase737 () throws IOException {
HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
desc.addFamily(new HColumnDescriptor(FAM1));
desc.addFamily(new HColumnDescriptor(FAM2));
// Create table
HBaseAdmin admin = new HBaseAdmin(conf);
admin.createTable(desc);
// Open table
HTable table = new HTable(conf, TABLE_NAME);
// Insert some values
BatchUpdate b = new BatchUpdate(ROW);
b.put(FAM1 + "letters", "abcdefg".getBytes(HConstants.UTF8_ENCODING));
table.commit(b);
try {
Thread.sleep(1000);
} catch (InterruptedException i) {
//ignore
}
b = new BatchUpdate(ROW);
b.put(FAM1 + "numbers", "123456".getBytes(HConstants.UTF8_ENCODING));
table.commit(b);
try {
Thread.sleep(1000);
} catch (InterruptedException i) {
//ignore
}
b = new BatchUpdate(ROW);
b.put(FAM2 + "letters", "hijklmnop".getBytes(HConstants.UTF8_ENCODING));
table.commit(b);
long times[] = new long[3];
byte[][] columns = new byte[][] {
FAM1.getBytes(HConstants.UTF8_ENCODING),
FAM2.getBytes(HConstants.UTF8_ENCODING)
};
// First scan the memcache
Scanner s = table.getScanner(columns);
try {
int index = 0;
RowResult r = null;
while ((r = s.next()) != null) {
for (Cell c: r.values()) {
times[index++] = c.getTimestamp();
}
}
} finally {
s.close();
}
for (int i = 0; i < times.length - 1; i++) {
for (int j = i + 1; j < times.length; j++) {
assertTrue(times[j] > times[i]);
}
}
// Flush data to disk and try again
cluster.flushcache();
try {
Thread.sleep(1000);
} catch (InterruptedException i) {
//ignore
}
s = table.getScanner(columns);
try {
int index = 0;
RowResult r = null;
while ((r = s.next()) != null) {
for (Cell c: r.values()) {
times[index++] = c.getTimestamp();
}
}
} finally {
s.close();
}
for (int i = 0; i < times.length - 1; i++) {
for (int j = i + 1; j < times.length; j++) {
assertTrue(times[j] > times[i]);
}
}
}
}


@ -30,6 +30,7 @@ import java.util.TreeMap;
import junit.framework.TestCase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.regionserver.HLogEdit;
import org.apache.hadoop.hbase.util.Bytes;
@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.util.Bytes;
* Tests for regular expression row filter
*/
public class TestRegExpRowFilter extends TestCase {
TreeMap<byte [], byte []> colvalues;
TreeMap<byte [], Cell> colvalues;
RowFilterInterface mainFilter;
final char FIRST_CHAR = 'a';
final char LAST_CHAR = 'e';
@ -55,9 +56,10 @@ public class TestRegExpRowFilter extends TestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
this.colvalues = new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
this.colvalues = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
for (char c = FIRST_CHAR; c < LAST_CHAR; c++) {
colvalues.put(Bytes.toBytes(new String(new char [] {c})), GOOD_BYTES);
colvalues.put(Bytes.toBytes(new String(new char [] {c})),
new Cell(GOOD_BYTES, HConstants.LATEST_TIMESTAMP));
}
this.mainFilter = new RegExpRowFilter(HOST_PREFIX + ".*", colvalues);
}
@ -126,9 +128,9 @@ public class TestRegExpRowFilter extends TestCase {
for (char c = FIRST_CHAR; c <= LAST_CHAR; c++) {
byte [] t = createRow(c);
for (Map.Entry<byte [], byte []> e: this.colvalues.entrySet()) {
for (Map.Entry<byte [], Cell> e: this.colvalues.entrySet()) {
assertFalse("Failed on " + c,
filter.filterColumn(t, e.getKey(), e.getValue()));
filter.filterColumn(t, e.getKey(), e.getValue().getValue()));
}
}
// Try a row and column I know will pass.
@ -171,13 +173,15 @@ public class TestRegExpRowFilter extends TestCase {
// Try a row that has all expected columnKeys, and NO null-expected
// columnKeys.
// Testing row with columnKeys: a-d
colvalues.put(new byte [] {(byte)secondToLast}, GOOD_BYTES);
colvalues.put(new byte [] {(byte)secondToLast},
new Cell(GOOD_BYTES, HConstants.LATEST_TIMESTAMP));
assertFalse("Failed with last columnKey " + secondToLast, filter.
filterRow(colvalues));
// Try a row that has all expected columnKeys AND a null-expected columnKey.
// Testing row with columnKeys: a-e
colvalues.put(new byte [] {LAST_CHAR}, GOOD_BYTES);
colvalues.put(new byte [] {LAST_CHAR},
new Cell(GOOD_BYTES, HConstants.LATEST_TIMESTAMP));
assertTrue("Failed with last columnKey " + LAST_CHAR, filter.
filterRow(colvalues));
@ -185,7 +189,7 @@ public class TestRegExpRowFilter extends TestCase {
// that maps to a null value.
// Testing row with columnKeys: a-e, e maps to null
colvalues.put(new byte [] {LAST_CHAR},
HLogEdit.deleteBytes.get());
new Cell(HLogEdit.deleteBytes.get(), HConstants.LATEST_TIMESTAMP));
assertFalse("Failed with last columnKey " + LAST_CHAR + " mapping to null.",
filter.filterRow(colvalues));
}


@ -172,8 +172,9 @@ public class TestRowFilterAfterWrite extends HBaseClusterTestCase {
private void scanTableWithRowFilter(final String tableName, final boolean printValues) throws IOException {
HTable table = new HTable(conf, tableName);
Map<byte [], byte[]> columnMap = new HashMap<byte [], byte[]>();
columnMap.put(TEXT_COLUMN1, VALUE);
Map<byte [], Cell> columnMap = new HashMap<byte [], Cell>();
columnMap.put(TEXT_COLUMN1,
new Cell(VALUE, HConstants.LATEST_TIMESTAMP));
RegExpRowFilter filter = new RegExpRowFilter(null, columnMap);
Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW, filter);
int numFound = doScan(scanner, printValues);


@ -92,8 +92,9 @@ public class TestRowFilterOnMultipleFamilies extends HBaseClusterTestCase {
private void scanTableWithRowFilter(final String tableName, final boolean printValues) throws IOException {
HTable table = new HTable(conf, tableName);
Map<byte [], byte[]> columnMap = new HashMap<byte [], byte[]>();
columnMap.put(TEXT_COLUMN1, VALUE);
Map<byte [], Cell> columnMap = new HashMap<byte [], Cell>();
columnMap.put(TEXT_COLUMN1,
new Cell(VALUE, HConstants.LATEST_TIMESTAMP));
RegExpRowFilter filter = new RegExpRowFilter(null, columnMap);
Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW, filter);
int numFound = doScan(scanner, printValues);


@ -29,6 +29,7 @@ import java.util.Set;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.util.Bytes;
@ -44,7 +45,7 @@ public class TestRowFilterSet extends TestCase {
static final int MAX_PAGES = 5;
final char FIRST_CHAR = 'a';
final char LAST_CHAR = 'e';
TreeMap<byte [], byte[]> colvalues;
TreeMap<byte [], Cell> colvalues;
static byte[] GOOD_BYTES = null;
static byte[] BAD_BYTES = null;
@ -62,9 +63,10 @@ public class TestRowFilterSet extends TestCase {
protected void setUp() throws Exception {
super.setUp();
colvalues = new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
colvalues = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
for (char c = FIRST_CHAR; c < LAST_CHAR; c++) {
colvalues.put(new byte [] {(byte)c}, GOOD_BYTES);
colvalues.put(new byte [] {(byte)c},
new Cell(GOOD_BYTES, HConstants.LATEST_TIMESTAMP));
}
Set<RowFilterInterface> filters = new HashSet<RowFilterInterface>();


@ -76,8 +76,8 @@ public class TestGet2 extends HBaseTestCase implements HConstants {
arbitraryStartRow, HConstants.LATEST_TIMESTAMP,
new WhileMatchRowFilter(new StopRowFilter(arbitraryStopRow)));
HStoreKey key = new HStoreKey();
TreeMap<byte [], byte[]> value =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> value =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
while (scanner.next(key, value)) {
if (actualStartRow == null) {
actualStartRow = key.getRow();


@ -151,7 +151,8 @@ public class TestHMemcache extends TestCase {
}
}
private void isExpectedRowWithoutTimestamps(final int rowIndex, TreeMap<byte [], byte[]> row) {
private void isExpectedRowWithoutTimestamps(final int rowIndex,
TreeMap<byte [], Cell> row) {
int i = 0;
for (byte [] colname: row.keySet()) {
String expectedColname = Bytes.toString(getColumnName(rowIndex, i++));
@ -161,18 +162,19 @@ public class TestHMemcache extends TestCase {
// 100 bytes in size at least. This is the default size
// for BytesWriteable. For comparison, convert bytes to
// String and trim to remove trailing null bytes.
byte [] value = row.get(colname);
byte [] value = row.get(colname).getValue();
String colvalueStr = Bytes.toString(value).trim();
assertEquals("Content", colnameStr, colvalueStr);
}
}
private void isExpectedRow(final int rowIndex, TreeMap<byte [], Cell> row) {
TreeMap<byte [], byte[]> converted =
new TreeMap<byte [], byte[]>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> converted =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
for (Map.Entry<byte [], Cell> entry : row.entrySet()) {
converted.put(entry.getKey(),
entry.getValue() == null ? null : entry.getValue().getValue());
new Cell(entry.getValue() == null ? null : entry.getValue().getValue(),
HConstants.LATEST_TIMESTAMP));
}
isExpectedRowWithoutTimestamps(rowIndex, converted);
}
@ -241,16 +243,16 @@ public class TestHMemcache extends TestCase {
InternalScanner scanner =
this.hmemcache.getScanner(timestamp, cols, HConstants.EMPTY_START_ROW);
HStoreKey key = new HStoreKey();
TreeMap<byte [], byte []> results =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> results =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
for (int i = 0; scanner.next(key, results); i++) {
assertTrue("Row name",
key.toString().startsWith(Bytes.toString(getRowName(i))));
assertEquals("Count of columns", COLUMNS_COUNT,
results.size());
TreeMap<byte [], byte []> row =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
for(Map.Entry<byte [], byte []> e: results.entrySet() ) {
TreeMap<byte [], Cell> row =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
for(Map.Entry<byte [], Cell> e: results.entrySet() ) {
row.put(e.getKey(), e.getValue());
}
isExpectedRowWithoutTimestamps(i, row);
@ -323,8 +325,8 @@ public class TestHMemcache extends TestCase {
InternalScanner scanner = this.hmemcache.getScanner(timestamp,
cols, getRowName(startRowId));
HStoreKey key = new HStoreKey();
TreeMap<byte[], byte[]> results =
new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
TreeMap<byte[], Cell> results =
new TreeMap<byte[], Cell>(Bytes.BYTES_COMPARATOR);
for (int i = 0; scanner.next(key, results); i++) {
int rowId = startRowId + i;
assertTrue("Row name",
@ -332,9 +334,8 @@ public class TestHMemcache extends TestCase {
assertEquals("Count of columns", COLUMNS_COUNT, results.size());
TreeMap<byte[], Cell> row =
new TreeMap<byte[], Cell>(Bytes.BYTES_COMPARATOR);
for (Map.Entry<byte[], byte[]> e : results.entrySet()) {
row.put(e.getKey(),
new Cell(e.getValue(), HConstants.LATEST_TIMESTAMP));
for (Map.Entry<byte[], Cell> e : results.entrySet()) {
row.put(e.getKey(),e.getValue());
}
isExpectedRow(rowId, row);
// Clear out set. Otherwise row results accumulate.


@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.util.Bytes;
/**
@ -284,13 +285,13 @@ public class TestHRegion extends HBaseTestCase {
int numFetched = 0;
try {
HStoreKey curKey = new HStoreKey();
TreeMap<byte [], byte []> curVals =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> curVals =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
int k = 0;
while(s.next(curKey, curVals)) {
for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
byte [] col = it.next();
byte [] val = curVals.get(col);
byte [] val = curVals.get(col).getValue();
int curval =
Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
for(int j = 0; j < cols.length; j++) {
@ -333,13 +334,13 @@ public class TestHRegion extends HBaseTestCase {
numFetched = 0;
try {
HStoreKey curKey = new HStoreKey();
TreeMap<byte [], byte []> curVals =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> curVals =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
int k = 0;
while(s.next(curKey, curVals)) {
for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
byte [] col = it.next();
byte [] val = curVals.get(col);
byte [] val = curVals.get(col).getValue();
int curval =
Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
for(int j = 0; j < cols.length; j++) {
@ -392,13 +393,13 @@ public class TestHRegion extends HBaseTestCase {
numFetched = 0;
try {
HStoreKey curKey = new HStoreKey();
TreeMap<byte [], byte []> curVals =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> curVals =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
int k = 0;
while(s.next(curKey, curVals)) {
for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
byte [] col = it.next();
byte [] val = curVals.get(col);
byte [] val = curVals.get(col).getValue();
int curval =
Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
for(int j = 0; j < cols.length; j++) {
@ -440,13 +441,13 @@ public class TestHRegion extends HBaseTestCase {
numFetched = 0;
try {
HStoreKey curKey = new HStoreKey();
TreeMap<byte [], byte []> curVals =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> curVals =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
int k = 0;
while(s.next(curKey, curVals)) {
for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
byte [] col = it.next();
byte [] val = curVals.get(col);
byte [] val = curVals.get(col).getValue();
int curval =
Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
for (int j = 0; j < cols.length; j++) {
@ -479,13 +480,13 @@ public class TestHRegion extends HBaseTestCase {
numFetched = 0;
try {
HStoreKey curKey = new HStoreKey();
TreeMap<byte [], byte []> curVals =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> curVals =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
int k = 500;
while(s.next(curKey, curVals)) {
for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
byte [] col = it.next();
byte [] val = curVals.get(col);
byte [] val = curVals.get(col).getValue();
int curval =
Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
for (int j = 0; j < cols.length; j++) {
@ -567,13 +568,13 @@ public class TestHRegion extends HBaseTestCase {
int contentsFetched = 0;
int anchorFetched = 0;
HStoreKey curKey = new HStoreKey();
TreeMap<byte [], byte []> curVals =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> curVals =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
int k = 0;
while(s.next(curKey, curVals)) {
for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
byte [] col = it.next();
byte [] val = curVals.get(col);
byte [] val = curVals.get(col).getValue();
String curval = Bytes.toString(val);
if(Bytes.compareTo(col, CONTENTS_BASIC) == 0) {
assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
@ -619,13 +620,13 @@ public class TestHRegion extends HBaseTestCase {
try {
int numFetched = 0;
HStoreKey curKey = new HStoreKey();
TreeMap<byte [], byte []> curVals =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> curVals =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
int k = 0;
while(s.next(curKey, curVals)) {
for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
byte [] col = it.next();
byte [] val = curVals.get(col);
byte [] val = curVals.get(col).getValue();
int curval =
Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
@ -663,8 +664,8 @@ public class TestHRegion extends HBaseTestCase {
try {
int fetched = 0;
HStoreKey curKey = new HStoreKey();
TreeMap<byte [], byte []> curVals =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> curVals =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
while(s.next(curKey, curVals)) {
for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
it.next();


@ -27,7 +27,6 @@ import java.util.TreeMap;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HRegionInfo;
@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
/**
* Test of a long-lived scanner validating as we go.
@ -88,8 +88,8 @@ public class TestScanner extends HBaseTestCase {
throws IOException {
InternalScanner scanner = null;
TreeMap<byte [], byte []> results =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> results =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
HStoreKey key = new HStoreKey();
byte [][][] scanColumns = {
@ -104,11 +104,11 @@ public class TestScanner extends HBaseTestCase {
while (scanner.next(key, results)) {
assertTrue(results.containsKey(HConstants.COL_REGIONINFO));
byte [] val = results.get(HConstants.COL_REGIONINFO);
byte [] val = results.get(HConstants.COL_REGIONINFO).getValue();
validateRegionInfo(val);
if(validateStartcode) {
assertTrue(results.containsKey(HConstants.COL_STARTCODE));
val = results.get(HConstants.COL_STARTCODE);
val = results.get(HConstants.COL_STARTCODE).getValue();
assertNotNull(val);
assertFalse(val.length == 0);
long startCode = Bytes.toLong(val);
@ -117,7 +117,7 @@ public class TestScanner extends HBaseTestCase {
if(serverName != null) {
assertTrue(results.containsKey(HConstants.COL_SERVER));
val = results.get(HConstants.COL_SERVER);
val = results.get(HConstants.COL_SERVER).getValue();
assertNotNull(val);
assertFalse(val.length == 0);
String server = Bytes.toString(val);


@ -182,12 +182,12 @@ public class TestSplit extends HBaseClusterTestCase {
HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null);
try {
HStoreKey curKey = new HStoreKey();
TreeMap<byte [], byte []> curVals =
new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
TreeMap<byte [], Cell> curVals =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
boolean first = true;
OUTER_LOOP: while(s.next(curKey, curVals)) {
for(byte [] col: curVals.keySet()) {
byte [] val = curVals.get(col);
byte [] val = curVals.get(col).getValue();
byte [] curval = val;
if (first) {
first = false;