HBASE-1217 Changed HColumnDescriptor so it now has blocksize, and compression
is now lzo, gz or none (rather than block, record or none).  Made changes to
the shell so you can do this configuration therein.  Bloomfilter still not
hooked up.
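
For illustration, a rough sketch of configuring a family with the new
constructor (the argument order follows the HColumnDescriptor change in this
patch; the family name, version count and blocksize are made-up values, and
the usual HColumnDescriptor/Compression/HConstants/Bytes imports are assumed):

    // Hypothetical family using gzip compression and a 64KB hfile blocksize.
    // New constructor: (familyName, maxVersions, compression, inMemory,
    //   blockCacheEnabled, blocksize, maxValueLength, timeToLive, bloomFilter)
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("info:"),
        3,                                   // max versions
        Compression.Algorithm.GZ.getName(),  // "LZO", "GZ" or "NONE"
        false,                               // in memory
        true,                                // block cache
        64 * 1024,                           // hfile blocksize
        Integer.MAX_VALUE,                   // max value length
        HConstants.FOREVER,                  // time-to-live
        false);                              // bloomfilter (not hooked up yet)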

HBASE-859 HStoreKey needs a reworking... binary keys work again.
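
As a rough sketch of how the new per-table comparators and the stream-free
serialization are meant to be used (class and method names are taken from the
HStoreKey changes in this patch; the table name and rows are made up):

    // Pick the raw comparator that matches the table; binary row keys only
    // sort correctly when the right one is used.
    HStoreKey.StoreKeyComparator c =
        HStoreKey.getComparator(Bytes.toBytes("mytable"));  // plain table
    byte [] a = HStoreKey.getBytes(Bytes.toBytes("row-a"));
    byte [] b = HStoreKey.getBytes(Bytes.toBytes("row-b"));
    int cmp = c.compare(a, b);  // negative: "row-a" sorts before "row-b"
    // Object-level equivalent, keyed off the region being served:
    // WritableComparator wc = HStoreKey.getWritableComparator(hri);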

HBASE-1211 NPE in retries exhausted exception


M src/test/org/apache/hadoop/hbase/TestHStoreKey.java
    Added tests comparing binary keys, plain, meta and root keys.
    Enabled the commented-out testHStoreKeyBorderCases.
M  src/test/org/apache/hadoop/hbase/HBaseTestCase.java
    Removed PUNCTUATION pollution of keys.
M src/test/org/apache/hadoop/hbase/regionserver/TestBloomFilters.java
    Use different compression type.
M src/test/org/apache/hadoop/hbase/regionserver/TestTimestamp.java
    Use new HCD constructor.
M src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
    Was using ROOT table but a plain hstorekey comparator; that won't
    work (have to be careful about which comparator is used on which
    table from here on out).
M src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
    getWriter now takes whether or not to use a bloom filter, etc.
M  src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
    Removed PUNCTUATION pollution.
M src/test/org/apache/hadoop/hbase/filter/TestStopRowFilter.java
M src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java
    Whitespace.
M src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
    Pass the compressor and whether to use a bloomfilter.
M src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
    Added new seek+scan test.
M src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    Added in BLOCKSIZE, changed COMPRESSION arguments.
    Removed DEFAULT_MAPFILE_INDEX_INTERVAL.
    Upped the version from 6 to 7.
M src/java/org/apache/hadoop/hbase/HStoreKey.java
    New object and raw byte comparators, one for each context of
    plain, meta, and root; they share the same comparison code.  Other
    utilities such as static creates that take a ByteBuffer or a byte
    array holding a serialized HStoreKey.  Old compareTo methods are
    deprecated since they can return the wrong answer for binary keys.
    New getBytes without creating Streams.
    Removed the old BeforeThisStoreKey trick.
    Custom vint math methods that work with ByteBuffers and byte []
    instead of streams.
M src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
    Fixup for new compression.
M src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
    Pass in comparator to use.
M src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
M src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
    Use the right comparator when comparing rows.
M src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
M src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java
    Formatting.
M src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    Formatting.
    There might be issues here with key compares where there is no store
    in the context... leave it for now.
M src/java/org/apache/hadoop/hbase/regionserver/Store.java
    Use new comparators and hcd changes.
M src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    Added an override of HFile.Reader so we could format the toString
    (so HFile doesn't have to know about HStoreKeys even though that's
    what we always put in there -- let it just be about
    byte []).
M src/java/org/apache/hadoop/hbase/rest/parser/JsonRestParser.java
M src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java
M src/java/org/apache/hadoop/hbase/HTableDescriptor.java
    Fix up for new HCD.
M src/java/org/apache/hadoop/hbase/HRegionInfo.java
M src/java/org/apache/hadoop/hbase/master/MetaRegion.java
    Use Bytes.compareTo.
M src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java
    (isTop): Added.
M src/java/org/apache/hadoop/hbase/io/RowResult.java
    Formatting.
M src/java/org/apache/hadoop/hbase/io/hfile/Compression.java
    Make getCompressionAlgorithmByName public.
M src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    Add in writer constructor that takes compression and bloom filter.
    Fix some of the comparator use.
M src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java
    Removed commented-out code.
M src/java/org/apache/hadoop/hbase/io/Cell.java
    Formatting.
M src/java/org/apache/hadoop/hbase/io/HBaseMapFile.java
    Use new comparator names.
A src/java/org/apache/hadoop/hbase/util/TestBytes.java
    Small bytes test.
M src/java/org/apache/hadoop/hbase/util/Bytes.java
    Some overrides to help when source is ByteBuffer.
    Added in some vint math utility.
M src/java/org/apache/hadoop/hbase/client/HTable.java
    Formatting.
M src/java/org/apache/hadoop/hbase/client/MetaScanner.java
    Use Bytes.compareTo.
M src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
    Use right comparator.
M src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java
    Make it match HCD.
M src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
    Fix NPE
M src/java/org/apache/hadoop/hbase/client/ScannerCallable.java
    Formatting.
M bin/HBase.rb
    Support for new HCD.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@749546 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2009-03-03 06:48:33 +00:00
parent 8206da62c0
commit 08693f66a8
43 changed files with 1529 additions and 785 deletions


@@ -26,6 +26,9 @@ Release 0.20.0 - Unreleased
    HBASE-1198  OOME in IPC server does not trigger abort behavior
    HBASE-1209  Make port displayed the same as is used in URL for RegionServer
                table in UI (Lars George via Stack)
+   HBASE-1217  add new compression and hfile blocksize to HColumnDescriptor
+   HBASE-859   HStoreKey needs a reworking
+   HBASE-1211  NPE in retries exhausted exception
    IMPROVEMENTS
    HBASE-1089  Add count of regions on filesystem to master UI; add percentage


@@ -27,6 +27,8 @@ import java.util.HashMap;
 import java.util.Map;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.hfile.Compression;
+import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
 import org.apache.hadoop.hbase.rest.serializer.IRestSerializer;
 import org.apache.hadoop.hbase.rest.serializer.ISerializable;
@@ -52,11 +54,12 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
   // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82.
   // Version 5 was when bloom filter descriptors were removed.
   // Version 6 adds metadata as a map where keys and values are byte[].
-  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)6;
+  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)7;
 
   /**
    * The type of compression.
    * @see org.apache.hadoop.io.SequenceFile.Writer
+   * @deprecated Replaced by {@link Compression.Algorithm}.
    */
   public static enum CompressionType {
     /** Do not compress records. */
@@ -67,20 +70,21 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
     BLOCK
   }
 
-  public static final String COMPRESSION = "COMPRESSION"; //TODO: change to protected
-  public static final String BLOCKCACHE = "BLOCKCACHE"; //TODO: change to protected
-  public static final String LENGTH = "LENGTH"; //TODO: change to protected
-  public static final String TTL = "TTL"; //TODO: change to protected
-  public static final String BLOOMFILTER = "BLOOMFILTER"; //TODO: change to protected
-  public static final String FOREVER = "FOREVER"; //TODO: change to protected
-  public static final String MAPFILE_INDEX_INTERVAL = //TODO: change to protected
+  public static final String COMPRESSION = "COMPRESSION";
+  public static final String BLOCKCACHE = "BLOCKCACHE";
+  public static final String BLOCKSIZE = "BLOCKSIZE";
+  public static final String LENGTH = "LENGTH";
+  public static final String TTL = "TTL";
+  public static final String BLOOMFILTER = "BLOOMFILTER";
+  public static final String FOREVER = "FOREVER";
+  public static final String MAPFILE_INDEX_INTERVAL =
     "MAPFILE_INDEX_INTERVAL";
 
   /**
    * Default compression type.
    */
-  public static final CompressionType DEFAULT_COMPRESSION =
-    CompressionType.NONE;
+  public static final String DEFAULT_COMPRESSION =
+    Compression.Algorithm.NONE.getName();
 
   /**
    * Default number of versions of a record to keep.
@ -100,6 +104,12 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
*/ */
private volatile Integer maxValueLength = null; private volatile Integer maxValueLength = null;
/*
* Cache here the HCD value.
* Question: its OK to cache since when we're reenable, we create a new HCD?
*/
private volatile Integer blocksize = null;
/** /**
* Default setting for whether to serve from memory or not. * Default setting for whether to serve from memory or not.
*/ */
@ -110,6 +120,12 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
*/ */
public static final boolean DEFAULT_BLOCKCACHE = false; public static final boolean DEFAULT_BLOCKCACHE = false;
/**
* Default size of blocks in files store to the filesytem. Use smaller for
* faster random-access at expense of larger indices (more memory consumption).
*/
public static final int DEFAULT_BLOCKSIZE = HFile.DEFAULT_BLOCKSIZE;
/** /**
* Default setting for whether or not to use bloomfilters. * Default setting for whether or not to use bloomfilters.
*/ */
@ -123,16 +139,10 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
// Column family name // Column family name
private byte [] name; private byte [] name;
/**
* Default mapfile index interval.
*/
public static final int DEFAULT_MAPFILE_INDEX_INTERVAL = 128;
// Column metadata // Column metadata
protected Map<ImmutableBytesWritable,ImmutableBytesWritable> values = protected Map<ImmutableBytesWritable,ImmutableBytesWritable> values =
new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>(); new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
/** /**
* Default constructor. Must be present for Writable. * Default constructor. Must be present for Writable.
*/ */
@ -200,9 +210,37 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
* @throws IllegalArgumentException if the number of versions is &lt;= 0 * @throws IllegalArgumentException if the number of versions is &lt;= 0
*/ */
public HColumnDescriptor(final byte [] familyName, final int maxVersions, public HColumnDescriptor(final byte [] familyName, final int maxVersions,
final CompressionType compression, final boolean inMemory, final String compression, final boolean inMemory,
final boolean blockCacheEnabled, final int maxValueLength, final boolean blockCacheEnabled, final int maxValueLength,
final int timeToLive, final boolean bloomFilter) { final int timeToLive, final boolean bloomFilter) {
this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
DEFAULT_BLOCKSIZE, maxValueLength, timeToLive, bloomFilter);
}
/**
* Constructor
* @param familyName Column family name. Must be 'printable' -- digit or
* letter -- and end in a <code>:<code>
* @param maxVersions Maximum number of versions to keep
* @param compression Compression type
* @param inMemory If true, column data should be kept in an HRegionServer's
* cache
* @param blockCacheEnabled If true, MapFile blocks should be cached
* @param maxValueLength Restrict values to &lt;= this value
* @param timeToLive Time-to-live of cell contents, in seconds
* (use HConstants.FOREVER for unlimited TTL)
* @param bloomFilter Enable the specified bloom filter for this column
*
* @throws IllegalArgumentException if passed a family name that is made of
* other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> and does not
* end in a <code>:</code>
* @throws IllegalArgumentException if the number of versions is &lt;= 0
*/
public HColumnDescriptor(final byte [] familyName, final int maxVersions,
final String compression, final boolean inMemory,
final boolean blockCacheEnabled, final int blocksize,
final int maxValueLength,
final int timeToLive, final boolean bloomFilter) {
isLegalFamilyName(familyName); isLegalFamilyName(familyName);
this.name = stripColon(familyName); this.name = stripColon(familyName);
if (maxVersions <= 0) { if (maxVersions <= 0) {
@ -215,10 +253,12 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
setBlockCacheEnabled(blockCacheEnabled); setBlockCacheEnabled(blockCacheEnabled);
setMaxValueLength(maxValueLength); setMaxValueLength(maxValueLength);
setTimeToLive(timeToLive); setTimeToLive(timeToLive);
setCompressionType(compression); setCompressionType(Compression.Algorithm.
valueOf(compression.toUpperCase()));
setBloomfilter(bloomFilter); setBloomfilter(bloomFilter);
setBlocksize(blocksize);
} }
private static byte [] stripColon(final byte [] n) { private static byte [] stripColon(final byte [] n) {
byte [] result = new byte [n.length - 1]; byte [] result = new byte [n.length - 1];
// Have the stored family name be absent the colon delimiter // Have the stored family name be absent the colon delimiter
@ -321,15 +361,8 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
/** @return compression type being used for the column family */ /** @return compression type being used for the column family */
@TOJSON @TOJSON
public CompressionType getCompression() { public Compression.Algorithm getCompression() {
String value = getValue(COMPRESSION); return Compression.Algorithm.valueOf(getValue(COMPRESSION));
if (value != null) {
if (value.equalsIgnoreCase("BLOCK"))
return CompressionType.BLOCK;
else if (value.equalsIgnoreCase("RECORD"))
return CompressionType.RECORD;
}
return CompressionType.NONE;
} }
/** @return maximum number of versions */ /** @return maximum number of versions */
@ -347,24 +380,44 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
public void setMaxVersions(int maxVersions) { public void setMaxVersions(int maxVersions) {
setValue(HConstants.VERSIONS, Integer.toString(maxVersions)); setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
} }
/**
* @return Blocksize.
*/
@TOJSON
public synchronized int getBlocksize() {
if (this.blocksize == null) {
String value = getValue(BLOCKSIZE);
this.blocksize = (value != null)?
Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
}
return this.blocksize.intValue();
}
/**
* @param s
*/
public void setBlocksize(int s) {
setValue(BLOCKSIZE, Integer.toString(s));
this.blocksize = null;
}
/** /**
* @return Compression type setting. * @return Compression type setting.
*/ */
@TOJSON @TOJSON
public CompressionType getCompressionType() { public Compression.Algorithm getCompressionType() {
return getCompression(); return getCompression();
} }
/** /**
* @param type Compression type setting. * @param type Compression type setting.
*/ */
public void setCompressionType(CompressionType type) { public void setCompressionType(Compression.Algorithm type) {
String compressionType; String compressionType;
switch (type) { switch (type) {
case BLOCK: compressionType = "BLOCK"; break; case GZ: compressionType = "GZ"; break;
case RECORD: compressionType = "RECORD"; break; default: compressionType = "NONE"; break;
default: compressionType = "NONE"; break;
} }
setValue(COMPRESSION, compressionType); setValue(COMPRESSION, compressionType);
} }
@ -461,17 +514,6 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
setValue(BLOOMFILTER, Boolean.toString(onOff)); setValue(BLOOMFILTER, Boolean.toString(onOff));
} }
/**
* @return The number of entries that are added to the store MapFile before
* an index entry is added.
*/
public int getMapFileIndexInterval() {
String value = getValue(MAPFILE_INDEX_INTERVAL);
if (value != null)
return Integer.valueOf(value).intValue();
return DEFAULT_MAPFILE_INDEX_INTERVAL;
}
/** /**
* @param interval The number of entries that are added to the store MapFile before * @param interval The number of entries that are added to the store MapFile before
* an index entry is added. * an index entry is added.
@ -531,7 +573,7 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
this.values.clear(); this.values.clear();
setMaxVersions(in.readInt()); setMaxVersions(in.readInt());
int ordinal = in.readInt(); int ordinal = in.readInt();
setCompressionType(CompressionType.values()[ordinal]); setCompressionType(Compression.Algorithm.values()[ordinal]);
setInMemory(in.readBoolean()); setInMemory(in.readBoolean());
setMaxValueLength(in.readInt()); setMaxValueLength(in.readInt());
setBloomfilter(in.readBoolean()); setBloomfilter(in.readBoolean());
@ -551,7 +593,7 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
setTimeToLive(in.readInt()); setTimeToLive(in.readInt());
} }
} else { } else {
// version 6+ // version 7+
this.name = Bytes.readByteArray(in); this.name = Bytes.readByteArray(in);
this.values.clear(); this.values.clear();
int numValues = in.readInt(); int numValues = in.readInt();
@ -562,6 +604,10 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
value.readFields(in); value.readFields(in);
values.put(key, value); values.put(key, value);
} }
if (version == 6) {
// Convert old values.
setValue(COMPRESSION, Compression.Algorithm.NONE.getName());
}
} }
} }
@ -597,4 +643,4 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HCol
public void restSerialize(IRestSerializer serializer) throws HBaseRestException { public void restSerialize(IRestSerializer serializer) throws HBaseRestException {
serializer.serializeColumnDescriptor(this); serializer.serializeColumnDescriptor(this);
} }
} }


@@ -427,13 +427,13 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
     }
 
     // Compare start keys.
-    result = HStoreKey.compareTwoRowKeys(this.startKey, o.startKey);
+    result = Bytes.compareTo(this.startKey, o.startKey);
     if (result != 0) {
       return result;
     }
 
     // Compare end keys.
-    return HStoreKey.compareTwoRowKeys(this.endKey, o.endKey);
+    return Bytes.compareTo(this.endKey, o.endKey);
   }
 
   /**


@@ -20,10 +20,8 @@
 package org.apache.hadoop.hbase;
 
-import java.io.ByteArrayOutputStream;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -44,10 +42,6 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
    * Colon character in UTF-8
    */
   public static final char COLUMN_FAMILY_DELIMITER = ':';
-
-  private byte [] row = HConstants.EMPTY_BYTE_ARRAY;
-  private byte [] column = HConstants.EMPTY_BYTE_ARRAY;
-  private long timestamp = Long.MAX_VALUE;
 
   /**
    * Estimated size tax paid for each instance of HSK.  Estimate based on
@ -56,9 +50,17 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
// In jprofiler, says shallow size is 48 bytes. Add to it cost of two // In jprofiler, says shallow size is 48 bytes. Add to it cost of two
// byte arrays and then something for the HRI hosting. // byte arrays and then something for the HRI hosting.
public static final int ESTIMATED_HEAP_TAX = 48; public static final int ESTIMATED_HEAP_TAX = 48;
public static final StoreKeyByteComparator BYTECOMPARATOR = private byte [] row = HConstants.EMPTY_BYTE_ARRAY;
new StoreKeyByteComparator(); private byte [] column = HConstants.EMPTY_BYTE_ARRAY;
private long timestamp = Long.MAX_VALUE;
private static final HStoreKey.StoreKeyComparator PLAIN_COMPARATOR =
new HStoreKey.StoreKeyComparator();
private static final HStoreKey.StoreKeyComparator META_COMPARATOR =
new HStoreKey.MetaStoreKeyComparator();
private static final HStoreKey.StoreKeyComparator ROOT_COMPARATOR =
new HStoreKey.RootStoreKeyComparator();
/** Default constructor used in conjunction with Writable interface */ /** Default constructor used in conjunction with Writable interface */
public HStoreKey() { public HStoreKey() {
@ -93,7 +95,6 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
* *
* @param row row key * @param row row key
* @param timestamp timestamp value * @param timestamp timestamp value
* @param hri HRegionInfo
*/ */
public HStoreKey(final byte [] row, final long timestamp) { public HStoreKey(final byte [] row, final long timestamp) {
this(row, HConstants.EMPTY_BYTE_ARRAY, timestamp); this(row, HConstants.EMPTY_BYTE_ARRAY, timestamp);
@ -130,7 +131,6 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
* @param row row key * @param row row key
* @param column column key * @param column column key
* @param timestamp timestamp value * @param timestamp timestamp value
* @param regionInfo region info
*/ */
public HStoreKey(final String row, final String column, final long timestamp) { public HStoreKey(final String row, final String column, final long timestamp) {
this (Bytes.toBytes(row), Bytes.toBytes(column), timestamp); this (Bytes.toBytes(row), Bytes.toBytes(column), timestamp);
@ -143,7 +143,6 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
* @param row row key * @param row row key
* @param column column key * @param column column key
* @param timestamp timestamp value * @param timestamp timestamp value
* @param regionInfo region info
*/ */
public HStoreKey(final byte [] row, final byte [] column, final long timestamp) { public HStoreKey(final byte [] row, final byte [] column, final long timestamp) {
// Make copies // Make copies
@ -256,7 +255,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
public boolean matchesRowFamily(final HStoreKey that) { public boolean matchesRowFamily(final HStoreKey that) {
final int delimiterIndex = getFamilyDelimiterIndex(getColumn()); final int delimiterIndex = getFamilyDelimiterIndex(getColumn());
return equalsTwoRowKeys(getRow(), that.getRow()) && return equalsTwoRowKeys(getRow(), that.getRow()) &&
Bytes.compareTo(getColumn(), 0, delimiterIndex, that.getColumn(), 0, Bytes.compareTo(getColumn(), 0, delimiterIndex, that.getColumn(), 0,
delimiterIndex) == 0; delimiterIndex) == 0;
} }
@ -277,34 +276,44 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
} }
return compareTo(other) == 0; return compareTo(other) == 0;
} }
@Override
public int hashCode() { public int hashCode() {
int result = Bytes.hashCode(getRow()); int c = Bytes.hashCode(getRow());
result ^= Bytes.hashCode(getColumn()); c ^= Bytes.hashCode(getColumn());
result ^= getTimestamp(); c ^= getTimestamp();
return result; return c;
} }
// Comparable // Comparable
/**
* @deprecated Use Comparators instead. This can give wrong results.
*/
public int compareTo(final HStoreKey o) { public int compareTo(final HStoreKey o) {
return compareTo(this, o); return compareTo(this, o);
} }
/**
* @param left
* @param right
* @return
* @deprecated Use Comparators instead. This can give wrong results because
* does not take into account special handling needed for meta and root rows.
*/
static int compareTo(final HStoreKey left, final HStoreKey right) { static int compareTo(final HStoreKey left, final HStoreKey right) {
// We can be passed null // We can be passed null
if (left == null && right == null) return 0; if (left == null && right == null) return 0;
if (left == null) return -1; if (left == null) return -1;
if (right == null) return 1; if (right == null) return 1;
int result = compareTwoRowKeys(left.getRow(), right.getRow()); int result = Bytes.compareTo(left.getRow(), right.getRow());
if (result != 0) { if (result != 0) {
return result; return result;
} }
result = left.getColumn() == null && right.getColumn() == null? 0: result = left.getColumn() == null && right.getColumn() == null? 0:
left.getColumn() == null && right.getColumn() != null? -1: left.getColumn() == null && right.getColumn() != null? -1:
left.getColumn() != null && right.getColumn() == null? 1: left.getColumn() != null && right.getColumn() == null? 1:
Bytes.compareTo(left.getColumn(), right.getColumn()); Bytes.compareTo(left.getColumn(), right.getColumn());
if (result != 0) { if (result != 0) {
return result; return result;
} }
@ -365,7 +374,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
} }
return Bytes.compareTo(family, 0, index, column, 0, index) == 0; return Bytes.compareTo(family, 0, index, column, 0, index) == 0;
} }
/** /**
* @param family * @param family
* @return Return <code>family</code> plus the family delimiter. * @return Return <code>family</code> plus the family delimiter.
@ -413,19 +422,39 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
len); len);
return result; return result;
} }
/** /**
* @param b * @param b
* @return Index of the family-qualifier colon delimiter character in passed * @return Index of the family-qualifier colon delimiter character in passed
* buffer. * buffer.
*/ */
public static int getFamilyDelimiterIndex(final byte [] b) { public static int getFamilyDelimiterIndex(final byte [] b) {
return getDelimiter(b, 0, b.length, (int)COLUMN_FAMILY_DELIMITER);
}
private static int getRequiredDelimiterInReverse(final byte [] b,
final int offset, final int length, final int delimiter) {
int index = getDelimiterInReverse(b, offset, length, delimiter);
if (index < 0) {
throw new IllegalArgumentException("No " + delimiter + " in <" +
Bytes.toString(b) + ">" + ", length=" + length + ", offset=" + offset);
}
return index;
}
/*
* @param b
* @param delimiter
* @return Index of delimiter having started from end of <code>b</code> moving
* leftward.
*/
private static int getDelimiter(final byte [] b, int offset, final int length,
final int delimiter) {
if (b == null) { if (b == null) {
throw new NullPointerException(); throw new NullPointerException();
} }
int result = -1; int result = -1;
for (int i = 0; i < b.length; i++) { for (int i = offset; i < length + offset; i++) {
if (b[i] == COLUMN_FAMILY_DELIMITER) { if (b[i] == delimiter) {
result = i; result = i;
break; break;
} }
@ -433,19 +462,26 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
return result; return result;
} }
/** /*
* Utility method to compare two row keys. * @param b
* This is required because of the meta delimiters. * @param delimiter
* This is a hack. * @return Index of delimiter
* @param regionInfo
* @param rowA
* @param rowB
* @return value of the comparison
*/ */
public static int compareTwoRowKeys(final byte[] rowA, final byte[] rowB) { private static int getDelimiterInReverse(final byte [] b, final int offset,
return Bytes.compareTo(rowA, rowB); final int length, final int delimiter) {
if (b == null) {
throw new NullPointerException();
}
int result = -1;
for (int i = (offset + length) - 1; i >= offset; i--) {
if (b[i] == delimiter) {
result = i;
break;
}
}
return result;
} }
/** /**
* Utility method to check if two row keys are equal. * Utility method to check if two row keys are equal.
* This is required because of the meta delimiters * This is required because of the meta delimiters
@ -457,7 +493,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
public static boolean equalsTwoRowKeys(final byte[] rowA, final byte[] rowB) { public static boolean equalsTwoRowKeys(final byte[] rowA, final byte[] rowB) {
return ((rowA == null) && (rowB == null)) ? true: return ((rowA == null) && (rowB == null)) ? true:
(rowA == null) || (rowB == null) || (rowA.length != rowB.length) ? false: (rowA == null) || (rowB == null) || (rowA.length != rowB.length) ? false:
compareTwoRowKeys(rowA,rowB) == 0; Bytes.compareTo(rowA, rowB) == 0;
} }
// Writable // Writable
@ -518,153 +554,38 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
* @throws IOException * @throws IOException
*/ */
public static byte [] getBytes(final HStoreKey hsk) throws IOException { public static byte [] getBytes(final HStoreKey hsk) throws IOException {
// TODO: Redo with system.arraycopy instead of DOS. return getBytes(hsk.getRow(), hsk.getColumn(), hsk.getTimestamp());
if (hsk == null) {
throw new IllegalArgumentException("Writable cannot be null");
}
final int serializedSize = getSerializedSize(hsk);
final ByteArrayOutputStream byteStream = new ByteArrayOutputStream(serializedSize);
DataOutputStream out = new DataOutputStream(byteStream);
try {
hsk.write(out);
out.close();
out = null;
final byte [] serializedKey = byteStream.toByteArray();
if (serializedKey.length != serializedSize) {
// REMOVE THIS AFTER CONFIDENCE THAT OUR SIZING IS BEING DONE PROPERLY
throw new AssertionError("Sizes do not agree " + serializedKey.length +
", " + serializedSize);
}
return serializedKey;
} finally {
if (out != null) {
out.close();
}
}
}
/**
* Pass this class into {@link org.apache.hadoop.io.MapFile}.getClosest when
* searching for the key that comes BEFORE this one but NOT this one. This
* class will return > 0 when asked to compare against itself rather than 0.
* This is a hack for case where getClosest returns a deleted key and we want
* to get the previous. Can't unless use use this class; it'll just keep
* returning us the deleted key (getClosest gets exact or nearest before when
* you pass true argument). TODO: Throw this class away when MapFile has
* a real 'previous' method. See HBASE-751.
* @deprecated
*/
public static class BeforeThisStoreKey extends HStoreKey {
private final HStoreKey beforeThisKey;
/**
* @param beforeThisKey
*/
public BeforeThisStoreKey(final HStoreKey beforeThisKey) {
super();
this.beforeThisKey = beforeThisKey;
}
@Override
public int compareTo(final HStoreKey o) {
final int result = this.beforeThisKey.compareTo(o);
return result == 0? -1: result;
}
@Override
public boolean equals(final Object obj) {
return false;
}
@Override
public byte[] getColumn() {
return this.beforeThisKey.getColumn();
}
@Override
public byte[] getRow() {
return this.beforeThisKey.getRow();
}
@Override
public long heapSize() {
return this.beforeThisKey.heapSize();
}
@Override
public long getTimestamp() {
return this.beforeThisKey.getTimestamp();
}
@Override
public int hashCode() {
return this.beforeThisKey.hashCode();
}
@Override
public boolean matchesRowCol(final HStoreKey other) {
return this.beforeThisKey.matchesRowCol(other);
}
@Override
public boolean matchesRowFamily(final HStoreKey that) {
return this.beforeThisKey.matchesRowFamily(that);
}
@Override
public boolean matchesWithoutColumn(final HStoreKey other) {
return this.beforeThisKey.matchesWithoutColumn(other);
}
@Override
public void readFields(final DataInput in) throws IOException {
this.beforeThisKey.readFields(in);
}
@Override
public void set(final HStoreKey k) {
this.beforeThisKey.set(k);
}
@Override
public void setColumn(final byte[] c) {
this.beforeThisKey.setColumn(c);
}
@Override
public void setRow(final byte[] newrow) {
this.beforeThisKey.setRow(newrow);
}
@Override
public void setVersion(final long timestamp) {
this.beforeThisKey.setVersion(timestamp);
}
@Override
public String toString() {
return this.beforeThisKey.toString();
}
@Override
public void write(final DataOutput out) throws IOException {
this.beforeThisKey.write(out);
}
} }
/** /**
* Passed as comparator for memcache and for store files. See HBASE-868. * @param row Can't be null
* @return Passed arguments as a serialized HSK.
* @throws IOException
*/ */
public static class HStoreKeyWritableComparator extends WritableComparator { public static byte [] getBytes(final byte [] row)
public HStoreKeyWritableComparator() { throws IOException {
super(HStoreKey.class); return getBytes(row, null, HConstants.LATEST_TIMESTAMP);
} }
@SuppressWarnings("unchecked") /**
public int compare(final WritableComparable left, * @param row Can't be null
final WritableComparable right) { * @param column Can be null
return compareTo((HStoreKey)left, (HStoreKey)right); * @param ts
} * @return Passed arguments as a serialized HSK.
* @throws IOException
*/
public static byte [] getBytes(final byte [] row, final byte [] column,
final long ts)
throws IOException {
// TODO: Get vint sizes as I calculate serialized size of hsk.
byte [] b = new byte [getSerializedSize(row) +
getSerializedSize(column) + Bytes.SIZEOF_LONG];
int offset = Bytes.writeByteArray(b, 0, row, 0, row.length);
byte [] c = column == null? HConstants.EMPTY_BYTE_ARRAY: column;
offset = Bytes.writeByteArray(b, offset, c, 0, c.length);
byte [] timestamp = Bytes.toBytes(ts);
System.arraycopy(timestamp, 0, b, offset, timestamp.length);
return b;
} }
/** /**
@ -688,17 +609,11 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
* @return Column * @return Column
*/ */
public static byte [] getColumn(final ByteBuffer bb) { public static byte [] getColumn(final ByteBuffer bb) {
byte firstByte = bb.get(0); // Skip over row.
int offset = skipVintdByteArray(bb, 0);
byte firstByte = bb.get(offset);
int vint = firstByte; int vint = firstByte;
int vintWidth = WritableUtils.decodeVIntSize(firstByte); int vintWidth = WritableUtils.decodeVIntSize(firstByte);
if (vintWidth != 1) {
vint = getBigVint(vintWidth, firstByte, bb.array(), bb.arrayOffset());
}
// Skip over row.
int offset = vint + vintWidth;
firstByte = bb.get(offset);
vint = firstByte;
vintWidth = WritableUtils.decodeVIntSize(firstByte);
if (vintWidth != 1) { if (vintWidth != 1) {
vint = getBigVint(vintWidth, firstByte, bb.array(), vint = getBigVint(vintWidth, firstByte, bb.array(),
bb.arrayOffset() + offset); bb.arrayOffset() + offset);
@ -714,117 +629,24 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
* @return Timestamp * @return Timestamp
*/ */
public static long getTimestamp(final ByteBuffer bb) { public static long getTimestamp(final ByteBuffer bb) {
byte firstByte = bb.get(0); return bb.getLong(bb.limit() - Bytes.SIZEOF_LONG);
}
/*
* @param bb
* @param offset
* @return Amount to skip to get paste a byte array that is preceded by a
* vint of how long it is.
*/
private static int skipVintdByteArray(final ByteBuffer bb, final int offset) {
byte firstByte = bb.get(offset);
int vint = firstByte; int vint = firstByte;
int vintWidth = WritableUtils.decodeVIntSize(firstByte); int vintWidth = WritableUtils.decodeVIntSize(firstByte);
if (vintWidth != 1) {
vint = getBigVint(vintWidth, firstByte, bb.array(), bb.arrayOffset());
}
// Skip over row.
int offset = vint + vintWidth;
firstByte = bb.get(offset);
vint = firstByte;
vintWidth = WritableUtils.decodeVIntSize(firstByte);
if (vintWidth != 1) { if (vintWidth != 1) {
vint = getBigVint(vintWidth, firstByte, bb.array(), vint = getBigVint(vintWidth, firstByte, bb.array(),
bb.arrayOffset() + offset); bb.arrayOffset() + offset);
} }
// Skip over column return vint + vintWidth + offset;
offset += (vint + vintWidth);
return bb.getLong(offset);
}
/**
* RawComparator for plain -- i.e. non-catalog table keys such as
* -ROOT- and .META. -- HStoreKeys. Compares at byte level.
*/
public static class StoreKeyByteComparator implements RawComparator<byte []> {
public StoreKeyByteComparator() {
super();
}
public int compare(final byte[] b1, final byte[] b2) {
return compare(b1, 0, b1.length, b2, 0, b2.length);
}
public int compare(final byte [] b1, int o1, int l1,
final byte [] b2, int o2, int l2) {
// Below is byte compare without creating new objects. Its awkward but
// seems no way around getting vint width, value, and compare result any
// other way. The passed byte arrays, b1 and b2, have a vint, row, vint,
// column, timestamp in them. The byte array was written by the
// #write(DataOutputStream) method above. See it to better understand the
// below.
// Calculate vint and vint width for rows in b1 and b2.
byte firstByte1 = b1[o1];
int vint1 = firstByte1;
int vintWidth1 = WritableUtils.decodeVIntSize(firstByte1);
if (vintWidth1 != 1) {
vint1 = getBigVint(vintWidth1, firstByte1, b1, o1);
}
byte firstByte2 = b2[o2];
int vint2 = firstByte2;
int vintWidth2 = WritableUtils.decodeVIntSize(firstByte2);
if (vintWidth2 != 1) {
vint2 = getBigVint(vintWidth2, firstByte2, b2, o2);
}
// Compare the rows.
int result = WritableComparator.compareBytes(b1, o1 + vintWidth1, vint1,
b2, o2 + vintWidth2, vint2);
if (result != 0) {
return result;
}
// Update offsets and lengths so we are aligned on columns.
int diff1 = vintWidth1 + vint1;
o1 += diff1;
l1 -= diff1;
int diff2 = vintWidth2 + vint2;
o2 += diff2;
l2 -= diff2;
// Calculate vint and vint width for columns in b1 and b2.
firstByte1 = b1[o1];
vint1 = firstByte1;
vintWidth1 = WritableUtils.decodeVIntSize(firstByte1);
if (vintWidth1 != 1) {
vint1 = getBigVint(vintWidth1, firstByte1, b1, o1);
}
firstByte2 = b2[o2];
vint2 = firstByte2;
vintWidth2 = WritableUtils.decodeVIntSize(firstByte2);
if (vintWidth2 != 1) {
vint2 = getBigVint(vintWidth2, firstByte2, b2, o2);
}
// Compare columns.
result = WritableComparator.compareBytes(b1, o1 + vintWidth1, vint1,
b2, o2 + vintWidth2, vint2);
if (result != 0) {
return result;
}
// Update offsets and lengths.
diff1 = vintWidth1 + vint1;
o1 += diff1;
l1 -= diff1;
diff2 = vintWidth2 + vint2;
o2 += diff2;
l2 -= diff2;
// The below older timestamps sorting ahead of newer timestamps looks
// wrong but it is intentional. This way, newer timestamps are first
// found when we iterate over a memcache and newer versions are the
// first we trip over when reading from a store file.
for (int i = 0; i < l1; i++) {
int leftb = b1[o1 + i] & 0xff;
int rightb = b2[o2 + i] & 0xff;
if (leftb < rightb) {
return 1;
} else if (leftb > rightb) {
return -1;
}
}
return 0;
}
} }
/* /*
@ -858,31 +680,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
*/ */
public static HStoreKey create(final ByteBuffer bb) public static HStoreKey create(final ByteBuffer bb)
throws IOException { throws IOException {
byte firstByte = bb.get(0); return HStoreKey.create(bb.array(), bb.arrayOffset(), bb.limit());
int vint = firstByte;
int vintWidth = WritableUtils.decodeVIntSize(firstByte);
if (vintWidth != 1) {
vint = getBigVint(vintWidth, firstByte, bb.array(), bb.arrayOffset());
}
byte [] row = new byte [vint];
System.arraycopy(bb.array(), bb.arrayOffset() + vintWidth,
row, 0, row.length);
// Skip over row.
int offset = vint + vintWidth;
firstByte = bb.get(offset);
vint = firstByte;
vintWidth = WritableUtils.decodeVIntSize(firstByte);
if (vintWidth != 1) {
vint = getBigVint(vintWidth, firstByte, bb.array(),
bb.arrayOffset() + offset);
}
byte [] column = new byte [vint];
System.arraycopy(bb.array(), bb.arrayOffset() + offset + vintWidth,
column, 0, column.length);
// Skip over column
offset += (vint + vintWidth);
long ts = bb.getLong(offset);
return new HStoreKey(row, column, ts);
} }
/** /**
@ -907,6 +705,382 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
public static HStoreKey create(final byte [] b, final int offset, public static HStoreKey create(final byte [] b, final int offset,
final int length) final int length)
throws IOException { throws IOException {
return (HStoreKey)Writables.getWritable(b, offset, length, new HStoreKey()); byte firstByte = b[offset];
int vint = firstByte;
int vintWidth = WritableUtils.decodeVIntSize(firstByte);
if (vintWidth != 1) {
vint = getBigVint(vintWidth, firstByte, b, offset);
}
byte [] row = new byte [vint];
System.arraycopy(b, offset + vintWidth,
row, 0, row.length);
// Skip over row.
int extraOffset = vint + vintWidth;
firstByte = b[offset + extraOffset];
vint = firstByte;
vintWidth = WritableUtils.decodeVIntSize(firstByte);
if (vintWidth != 1) {
vint = getBigVint(vintWidth, firstByte, b, offset + extraOffset);
}
byte [] column = new byte [vint];
System.arraycopy(b, offset + extraOffset + vintWidth,
column, 0, column.length);
// Skip over column
extraOffset += (vint + vintWidth);
return new HStoreKey(row, column, Bytes.toLong(b, offset + extraOffset));
}
/**
* Passed as comparator for memcache and for store files. See HBASE-868.
* Use this comparing keys in the -ROOT_ table.
*/
public static class HStoreKeyRootComparator extends HStoreKeyMetaComparator {
protected int compareRows(byte [] left, int loffset, int llength,
byte [] right, int roffset, int rlength) {
return compareRootRows(left, loffset, llength, right, roffset, rlength);
}
}
/**
* Passed as comparator for memcache and for store files. See HBASE-868.
* Use this comprator for keys in the .META. table.
*/
public static class HStoreKeyMetaComparator extends HStoreKeyComparator {
protected int compareRows(byte [] left, int loffset, int llength,
byte [] right, int roffset, int rlength) {
return compareMetaRows(left, loffset, llength, right, roffset, rlength);
}
}
/**
* Passed as comparator for memcache and for store files. See HBASE-868.
*/
public static class HStoreKeyComparator extends WritableComparator {
public HStoreKeyComparator() {
super(HStoreKey.class);
}
@SuppressWarnings("unchecked")
public int compare(final WritableComparable l,
final WritableComparable r) {
HStoreKey left = (HStoreKey)l;
HStoreKey right = (HStoreKey)r;
// We can be passed null
if (left == null && right == null) return 0;
if (left == null) return -1;
if (right == null) return 1;
byte [] lrow = left.getRow();
byte [] rrow = right.getRow();
int result = compareRows(lrow, 0, lrow.length, rrow, 0, rrow.length);
if (result != 0) {
return result;
}
result = left.getColumn() == null && right.getColumn() == null? 0:
left.getColumn() == null && right.getColumn() != null? -1:
left.getColumn() != null && right.getColumn() == null? 1:
Bytes.compareTo(left.getColumn(), right.getColumn());
if (result != 0) {
return result;
}
// The below older timestamps sorting ahead of newer timestamps looks
// wrong but it is intentional. This way, newer timestamps are first
// found when we iterate over a memcache and newer versions are the
// first we trip over when reading from a store file.
if (left.getTimestamp() < right.getTimestamp()) {
result = 1;
} else if (left.getTimestamp() > right.getTimestamp()) {
result = -1;
}
return result;
}
protected int compareRows(final byte [] left, final int loffset,
final int llength, final byte [] right, final int roffset,
final int rlength) {
return Bytes.compareTo(left, loffset, llength, right, roffset, rlength);
}
}
/**
* StoreKeyComparator for the -ROOT- table.
*/
public static class RootStoreKeyComparator
extends MetaStoreKeyComparator {
public int compareRows(byte [] left, int loffset, int llength,
byte [] right, int roffset, int rlength) {
return compareRootRows(left, loffset, llength, right, roffset, rlength);
}
}
/**
* StoreKeyComparator for the .META. table.
*/
public static class MetaStoreKeyComparator extends StoreKeyComparator {
public int compareRows(byte [] left, int loffset, int llength,
byte [] right, int roffset, int rlength) {
return compareMetaRows(left, loffset, llength, right, roffset, rlength);
}
}
/*
* @param left
* @param loffset
* @param llength
* @param right
* @param roffset
* @param rlength
* @return Result of comparing two rows from the -ROOT- table both of which
* are of the form .META.,(TABLE,REGIONNAME,REGIONID),REGIONID.
*/
protected static int compareRootRows(byte [] left, int loffset, int llength,
byte [] right, int roffset, int rlength) {
// Rows look like this: .META.,ROW_FROM_META,RID
// System.out.println("ROOT " + Bytes.toString(left, loffset, llength) +
// "---" + Bytes.toString(right, roffset, rlength));
int lmetaOffsetPlusDelimiter = loffset + 7; // '.META.,'
int leftFarDelimiter = getDelimiterInReverse(left, lmetaOffsetPlusDelimiter,
llength - lmetaOffsetPlusDelimiter, HRegionInfo.DELIMITER);
int rmetaOffsetPlusDelimiter = roffset + 7; // '.META.,'
int rightFarDelimiter = getDelimiterInReverse(right,
rmetaOffsetPlusDelimiter, rlength - rmetaOffsetPlusDelimiter,
HRegionInfo.DELIMITER);
if (leftFarDelimiter < 0 && rightFarDelimiter >= 0) {
// Nothing between .META. and regionid. Its first key.
return -1;
} else if (rightFarDelimiter < 0 && leftFarDelimiter >= 0) {
return 1;
} else if (leftFarDelimiter < 0 && rightFarDelimiter < 0) {
return 0;
}
int result = compareMetaRows(left, lmetaOffsetPlusDelimiter,
leftFarDelimiter - lmetaOffsetPlusDelimiter,
right, rmetaOffsetPlusDelimiter,
rightFarDelimiter - rmetaOffsetPlusDelimiter);
if (result != 0) {
return result;
}
// Compare last part of row, the rowid.
leftFarDelimiter++;
rightFarDelimiter++;
result = compareRowid(left, leftFarDelimiter, llength - leftFarDelimiter,
right, rightFarDelimiter, rlength - rightFarDelimiter);
return result;
}
/*
* @param left
* @param loffset
* @param llength
* @param right
* @param roffset
* @param rlength
* @return Result of comparing two rows from the .META. table both of which
* are of the form TABLE,REGIONNAME,REGIONID.
*/
protected static int compareMetaRows(final byte[] left, final int loffset,
final int llength, final byte[] right, final int roffset,
final int rlength) {
// System.out.println("META " + Bytes.toString(left, loffset, llength) +
// "---" + Bytes.toString(right, roffset, rlength));
int leftDelimiter = getDelimiter(left, loffset, llength,
HRegionInfo.DELIMITER);
int rightDelimiter = getDelimiter(right, roffset, rlength,
HRegionInfo.DELIMITER);
if (leftDelimiter < 0 && rightDelimiter >= 0) {
// Nothing between .META. and regionid. Its first key.
return -1;
} else if (rightDelimiter < 0 && leftDelimiter >= 0) {
return 1;
} else if (leftDelimiter < 0 && rightDelimiter < 0) {
return 0;
}
// Compare up to the delimiter
int result = Bytes.compareTo(left, loffset, leftDelimiter - loffset,
right, roffset, rightDelimiter - roffset);
if (result != 0) {
return result;
}
// Compare middle bit of the row.
// Move past delimiter
leftDelimiter++;
rightDelimiter++;
int leftFarDelimiter = getRequiredDelimiterInReverse(left, leftDelimiter,
llength - (leftDelimiter - loffset), HRegionInfo.DELIMITER);
int rightFarDelimiter = getRequiredDelimiterInReverse(right,
rightDelimiter, rlength - (rightDelimiter - roffset),
HRegionInfo.DELIMITER);
// Now compare middlesection of row.
result = Bytes.compareTo(left, leftDelimiter,
leftFarDelimiter - leftDelimiter, right, rightDelimiter,
rightFarDelimiter - rightDelimiter);
if (result != 0) {
return result;
}
// Compare last part of row, the rowid.
leftFarDelimiter++;
rightFarDelimiter++;
result = compareRowid(left, leftFarDelimiter,
llength - (leftFarDelimiter - loffset),
right, rightFarDelimiter, rlength - (rightFarDelimiter - roffset));
return result;
}
private static int compareRowid(byte[] left, int loffset, int llength,
byte[] right, int roffset, int rlength) {
return Bytes.compareTo(left, loffset, llength, right, roffset, rlength);
}
/**
* RawComparator for plain -- i.e. non-catalog table keys such as
* -ROOT- and .META. -- HStoreKeys. Compares at byte level. Knows how to
* handle the vints that introduce row and columns in the HSK byte array
* representation. Adds
* {@link #compareRows(byte[], int, int, byte[], int, int)} to
* {@link RawComparator}
*/
public static class StoreKeyComparator implements RawComparator<byte []> {
public StoreKeyComparator() {
super();
}
public int compare(final byte[] b1, final byte[] b2) {
return compare(b1, 0, b1.length, b2, 0, b2.length);
}
public int compare(final byte [] b1, int o1, int l1,
final byte [] b2, int o2, int l2) {
// Below is byte compare without creating new objects. Its awkward but
// seems no way around getting vint width, value, and compare result any
// other way. The passed byte arrays, b1 and b2, have a vint, row, vint,
// column, timestamp in them. The byte array was written by the
// #write(DataOutputStream) method above. See it to better understand the
// below.
// Calculate vint and vint width for rows in b1 and b2.
byte firstByte1 = b1[o1];
int vint1 = firstByte1;
int vintWidth1 = WritableUtils.decodeVIntSize(firstByte1);
if (vintWidth1 != 1) {
vint1 = getBigVint(vintWidth1, firstByte1, b1, o1);
}
byte firstByte2 = b2[o2];
int vint2 = firstByte2;
int vintWidth2 = WritableUtils.decodeVIntSize(firstByte2);
if (vintWidth2 != 1) {
vint2 = getBigVint(vintWidth2, firstByte2, b2, o2);
}
// Compare the rows.
int result = compareRows(b1, o1 + vintWidth1, vint1,
b2, o2 + vintWidth2, vint2);
if (result != 0) {
return result;
}
// Update offsets and lengths so we are aligned on columns.
int diff1 = vintWidth1 + vint1;
o1 += diff1;
l1 -= diff1;
int diff2 = vintWidth2 + vint2;
o2 += diff2;
l2 -= diff2;
// Calculate vint and vint width for columns in b1 and b2.
firstByte1 = b1[o1];
vint1 = firstByte1;
vintWidth1 = WritableUtils.decodeVIntSize(firstByte1);
if (vintWidth1 != 1) {
vint1 = getBigVint(vintWidth1, firstByte1, b1, o1);
}
firstByte2 = b2[o2];
vint2 = firstByte2;
vintWidth2 = WritableUtils.decodeVIntSize(firstByte2);
if (vintWidth2 != 1) {
vint2 = getBigVint(vintWidth2, firstByte2, b2, o2);
}
// Compare columns.
// System.out.println("COL <" + Bytes.toString(b1, o1 + vintWidth1, vint1) +
// "> <" + Bytes.toString(b2, o2 + vintWidth2, vint2) + ">");
result = Bytes.compareTo(b1, o1 + vintWidth1, vint1,
b2, o2 + vintWidth2, vint2);
if (result != 0) {
return result;
}
// Update offsets and lengths.
diff1 = vintWidth1 + vint1;
o1 += diff1;
l1 -= diff1;
diff2 = vintWidth2 + vint2;
o2 += diff2;
l2 -= diff2;
// The below older timestamps sorting ahead of newer timestamps looks
// wrong but it is intentional. This way, newer timestamps are first
// found when we iterate over a memcache and newer versions are the
// first we trip over when reading from a store file.
for (int i = 0; i < l1; i++) {
int leftb = b1[o1 + i] & 0xff;
int rightb = b2[o2 + i] & 0xff;
if (leftb < rightb) {
return 1;
} else if (leftb > rightb) {
return -1;
}
}
return 0;
}
/**
* @param left
* @param right
* @return Result comparing rows.
*/
public int compareRows(final byte [] left, final byte [] right) {
return compareRows(left, 0, left.length, right, 0, right.length);
}
/**
* @param left
* @param loffset
* @param llength
* @param right
* @param roffset
* @param rlength
* @return Result comparing rows.
*/
public int compareRows(final byte [] left, final int loffset,
final int llength, final byte [] right, final int roffset, final int rlength) {
return Bytes.compareTo(left, loffset, llength, right, roffset, rlength);
}
}
/**
* @param hri
* @return Compatible comparator
*/
public static WritableComparator getWritableComparator(final HRegionInfo hri) {
return hri.isRootRegion()?
new HStoreKey.HStoreKeyRootComparator(): hri.isMetaRegion()?
new HStoreKey.HStoreKeyMetaComparator():
new HStoreKey.HStoreKeyComparator();
}
/**
* @param hri
* @return Compatible raw comparator
*/
public static StoreKeyComparator getRawComparator(final HRegionInfo hri) {
return hri.isRootRegion()? ROOT_COMPARATOR:
hri.isMetaRegion()? META_COMPARATOR: META_COMPARATOR;
}
/**
* @param tablename
* @return Compatible raw comparator
*/
public static HStoreKey.StoreKeyComparator getComparator(final byte [] tablename) {
return Bytes.equals(HTableDescriptor.ROOT_TABLEDESC.getName(), tablename)?
ROOT_COMPARATOR:
(Bytes.equals(HTableDescriptor.META_TABLEDESC.getName(),tablename))?
META_COMPARATOR: PLAIN_COMPARATOR;
} }
} }


@@ -31,6 +31,7 @@ import java.util.Map;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.tableindexed.IndexSpecification;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
 import org.apache.hadoop.hbase.rest.serializer.IRestSerializer;
 import org.apache.hadoop.hbase.rest.serializer.ISerializable;
@@ -667,7 +668,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor>, I
       HConstants.ROOT_TABLE_NAME,
       new HColumnDescriptor[] { new HColumnDescriptor(HConstants.COLUMN_FAMILY,
           10, // Ten is arbitrary number. Keep versions to help debuggging.
-          HColumnDescriptor.CompressionType.NONE, false, true,
+          Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
           Integer.MAX_VALUE, HConstants.FOREVER, false) });
 
   /** Table descriptor for <code>.META.</code> catalog table */
@@ -675,11 +676,12 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor>, I
       HConstants.META_TABLE_NAME, new HColumnDescriptor[] {
         new HColumnDescriptor(HConstants.COLUMN_FAMILY,
           10, // Ten is arbitrary number. Keep versions to help debuggging.
-          HColumnDescriptor.CompressionType.NONE, false, true,
+          Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
           Integer.MAX_VALUE, HConstants.FOREVER, false),
         new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN,
-          HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE,
-          false, false, Integer.MAX_VALUE, HConstants.WEEK_IN_SECONDS, false)});
+          HConstants.ALL_VERSIONS, Compression.Algorithm.NONE.getName(),
+          false, false, 8 * 1024,
+          Integer.MAX_VALUE, HConstants.WEEK_IN_SECONDS, false)});
 
   /* (non-Javadoc)
    * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML()


@@ -65,6 +65,7 @@ import org.apache.hadoop.ipc.RemoteException;
  * Used by {@link HTable} and {@link HBaseAdmin}
  */
 public class HConnectionManager implements HConstants {
+
   /*
    * Not instantiable.
    */
@@ -646,7 +647,7 @@ public class HConnectionManager implements HConstants {
           // signifying that the region we're checking is actually the last
           // region in the table.
           if (HStoreKey.equalsTwoRowKeys(endKey, HConstants.EMPTY_END_ROW) ||
-              HStoreKey.compareTwoRowKeys(endKey, row) > 0) {
+              HStoreKey.getComparator(tableName).compareRows(endKey, row) > 0) {
             return possibleRegion;
           }
         }
@@ -683,7 +684,7 @@ public class HConnectionManager implements HConstants {
         // by nature of the map, we know that the start key has to be <
         // otherwise it wouldn't be in the headMap.
-        if (HStoreKey.compareTwoRowKeys(endKey, row) <= 0) {
+        if (HStoreKey.getComparator(tableName).compareRows(endKey, row) <= 0) {
           // delete any matching entry
           HRegionLocation rl =
             tableLocations.remove(matchingRegions.lastKey());
@@ -1023,4 +1024,4 @@ public class HConnectionManager implements HConstants {
       }
     }
   }
-}
+}


@@ -303,7 +303,7 @@ public class HTable {
    * @return value for specified row/column
    * @throws IOException
    */
-  public Cell[] get(final String row, final String column, int numVersions)
+  public Cell [] get(final String row, final String column, int numVersions)
   throws IOException {
     return get(Bytes.toBytes(row), Bytes.toBytes(column), numVersions);
   }
@@ -337,7 +337,7 @@ public class HTable {
    * @return Array of Cells.
    * @throws IOException
    */
-  public Cell[] get(final byte [] row, final byte [] column,
+  public Cell [] get(final byte [] row, final byte [] column,
     final int numVersions)
   throws IOException {
     return connection.getRegionServerWithRetries(
@@ -1276,11 +1276,12 @@ public class HTable {
         if (rl != null) {
           lockId = rl.getLockId();
         }
-        return server.exists(location.getRegionInfo().getRegionName(), row,
-          column, timestamp, lockId);
+        return Boolean.valueOf(server.
+          exists(location.getRegionInfo().getRegionName(), row,
+          column, timestamp, lockId));
       }
     }
-    );
+    ).booleanValue();
   }
 
   /**

=== MetaScanner.java ===

@@ -7,6 +7,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.util.Bytes;
 /**
  * Scanner class that contains the <code>.META.</code> table scanning logic
@@ -69,7 +70,7 @@ class MetaScanner implements HConstants {
           callable.setClose();
           connection.getRegionServerWithRetries(callable);
         }
-      } while (HStoreKey.compareTwoRowKeys(startRow, LAST_ROW) != 0);
+      } while (Bytes.compareTo(startRow, LAST_ROW) != 0);
   }
   /**

=== RetriesExhaustedException.java ===

@@ -47,9 +47,9 @@ public class RetriesExhaustedException extends IOException {
     StringBuilder buffer = new StringBuilder("Trying to contact region server ");
     buffer.append(serverName);
     buffer.append(" for region ");
-    buffer.append(Bytes.toString(regionName));
+    buffer.append(regionName == null? "": Bytes.toString(regionName));
     buffer.append(", row '");
-    buffer.append(Bytes.toString(row));
+    buffer.append(row == null? "": Bytes.toString(row));
     buffer.append("', but failed after ");
     buffer.append(numTries + 1);
     buffer.append(" attempts.\nExceptions:\n");

=== ScannerCallable.java ===

@@ -76,7 +76,7 @@ public class ScannerCallable extends ServerCallable<RowResult[]> {
       // open the scanner
       scannerId = openScanner();
     } else {
-      RowResult[] rrs = server.next(scannerId, caching);
+      RowResult [] rrs = server.next(scannerId, caching);
       return rrs.length == 0 ? null : rrs;
     }
     return null;

=== UnmodifyableHColumnDescriptor.java ===

@@ -1,6 +1,7 @@
 package org.apache.hadoop.hbase.client;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.io.hfile.Compression;
 /**
  * Immutable HColumnDescriptor
@@ -50,7 +51,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
   }
   @Override
-  public void setCompressionType(CompressionType type) {
+  public void setCompressionType(Compression.Algorithm type) {
     throw new UnsupportedOperationException("HColumnDescriptor is read-only");
   }
@@ -58,4 +59,4 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
   public void setMapFileIndexInterval(int interval) {
     throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 }

=== Cell.java ===

@@ -55,6 +55,7 @@ public class Cell implements Writable, Iterable<Map.Entry<Long, byte[]>>,
   /** For Writable compatibility */
   public Cell() {
+    super();
   }
   /**
@@ -93,7 +94,7 @@ public class Cell implements Writable, Iterable<Map.Entry<Long, byte[]>>,
    * @param ts
    *          array of timestamps
    */
-  public Cell(String[] vals, long[] ts) {
+  public Cell(String [] vals, long[] ts) {
     this(Bytes.toByteArrays(vals), ts);
   }
@@ -235,4 +236,4 @@ public class Cell implements Writable, Iterable<Map.Entry<Long, byte[]>>,
   throws HBaseRestException {
     serializer.serializeCell(this);
   }
 }

=== HBaseMapFile.java ===

@@ -72,10 +72,10 @@ public class HBaseMapFile extends MapFile {
     public HBaseReader(FileSystem fs, String dirName, Configuration conf,
       boolean blockCacheEnabled, HRegionInfo hri)
     throws IOException {
-      super(fs, dirName, new HStoreKey.HStoreKeyWritableComparator(),
+      super(fs, dirName, new HStoreKey.HStoreKeyComparator(),
         conf, false); // defer opening streams
       this.blockCacheEnabled = blockCacheEnabled;
-      open(fs, dirName, new HStoreKey.HStoreKeyWritableComparator(), conf);
+      open(fs, dirName, new HStoreKey.HStoreKeyComparator(), conf);
       // Force reading of the mapfile index by calling midKey. Reading the
       // index will bring the index into memory over here on the client and
@@ -121,7 +121,7 @@ public class HBaseMapFile extends MapFile {
     public HBaseWriter(Configuration conf, FileSystem fs, String dirName,
       SequenceFile.CompressionType compression, final HRegionInfo hri)
     throws IOException {
-      super(conf, fs, dirName, new HStoreKey.HStoreKeyWritableComparator(),
+      super(conf, fs, dirName, new HStoreKey.HStoreKeyComparator(),
         VALUE_CLASS, compression);
       // Default for mapfiles is 128. Makes random reads faster if we
       // have more keys indexed and we're not 'next'-ing around in the

=== HalfHFileReader.java ===

@@ -73,6 +73,10 @@ public class HalfHFileReader extends HFile.Reader {
     this.top = Reference.isTopFileRegion(r.getFileRegion());
   }
+  protected boolean isTop() {
+    return this.top;
+  }
   public HFileScanner getScanner() {
     final HFileScanner s = super.getScanner();
     return new HFileScanner() {

=== HbaseMapWritable.java ===

@@ -46,25 +46,9 @@ import org.apache.hadoop.util.ReflectionUtils;
  * @param <V> value Expects a Writable or byte [].
  */
 public class HbaseMapWritable <K,V>
-implements SortedMap<byte[],V>, Configurable, Writable,
-  CodeToClassAndBack{
+implements SortedMap<byte[],V>, Configurable, Writable, CodeToClassAndBack{
   private AtomicReference<Configuration> conf = null;
   protected SortedMap<byte [], V> instance = null;
-  // Static maps of code to class and vice versa. Includes types used in hbase
-  // only. These maps are now initialized in a static loader interface instead
-  // of in a static contructor for this class, this is done so that it is
-  // possible to have a regular contructor here, so that different params can
-  // be used.
-  // Removed the old types like Text from the maps, if needed to add more types
-  // this can be done in the StaticHBaseMapWritableLoader interface. Only
-  // byte[] and Cell are supported now.
-  // static final Map<Byte, Class<?>> CODE_TO_CLASS =
-  //   new HashMap<Byte, Class<?>>();
-  // static final Map<Class<?>, Byte> CLASS_TO_CODE =
-  //   new HashMap<Class<?>, Byte>();
   /**
    * The default contructor where a TreeMap is used
@@ -73,11 +57,11 @@ implements SortedMap<byte[],V>, Configurable, Writable,
     this (new TreeMap<byte [], V>(Bytes.BYTES_COMPARATOR));
   }
   /**
    * Contructor where another SortedMap can be used
    *
    * @param map the SortedMap to be used
-   **/
+   */
   public HbaseMapWritable(SortedMap<byte[], V> map){
     conf = new AtomicReference<Configuration>();
     instance = map;
@@ -233,4 +217,4 @@ implements SortedMap<byte[],V>, Configurable, Writable,
     this.instance.put(key, value);
     }
   }
 }

=== RowResult.java ===

@@ -78,13 +78,13 @@ public class RowResult implements Writable, SortedMap<byte [], Cell>, ISerializa
   // Map interface
   //
-  public Cell put(byte [] key,
-    Cell value) {
+  public Cell put(@SuppressWarnings("unused") byte [] key,
+    @SuppressWarnings("unused") Cell value) {
     throw new UnsupportedOperationException("RowResult is read-only!");
   }
   @SuppressWarnings("unchecked")
-  public void putAll(Map map) {
+  public void putAll(@SuppressWarnings("unused") Map map) {
     throw new UnsupportedOperationException("RowResult is read-only!");
   }
@@ -92,7 +92,7 @@ public class RowResult implements Writable, SortedMap<byte [], Cell>, ISerializa
     return this.cells.get(key);
   }
-  public Cell remove(Object key) {
+  public Cell remove(@SuppressWarnings("unused") Object key) {
     throw new UnsupportedOperationException("RowResult is read-only!");
   }
@@ -104,7 +104,7 @@ public class RowResult implements Writable, SortedMap<byte [], Cell>, ISerializa
     return cells.containsKey(Bytes.toBytes(key));
   }
-  public boolean containsValue(Object value) {
+  public boolean containsValue(@SuppressWarnings("unused") Object value) {
     throw new UnsupportedOperationException("Don't support containsValue!");
   }
@@ -135,7 +135,7 @@ public class RowResult implements Writable, SortedMap<byte [], Cell>, ISerializa
   /**
    * This method used solely for the REST serialization
    *
-   * @return
+   * @return Cells
    */
   @TOJSON
   public RestCell[] getCells() {
@@ -211,7 +211,7 @@ public class RowResult implements Writable, SortedMap<byte [], Cell>, ISerializa
       this.cell = cell;
     }
-    public Cell setValue(Cell c) {
+    public Cell setValue(@SuppressWarnings("unused") Cell c) {
       throw new UnsupportedOperationException("RowResult is read-only!");
     }

=== Compression.java ===

@@ -297,7 +297,7 @@ public final class Compression {
     }
   }
-  static Algorithm getCompressionAlgorithmByName(String compressName) {
+  public static Algorithm getCompressionAlgorithmByName(String compressName) {
     Algorithm[] algos = Algorithm.class.getEnumConstants();
     for (Algorithm a : algos) {
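With the lookup now public, code outside the hfile package can map the compression name stored in a column descriptor back to the enum; a small sketch:

import org.apache.hadoop.hbase.io.hfile.Compression;

public class CompressionLookupSketch {
  public static void main(String [] args) {
    // Resolve the name string back to its algorithm constant.
    String stored = Compression.Algorithm.NONE.getName();
    Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(stored);
    System.out.println("resolved algorithm: " + algo);
  }
}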

=== HFile.java ===

@@ -141,8 +141,10 @@ public class HFile {
   /**
    * Default compression: none.
    */
+  public final static Compression.Algorithm DEFAULT_COMPRESSION_ALGORITHM =
+    Compression.Algorithm.NONE;
   public final static String DEFAULT_COMPRESSION =
-    Compression.Algorithm.NONE.getName();
+    DEFAULT_COMPRESSION_ALGORITHM.getName();
   /**
    * HFile Writer.
@@ -216,7 +218,7 @@ public class HFile {
      */
     public Writer(FileSystem fs, Path path)
     throws IOException {
-      this(fs, path, DEFAULT_BLOCKSIZE, null, null);
+      this(fs, path, DEFAULT_BLOCKSIZE, null, null, false);
     }
     /**
@@ -226,12 +228,35 @@ public class HFile {
      * @param blocksize
      * @param compress
      * @param comparator
+     * @param bloomfilter
+     * @throws IOException
      * @throws IOException
      */
-    public Writer(FileSystem fs, Path path, int blocksize, String compress,
-      final RawComparator<byte []> comparator)
+    public Writer(FileSystem fs, Path path, int blocksize,
+      String compress, final RawComparator<byte []> comparator)
     throws IOException {
-      this(fs.create(path), blocksize, compress, comparator);
+      this(fs, path, blocksize,
+        compress == null? DEFAULT_COMPRESSION_ALGORITHM:
+        Compression.getCompressionAlgorithmByName(compress),
+        comparator, false);
+    }
+    /**
+     * Constructor that takes a Path.
+     * @param fs
+     * @param path
+     * @param blocksize
+     * @param compress
+     * @param comparator
+     * @param bloomfilter
+     * @throws IOException
+     */
+    public Writer(FileSystem fs, Path path, int blocksize,
+      Compression.Algorithm compress,
+      final RawComparator<byte []> comparator,
+      final boolean bloomfilter)
+    throws IOException {
+      this(fs.create(path), blocksize, compress, comparator, bloomfilter);
       this.closeOutputStream = true;
       this.name = path.toString();
       this.path = path;
@@ -243,19 +268,38 @@ public class HFile {
      * @param blocksize
      * @param compress
      * @param c
+     * @param bloomfilter
      * @throws IOException
      */
     public Writer(final FSDataOutputStream ostream, final int blocksize,
       final String compress, final RawComparator<byte []> c)
+    throws IOException {
+      this(ostream, blocksize,
+        compress == null? DEFAULT_COMPRESSION_ALGORITHM:
+        Compression.getCompressionAlgorithmByName(compress), c, false);
+    }
+    /**
+     * Constructor that takes a stream.
+     * @param ostream Stream to use.
+     * @param blocksize
+     * @param compress
+     * @param c
+     * @param bloomfilter
+     * @throws IOException
+     */
+    public Writer(final FSDataOutputStream ostream, final int blocksize,
+      final Compression.Algorithm compress,
+      final RawComparator<byte []> c,
+      final boolean bloomfilter)
     throws IOException {
       this.outputStream = ostream;
       this.closeOutputStream = false;
       this.blocksize = blocksize;
       this.comparator = c == null? Bytes.BYTES_RAWCOMPARATOR: c;
       this.name = this.outputStream.toString();
-      this.compressAlgo =
-        Compression.getCompressionAlgorithmByName(compress == null?
-          Compression.Algorithm.NONE.getName(): compress);
+      this.compressAlgo = compress == null?
+        DEFAULT_COMPRESSION_ALGORITHM: compress;
     }
     /*
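Taken together, the Writer constructors above mean a caller can pass the Compression.Algorithm enum and the bloom-filter flag directly, while the old String-compression signatures remain as thin wrappers. A hedged sketch (the output path and block size are illustrative):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileWriterSketch {
  static HFile.Writer openWriter(FileSystem fs) throws IOException {
    Path path = new Path("/tmp/example.hfile");  // hypothetical output path
    return new HFile.Writer(fs, path, 64 * 1024, // 64k blocks
      Compression.Algorithm.NONE,                // enum instead of a name string
      Bytes.BYTES_RAWCOMPARATOR,                 // raw byte [] key comparator
      false);                                    // bloomfilter flag, new in this patch
  }
}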
@@ -391,7 +435,7 @@ public class HFile {
     /**
      * Add key/value to file.
-     * Keys must be added in an order that agrees with the RawComparator passed
+     * Keys must be added in an order that agrees with the Comparator passed
      * on construction.
      * @param key Key to add. Cannot be empty nor null.
      * @param value Value to add. Cannot be empty nor null.
@@ -430,7 +474,7 @@ public class HFile {
       if (this.lastKey != null) {
         if (this.comparator.compare(this.lastKey, key) > 0) {
           throw new IOException("Added a key not lexically larger than" +
-            " previous: key=" + Bytes.toString(key) + ", lastkey=" +
+            " previous key=" + Bytes.toString(key) + ", lastkey=" +
             Bytes.toString(lastKey));
         }
       }
@@ -622,14 +666,22 @@ public class HFile {
     public String toString() {
       return "reader=" + this.name +
         (!isFileInfoLoaded()? "":
           ", compression=" + this.compressAlgo.getName() +
-          ", firstKey=" + Bytes.toString(getFirstKey()) +
-          ", lastKey=" + Bytes.toString(getLastKey()) +
+          ", firstKey=" + toStringFirstKey() +
+          ", lastKey=" + toStringLastKey()) +
           ", avgKeyLen=" + this.avgKeyLen +
           ", avgValueLen=" + this.avgValueLen +
           ", entries=" + this.trailer.entryCount +
-          ", length=" + this.fileSize);
+          ", length=" + this.fileSize;
+    }
+    protected String toStringFirstKey() {
+      return Bytes.toString(getFirstKey());
+    }
+    protected String toStringLastKey() {
+      return Bytes.toString(getFirstKey());
     }
     public long length() {
@@ -661,7 +713,7 @@ public class HFile {
       // Read in the metadata index.
       if (trailer.metaIndexCount > 0) {
-        this.metaIndex = BlockIndex.readIndex(Bytes.BYTES_RAWCOMPARATOR,
+        this.metaIndex = BlockIndex.readIndex(this.comparator,
           this.istream, this.trailer.metaIndexOffset, trailer.metaIndexCount);
       }
       this.fileInfoLoaded = true;
@@ -679,7 +731,7 @@ public class HFile {
         return null;
       }
       try {
-        return (RawComparator)Class.forName(clazzName).newInstance();
+        return (RawComparator<byte[]>) Class.forName(clazzName).newInstance();
       } catch (InstantiationException e) {
         throw new IOException(e);
       } catch (IllegalAccessException e) {
@@ -1014,7 +1066,7 @@ public class HFile {
         klen = block.getInt();
         vlen = block.getInt();
         int comp = this.reader.comparator.compare(key, 0, key.length,
           block.array(), block.arrayOffset() + block.position(), klen);
         if (comp == 0) {
           if (seekBefore) {
             block.position(block.position() - lastLen - 16);
@@ -1035,8 +1087,10 @@ public class HFile {
         }
         block.position(block.position() + klen + vlen);
         lastLen = klen + vlen ;
-      } while( block.remaining() > 0 );
+      } while(block.remaining() > 0);
       // ok we are at the end, so go back a littleeeeee....
+      // The 8 in the below is intentionally different to the 16s in the above
+      // Do the math you you'll figure it.
       block.position(block.position() - lastLen - 8);
       currKeyLen = block.getInt();
       currValueLen = block.getInt();
@@ -1213,7 +1267,7 @@ public class HFile {
     /* Needed doing lookup on blocks.
      */
-    RawComparator<byte []> comparator;
+    final RawComparator<byte []> comparator;
     /*
      * Shutdown default constructor
@@ -1227,7 +1281,7 @@ public class HFile {
      * Constructor
      * @param trailer File tail structure with index stats.
      */
-    BlockIndex(final RawComparator<byte []> c) {
+    BlockIndex(final RawComparator<byte []>c) {
       this.comparator = c;
       // Guess that cost of three arrays + this object is 4 * 8 bytes.
       this.size += (4 * 8);

=== MetaRegion.java ===

@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -87,7 +86,7 @@ public class MetaRegion implements Comparable<MetaRegion> {
   public int compareTo(MetaRegion other) {
     int result = Bytes.compareTo(this.regionName, other.getRegionName());
     if(result == 0) {
-      result = HStoreKey.compareTwoRowKeys(this.startKey, other.getStartKey());
+      result = Bytes.compareTo(this.startKey, other.getStartKey());
       if (result == 0) {
         // Might be on different host?
         result = this.server.compareTo(other.server);

=== HAbstractScanner.java ===

@@ -125,7 +125,8 @@ public abstract class HAbstractScanner implements InternalScanner {
   private boolean multipleMatchers;
   /** Constructor for abstract base class */
-  protected HAbstractScanner(long timestamp, byte [][] targetCols) throws IOException {
+  protected HAbstractScanner(long timestamp, byte [][] targetCols)
+  throws IOException {
     this.timestamp = timestamp;
     this.wildcardMatch = false;
     this.multipleMatchers = false;

=== HRegion.java ===

@@ -1440,11 +1440,10 @@ public class HRegion implements HConstants {
     long now = System.currentTimeMillis();
     try {
       for (Store store : stores.values()) {
-        List<HStoreKey> keys =
-          store.getKeys(new HStoreKey(row, ts),
-            ALL_VERSIONS, now, null);
+        List<HStoreKey> keys = store.getKeys(new HStoreKey(row, ts),
+          ALL_VERSIONS, now, null);
         TreeMap<HStoreKey, byte []> edits = new TreeMap<HStoreKey, byte []>(
-          new HStoreKey.HStoreKeyWritableComparator());
+          HStoreKey.getWritableComparator(store.getHRegionInfo()));
         for (HStoreKey key: keys) {
           edits.put(key, HLogEdit.DELETED_BYTES);
         }
@@ -1474,15 +1473,14 @@ public class HRegion implements HConstants {
     long now = System.currentTimeMillis();
     try {
       for (Store store : stores.values()) {
-        List<HStoreKey> keys =
-          store.getKeys(new HStoreKey(row, timestamp),
+        List<HStoreKey> keys = store.getKeys(new HStoreKey(row, timestamp),
           ALL_VERSIONS, now, columnPattern);
         TreeMap<HStoreKey, byte []> edits = new TreeMap<HStoreKey, byte []>(
-          new HStoreKey.HStoreKeyWritableComparator());
+          HStoreKey.getWritableComparator(store.getHRegionInfo()));
         for (HStoreKey key: keys) {
           edits.put(key, HLogEdit.DELETED_BYTES);
         }
         update(edits);
       }
     } finally {
       if(lockid == null) releaseRowLock(lid);
@@ -1514,7 +1512,7 @@ public class HRegion implements HConstants {
           ALL_VERSIONS, now, null);
         // delete all the cells
         TreeMap<HStoreKey, byte []> edits = new TreeMap<HStoreKey, byte []>(
-          new HStoreKey.HStoreKeyWritableComparator());
+          HStoreKey.getWritableComparator(store.getHRegionInfo()));
         for (HStoreKey key: keys) {
           edits.put(key, HLogEdit.DELETED_BYTES);
         }
@@ -1553,7 +1551,7 @@ public class HRegion implements HConstants {
         List<HStoreKey> keys = store.getKeys(new HStoreKey(row, timestamp),
           ALL_VERSIONS, now, null);
         TreeMap<HStoreKey, byte []> edits = new TreeMap<HStoreKey, byte []>(
-          new HStoreKey.HStoreKeyWritableComparator());
+          HStoreKey.getWritableComparator(store.getHRegionInfo()));
         for (HStoreKey key: keys) {
           edits.put(key, HLogEdit.DELETED_BYTES);
         }
@@ -1583,8 +1581,11 @@ public class HRegion implements HConstants {
     HStoreKey origin = new HStoreKey(row, column, ts);
     Set<HStoreKey> keys = getKeys(origin, versions);
     if (keys.size() > 0) {
+      // I think the below map doesn't have to be exactly storetd. Its deletes
+      // they don't have to go in in exact sorted order (we don't have to worry
+      // about the meta or root sort comparator here).
       TreeMap<HStoreKey, byte []> edits = new TreeMap<HStoreKey, byte []>(
-        new HStoreKey.HStoreKeyWritableComparator());
+        new HStoreKey.HStoreKeyComparator());
       for (HStoreKey key: keys) {
         edits.put(key, HLogEdit.DELETED_BYTES);
       }
@@ -1650,8 +1651,10 @@ public class HRegion implements HConstants {
     checkReadOnly();
     TreeMap<HStoreKey, byte []> targets = this.targetColumns.get(lockid);
     if (targets == null) {
-      targets = new TreeMap<HStoreKey, byte []>(
-        new HStoreKey.HStoreKeyWritableComparator());
+      // I think the below map doesn't have to be exactly storetd. Its deletes
+      // they don't have to go in in exact sorted order (we don't have to worry
+      // about the meta or root sort comparator here).
+      targets = new TreeMap<HStoreKey, byte []>(new HStoreKey.HStoreKeyComparator());
       this.targetColumns.put(lockid, targets);
     }
     targets.put(key, val);
@@ -1948,9 +1951,7 @@ public class HRegion implements HConstants {
       this.scanners = new InternalScanner[stores.length];
       try {
         for (int i = 0; i < stores.length; i++) {
           // Only pass relevant columns to each store
           List<byte[]> columns = new ArrayList<byte[]>();
           for (int j = 0; j < cols.length; j++) {
             if (Bytes.equals(HStoreKey.getFamily(cols[j]),
@@ -2007,8 +2008,8 @@ public class HRegion implements HConstants {
       for (int i = 0; i < this.keys.length; i++) {
         if (scanners[i] != null &&
           (chosenRow == null ||
-          (HStoreKey.compareTwoRowKeys(this.keys[i].getRow(), chosenRow) < 0) ||
-          ((HStoreKey.compareTwoRowKeys(this.keys[i].getRow(), chosenRow) == 0) &&
+          (Bytes.compareTo(this.keys[i].getRow(), chosenRow) < 0) ||
+          ((Bytes.compareTo(this.keys[i].getRow(), chosenRow) == 0) &&
           (keys[i].getTimestamp() > chosenTimestamp)))) {
           chosenRow = keys[i].getRow();
           chosenTimestamp = keys[i].getTimestamp();
@@ -2025,7 +2026,7 @@ public class HRegion implements HConstants {
         for (int i = 0; i < scanners.length; i++) {
           if (scanners[i] != null &&
-            HStoreKey.compareTwoRowKeys(this.keys[i].getRow(), chosenRow) == 0) {
+            Bytes.compareTo(this.keys[i].getRow(), chosenRow) == 0) {
             // NOTE: We used to do results.putAll(resultSets[i]);
             // but this had the effect of overwriting newer
             // values with older ones. So now we only insert
@@ -2047,7 +2048,7 @@ public class HRegion implements HConstants {
           // If the current scanner is non-null AND has a lower-or-equal
           // row label, then its timestamp is bad. We need to advance it.
           while ((scanners[i] != null) &&
-            (HStoreKey.compareTwoRowKeys(this.keys[i].getRow(), chosenRow) <= 0)) {
+            (Bytes.compareTo(this.keys[i].getRow(), chosenRow) <= 0)) {
             resultSets[i].clear();
             if (!scanners[i].next(keys[i], resultSets[i])) {
               closeScanner(i);
@@ -2228,7 +2229,7 @@ public class HRegion implements HConstants {
       HStoreKey key = new HStoreKey(row, COL_REGIONINFO,
         System.currentTimeMillis());
       TreeMap<HStoreKey, byte[]> edits = new TreeMap<HStoreKey, byte[]>(
-        new HStoreKey.HStoreKeyWritableComparator());
+        HStoreKey.getWritableComparator(r.getRegionInfo()));
       edits.put(key, Writables.getBytes(r.getRegionInfo()));
       meta.update(edits);
     } finally {
@@ -2351,9 +2352,9 @@ public class HRegion implements HConstants {
    */
   public static boolean rowIsInRange(HRegionInfo info, final byte [] row) {
     return ((info.getStartKey().length == 0) ||
-      (HStoreKey.compareTwoRowKeys(info.getStartKey(), row) <= 0)) &&
+      (Bytes.compareTo(info.getStartKey(), row) <= 0)) &&
       ((info.getEndKey().length == 0) ||
-      (HStoreKey.compareTwoRowKeys(info.getEndKey(), row) > 0));
+      (Bytes.compareTo(info.getEndKey(), row) > 0));
   }
   /**
@@ -2396,7 +2397,7 @@ public class HRegion implements HConstants {
     }
     // A's start key is null but B's isn't. Assume A comes before B
     } else if ((srcB.getStartKey() == null) ||
-      (HStoreKey.compareTwoRowKeys(srcA.getStartKey(), srcB.getStartKey()) > 0)) {
+      (Bytes.compareTo(srcA.getStartKey(), srcB.getStartKey()) > 0)) {
       a = srcB;
       b = srcA;
     }
@@ -2448,14 +2449,14 @@ public class HRegion implements HConstants {
     final byte [] startKey = HStoreKey.equalsTwoRowKeys(a.getStartKey(),
         EMPTY_BYTE_ARRAY) ||
       HStoreKey.equalsTwoRowKeys(b.getStartKey(), EMPTY_BYTE_ARRAY)?
-        EMPTY_BYTE_ARRAY: HStoreKey.compareTwoRowKeys(a.getStartKey(),
+        EMPTY_BYTE_ARRAY: Bytes.compareTo(a.getStartKey(),
           b.getStartKey()) <= 0?
         a.getStartKey(): b.getStartKey();
     final byte [] endKey = HStoreKey.equalsTwoRowKeys(a.getEndKey(),
         EMPTY_BYTE_ARRAY) ||
       HStoreKey.equalsTwoRowKeys(b.getEndKey(), EMPTY_BYTE_ARRAY)?
         EMPTY_BYTE_ARRAY:
-      HStoreKey.compareTwoRowKeys(a.getEndKey(), b.getEndKey()) <= 0? b.getEndKey(): a.getEndKey();
+      Bytes.compareTo(a.getEndKey(), b.getEndKey()) <= 0? b.getEndKey(): a.getEndKey();
     HRegionInfo newRegionInfo = new HRegionInfo(tabledesc, startKey, endKey);
     LOG.info("Creating new region " + newRegionInfo.toString());
@@ -2584,4 +2585,4 @@ public class HRegion implements HConstants {
       }
     }
   }
 }
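All the delete paths above build their edit maps the same way; pulled out as a sketch, with the HRegionInfo standing in for whichever region the store belongs to (the surrounding class is illustrative, the getWritableComparator call is taken verbatim from the hunks above):

import java.util.TreeMap;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HStoreKey;

public class DeleteEditsSketch {
  // Root, meta and user regions order HStoreKeys differently, so the edits map
  // must be created with the comparator that matches the region it targets.
  static TreeMap<HStoreKey, byte []> newEditsMap(HRegionInfo regionInfo) {
    return new TreeMap<HStoreKey, byte []>(
      HStoreKey.getWritableComparator(regionInfo));
  }
}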

=== HRegionServer.java ===

@@ -1608,8 +1608,8 @@ public class HRegionServer implements HConstants, HRegionInterface, HBaseRPCErro
     }
   }
-  public void batchUpdate(final byte [] regionName, BatchUpdate b,
-    long lockId) throws IOException {
+  public void batchUpdate(final byte [] regionName, BatchUpdate b, long lockId)
+  throws IOException {
     if (b.getRow() == null)
       throw new IllegalArgumentException("update has null row");

=== Memcache.java ===

@@ -26,6 +26,7 @@ import java.lang.management.RuntimeMXBean;
 import java.rmi.UnexpectedException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -68,13 +69,16 @@ class Memcache {
   private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+  private final Comparator<HStoreKey> comparator;
+  private final HStoreKey.StoreKeyComparator rawcomparator;
   /**
    * Default constructor. Used for tests.
    */
+  @SuppressWarnings("unchecked")
   public Memcache() {
-    this.ttl = HConstants.FOREVER;
-    this.memcache = createSynchronizedSortedMap();
-    this.snapshot = createSynchronizedSortedMap();
+    this(HConstants.FOREVER, new HStoreKey.HStoreKeyComparator(),
+      new HStoreKey.StoreKeyComparator());
   }
   /**
@@ -82,21 +86,21 @@ class Memcache {
    * @param ttl The TTL for cache entries, in milliseconds.
    * @param regionInfo The HRI for this cache
    */
-  public Memcache(final long ttl) {
+  public Memcache(final long ttl, final Comparator<HStoreKey> c,
+      final HStoreKey.StoreKeyComparator rc) {
     this.ttl = ttl;
-    this.memcache = createSynchronizedSortedMap();
-    this.snapshot = createSynchronizedSortedMap();
+    this.comparator = c;
+    this.rawcomparator = rc;
+    this.memcache = createSynchronizedSortedMap(c);
+    this.snapshot = createSynchronizedSortedMap(c);
   }
   /*
    * Utility method using HSKWritableComparator
    * @return synchronized sorted map of HStoreKey to byte arrays.
    */
-  @SuppressWarnings("unchecked")
-  private SortedMap<HStoreKey, byte[]> createSynchronizedSortedMap() {
-    return Collections.synchronizedSortedMap(
-      new TreeMap<HStoreKey, byte []>(
-        new HStoreKey.HStoreKeyWritableComparator()));
+  private SortedMap<HStoreKey, byte[]> createSynchronizedSortedMap(final Comparator<HStoreKey> c) {
+    return Collections.synchronizedSortedMap(new TreeMap<HStoreKey, byte []>(c));
   }
   /**
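The reworked default constructor shows the intended wiring; as a sketch, building a Memcache for a plain (non-root, non-meta) region with the comparator pair this patch introduces. Memcache is package-private, so the snippet assumes it lives in the same regionserver package.

package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HStoreKey;

public class MemcacheWiringSketch {
  static Memcache plainRegionMemcache() {
    // The object comparator orders HStoreKey instances; the raw comparator orders
    // rows/keys as byte arrays. Both default to the plain-table flavor here.
    return new Memcache(HConstants.FOREVER,
      new HStoreKey.HStoreKeyComparator(),
      new HStoreKey.StoreKeyComparator());
  }
}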
@@ -119,7 +123,7 @@ class Memcache {
       // mistake. St.Ack
       if (this.memcache.size() != 0) {
         this.snapshot = this.memcache;
-        this.memcache = createSynchronizedSortedMap();
+        this.memcache = createSynchronizedSortedMap(this.comparator);
       }
     }
   } finally {
@@ -156,7 +160,7 @@ class Memcache {
       // OK. Passed in snapshot is same as current snapshot. If not-empty,
       // create a new snapshot and let the old one go.
       if (ss.size() != 0) {
-        this.snapshot = createSynchronizedSortedMap();
+        this.snapshot = createSynchronizedSortedMap(this.comparator);
       }
     } finally {
       this.lock.writeLock().unlock();
@@ -261,7 +265,7 @@ class Memcache {
     if (b == null) {
       return a;
     }
-    return HStoreKey.compareTwoRowKeys(a, b) <= 0? a: b;
+    return Bytes.compareTo(a, b) <= 0? a: b;
   }
   /**
@@ -296,7 +300,7 @@ class Memcache {
       // Iterate until we fall into the next row; i.e. move off current row
       for (Map.Entry<HStoreKey, byte []> es: tailMap.entrySet()) {
         HStoreKey itKey = es.getKey();
-        if (HStoreKey.compareTwoRowKeys(itKey.getRow(), row) <= 0)
+        if (Bytes.compareTo(itKey.getRow(), row) <= 0)
           continue;
         // Note: Not suppressing deletes or expired cells.
         result = itKey.getRow();
@@ -344,7 +348,8 @@ class Memcache {
       HStoreKey itKey = es.getKey();
       byte [] itCol = itKey.getColumn();
       Cell cell = results.get(itCol);
-      if ((cell == null || cell.getNumValues() < numVersions) && key.matchesWithoutColumn(itKey)) {
+      if ((cell == null || cell.getNumValues() < numVersions) &&
+          key.matchesWithoutColumn(itKey)) {
         if (columns == null || columns.contains(itKey.getColumn())) {
           byte [] val = tailMap.get(itKey);
           if (HLogEdit.isDeleted(val)) {
@@ -367,7 +372,7 @@ class Memcache {
           }
         }
       }
-    } else if (HStoreKey.compareTwoRowKeys(key.getRow(), itKey.getRow()) < 0) {
+    } else if (this.rawcomparator.compareRows(key.getRow(), itKey.getRow()) < 0) {
       break;
     }
   }
@@ -428,7 +433,7 @@ class Memcache {
     // the search key, or a range of values between the first candidate key
     // and the ultimate search key (or the end of the cache)
     if (!tailMap.isEmpty() &&
-        HStoreKey.compareTwoRowKeys(tailMap.firstKey().getRow(),
+        Bytes.compareTo(tailMap.firstKey().getRow(),
          search_key.getRow()) <= 0) {
       Iterator<HStoreKey> key_iterator = tailMap.keySet().iterator();
@@ -437,9 +442,9 @@ class Memcache {
       HStoreKey deletedOrExpiredRow = null;
       for (HStoreKey found_key = null; key_iterator.hasNext() &&
           (found_key == null ||
-          HStoreKey.compareTwoRowKeys(found_key.getRow(), row) <= 0);) {
+          Bytes.compareTo(found_key.getRow(), row) <= 0);) {
         found_key = key_iterator.next();
-        if (HStoreKey.compareTwoRowKeys(found_key.getRow(), row) <= 0) {
+        if (Bytes.compareTo(found_key.getRow(), row) <= 0) {
           if (HLogEdit.isDeleted(tailMap.get(found_key))) {
             Store.handleDeleted(found_key, candidateKeys, deletes);
             if (deletedOrExpiredRow == null) {
@@ -900,4 +905,4 @@ class Memcache {
     }
     LOG.info("Exiting.");
   }
 }

=== Store.java ===

@@ -26,6 +26,7 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -54,6 +55,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.SequenceFile;
+import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -126,6 +128,14 @@ public class Store implements HConstants {
   private final Path compactionDir;
   private final Integer compactLock = new Integer(0);
   private final int compactionThreshold;
+  private final int blocksize;
+  private final boolean bloomfilter;
+  private final Compression.Algorithm compression;
+  // Comparing HStoreKeys in byte arrays.
+  final HStoreKey.StoreKeyComparator rawcomparator;
+  // Comparing HStoreKey objects.
+  final Comparator<HStoreKey> comparator;
   /**
    * Constructor
@@ -141,6 +151,7 @@ public class Store implements HConstants {
    * failed. Can be null.
    * @throws IOException
    */
+  @SuppressWarnings("unchecked")
   protected Store(Path basedir, HRegionInfo info, HColumnDescriptor family,
     FileSystem fs, Path reconstructionLog, HBaseConfiguration conf,
     final Progressable reporter)
@@ -151,12 +162,23 @@ public class Store implements HConstants {
     this.family = family;
     this.fs = fs;
     this.conf = conf;
+    this.bloomfilter = family.isBloomfilter();
+    this.blocksize = family.getBlocksize();
+    this.compression = family.getCompression();
+    this.rawcomparator = info.isRootRegion()?
+      new HStoreKey.RootStoreKeyComparator(): info.isMetaRegion()?
+        new HStoreKey.MetaStoreKeyComparator():
+          new HStoreKey.StoreKeyComparator();
+    this.comparator = info.isRootRegion()?
+      new HStoreKey.HStoreKeyRootComparator(): info.isMetaRegion()?
+        new HStoreKey.HStoreKeyMetaComparator():
+          new HStoreKey.HStoreKeyComparator();
     // getTimeToLive returns ttl in seconds. Convert to milliseconds.
     this.ttl = family.getTimeToLive();
     if (ttl != HConstants.FOREVER) {
       this.ttl *= 1000;
     }
-    this.memcache = new Memcache(this.ttl);
+    this.memcache = new Memcache(this.ttl, this.comparator, this.rawcomparator);
     this.compactionDir = HRegion.getCompactionDir(basedir);
     this.storeName = Bytes.toBytes(this.regioninfo.getEncodedName() + "/" +
       Bytes.toString(this.family.getName()));
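The constructor above derives both comparators from the region type. The same selection, isolated as a sketch (class names exactly as in the hunk; the helper itself is hypothetical):

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HStoreKey;

public class ComparatorSelectionSketch {
  // Raw byte [] comparator flavor chosen by region type, mirroring the Store constructor.
  static HStoreKey.StoreKeyComparator rawComparatorFor(HRegionInfo info) {
    return info.isRootRegion()? new HStoreKey.RootStoreKeyComparator():
      info.isMetaRegion()? new HStoreKey.MetaStoreKeyComparator():
        new HStoreKey.StoreKeyComparator();
  }
}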
@@ -254,7 +276,6 @@ public class Store implements HConstants {
    * lower than maxSeqID. (Because we know such log messages are already
    * reflected in the MapFiles.)
    */
-  @SuppressWarnings("unchecked")
   private void doReconstructionLog(final Path reconstructionLog,
     final long maxSeqID, final Progressable reporter)
   throws UnsupportedEncodingException, IOException {
@@ -273,7 +294,7 @@ public class Store implements HConstants {
     // general memory usage accounting.
     long maxSeqIdInLog = -1;
     NavigableMap<HStoreKey, byte []> reconstructedCache =
-      new TreeMap<HStoreKey, byte []>(new HStoreKey.HStoreKeyWritableComparator());
+      new TreeMap<HStoreKey, byte []>(this.comparator);
     SequenceFile.Reader logReader = new SequenceFile.Reader(this.fs,
       reconstructionLog, this.conf);
     try {
@@ -463,7 +484,7 @@ public class Store implements HConstants {
     // if we fail.
     synchronized (flushLock) {
       // A. Write the map out to the disk
-      writer = StoreFile.getWriter(this.fs, this.homedir);
+      writer = getWriter();
       int entries = 0;
       try {
         for (Map.Entry<HStoreKey, byte []> es: cache.entrySet()) {
@@ -493,7 +514,16 @@ public class Store implements HConstants {
     }
     return sf;
   }
+  /**
+   * @return Writer for this store.
+   * @throws IOException
+   */
+  HFile.Writer getWriter() throws IOException {
+    return StoreFile.getWriter(this.fs, this.homedir, this.blocksize,
+      this.compression, this.rawcomparator, this.bloomfilter);
+  }
   /*
    * Change storefiles adding into place the Reader produced by this new flush.
    * @param logCacheFlushId
@@ -650,7 +680,7 @@ public class Store implements HConstants {
     }
     // Step through them, writing to the brand-new file
-    HFile.Writer writer = StoreFile.getWriter(this.fs, this.homedir);
+    HFile.Writer writer = getWriter();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Started compaction of " + filesToCompact.size() + " file(s)" +
         (references? ", hasReferences=true,": " ") + " into " +
@@ -992,6 +1022,7 @@ public class Store implements HConstants {
     final int numVersions, Map<byte [], Cell> results)
   throws IOException {
     int versions = versionsToReturn(numVersions);
+    // This is map of columns to timestamp
     Map<byte [], Long> deletes =
       new TreeMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
     // if the key is null, we're not even looking for anything. return.
@@ -1061,7 +1092,9 @@ public class Store implements HConstants {
           }
         }
       }
-    } else if (HStoreKey.compareTwoRowKeys(key.getRow(), readkey.getRow()) < 0) {
+    } else if (this.rawcomparator.compareRows(key.getRow(), 0,
+        key.getRow().length,
+        readkey.getRow(), 0, readkey.getRow().length) < 0) {
       // if we've crossed into the next row, then we can just stop
       // iterating
       break;
@@ -1280,15 +1313,14 @@ public class Store implements HConstants {
    * @return Found row
    * @throws IOException
    */
-  @SuppressWarnings("unchecked")
   byte [] getRowKeyAtOrBefore(final byte [] row)
   throws IOException{
     // Map of HStoreKeys that are candidates for holding the row key that
     // most closely matches what we're looking for. We'll have to update it as
     // deletes are found all over the place as we go along before finally
     // reading the best key out of it at the end.
-    NavigableMap<HStoreKey, Long> candidateKeys = new TreeMap<HStoreKey, Long>(
-      new HStoreKey.HStoreKeyWritableComparator());
+    NavigableMap<HStoreKey, Long> candidateKeys =
+      new TreeMap<HStoreKey, Long>(this.comparator);
     // Keep a list of deleted cell keys. We need this because as we go through
     // the store files, the cell with the delete marker may be in one file and
@@ -1365,11 +1397,13 @@ public class Store implements HConstants {
     // up to the row before and return that.
     HStoreKey finalKey = HStoreKey.create(f.getReader().getLastKey());
     HStoreKey searchKey = null;
-    if (HStoreKey.compareTwoRowKeys(finalKey.getRow(), row) < 0) {
+    if (this.rawcomparator.compareRows(finalKey.getRow(), 0,
+        finalKey.getRow().length,
+        row, 0, row.length) < 0) {
       searchKey = finalKey;
     } else {
       searchKey = new HStoreKey(row);
-      if (searchKey.compareTo(startKey) < 0) {
+      if (this.comparator.compare(searchKey, startKey) < 0) {
         searchKey = startKey;
       }
     }
@@ -1435,8 +1469,9 @@ public class Store implements HConstants {
       if (deletedOrExpiredRow == null) {
         deletedOrExpiredRow = copy;
       }
-    } else if (HStoreKey.compareTwoRowKeys(readkey.getRow(),
-        searchKey.getRow()) > 0) {
+    } else if (this.rawcomparator.compareRows(readkey.getRow(), 0,
+        readkey.getRow().length,
+        searchKey.getRow(), 0, searchKey.getRow().length) > 0) {
       // if the row key we just read is beyond the key we're searching for,
       // then we're done.
       break;
@@ -1457,7 +1492,7 @@ public class Store implements HConstants {
       }
     }
   } while(scanner.next() && (knownNoGoodKey == null ||
-      readkey.compareTo(knownNoGoodKey) < 0));
+      this.comparator.compare(readkey, knownNoGoodKey) < 0));
   // If we get here and have no candidates but we did find a deleted or
   // expired candidate, we need to look at the key before that
@@ -1502,11 +1537,14 @@ public class Store implements HConstants {
     // of the row in case there are deletes for this candidate in this mapfile
     // BUT do not backup before the first key in the store file.
     // TODO: FIX THIS PROFLIGATE OBJECT MAKING!!!
-    byte [] searchKey =
-      new HStoreKey(candidateKeys.firstKey().getRow()).getBytes();
-    if (f.getReader().getComparator().compare(searchKey, 0, searchKey.length,
-        startKey.getRow(), 0, startKey.getRow().length) < 0) {
+    HStoreKey firstCandidateKey = candidateKeys.firstKey();
+    byte [] searchKey = null;
+    HStoreKey.StoreKeyComparator c =
+      (HStoreKey.StoreKeyComparator)f.getReader().getComparator();
+    if (c.compareRows(firstCandidateKey.getRow(), startKey.getRow()) < 0) {
       searchKey = startKey.getBytes();
+    } else {
+      searchKey = new HStoreKey(firstCandidateKey.getRow()).getBytes();
     }
     // Seek to the exact row, or the one that would be immediately before it
@@ -1523,7 +1561,7 @@ public class Store implements HConstants {
       // as a candidate key
       if (HStoreKey.equalsTwoRowKeys(k.getRow(), row)) {
         handleKey(k, v, now, deletes, candidateKeys);
-      } else if (HStoreKey.compareTwoRowKeys(k.getRow(), row) > 0 ) {
+      } else if (this.rawcomparator.compareRows(k.getRow(), row) > 0 ) {
         // if the row key we just read is beyond the key we're searching for,
         // then we're done.
         break;
@@ -1804,4 +1842,4 @@ public class Store implements HConstants {
     }
     return true;
   }
 }

=== StoreFile.java ===

@@ -35,9 +35,10 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.io.HalfHFileReader;
 import org.apache.hadoop.hbase.io.Reference;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.RawComparator;
 /**
  * A Store data file. Stores usually have one or more of these files. They
@@ -210,7 +211,7 @@ public class StoreFile implements HConstants {
       this.reader = new HalfHFileReader(this.fs, this.referencePath, null,
         this.reference);
     } else {
-      this.reader = new HFile.Reader(this.fs, this.path, null);
+      this.reader = new StoreFileReader(this.fs, this.path, null);
     }
     // Load up indices and fileinfo.
     Map<byte [], byte []> map = this.reader.loadFileInfo();
@@ -241,6 +242,71 @@ public class StoreFile implements HConstants {
     }
     return this.reader;
   }
/**
* Override to add some customization on HFile.Reader
*/
static class StoreFileReader extends HFile.Reader {
public StoreFileReader(FileSystem fs, Path path, BlockCache cache)
throws IOException {
super(fs, path, cache);
}
protected String toStringFirstKey() {
String result = "";
try {
result = HStoreKey.create(getFirstKey()).toString();
} catch (IOException e) {
LOG.warn("Failed toString first key", e);
}
return result;
}
protected String toStringLastKey() {
String result = "";
try {
result = HStoreKey.create(getLastKey()).toString();
} catch (IOException e) {
LOG.warn("Failed toString last key", e);
}
return result;
}
}
/**
* Override to add some customization on HalfHFileReader
*/
static class HalfStoreFileReader extends HalfHFileReader {
public HalfStoreFileReader(FileSystem fs, Path p, BlockCache c, Reference r)
throws IOException {
super(fs, p, c, r);
}
@Override
public String toString() {
return super.toString() + (isTop()? ", half=top": ", half=bottom");
}
protected String toStringFirstKey() {
String result = "";
try {
result = HStoreKey.create(getFirstKey()).toString();
} catch (IOException e) {
LOG.warn("Failed toString first key", e);
}
return result;
}
protected String toStringLastKey() {
String result = "";
try {
result = HStoreKey.create(getLastKey()).toString();
} catch (IOException e) {
LOG.warn("Failed toString last key", e);
}
return result;
}
}
   /**
    * @return Current reader. Must call open first.
@@ -309,7 +375,7 @@ public class StoreFile implements HConstants {
    */
   public static HFile.Writer getWriter(final FileSystem fs, final Path dir)
   throws IOException {
-    return getWriter(fs, dir, DEFAULT_BLOCKSIZE_SMALL, null, null);
+    return getWriter(fs, dir, DEFAULT_BLOCKSIZE_SMALL, null, null, false);
   }
   /**
@@ -326,15 +392,16 @@ public class StoreFile implements HConstants {
    * @throws IOException
    */
   public static HFile.Writer getWriter(final FileSystem fs, final Path dir,
-    final int blocksize, final String algorithm, final RawComparator<byte []> c)
+    final int blocksize, final Compression.Algorithm algorithm,
+    final HStoreKey.StoreKeyComparator c, final boolean bloomfilter)
   throws IOException {
     if (!fs.exists(dir)) {
       fs.mkdirs(dir);
     }
     Path path = getUniqueFile(fs, dir);
     return new HFile.Writer(fs, path, blocksize,
-      algorithm == null? HFile.DEFAULT_COMPRESSION: algorithm,
-      c == null? HStoreKey.BYTECOMPARATOR: c);
+      algorithm == null? HFile.DEFAULT_COMPRESSION_ALGORITHM: algorithm,
+      c == null? new HStoreKey.StoreKeyComparator(): c, bloomfilter);
   }
   /**

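// Illustrative sketch only (not part of this patch): obtaining a writer through the
// reworked getWriter. The path, block size and settings below are invented for the
// example; real callers pass what their HColumnDescriptor is configured with.
static HFile.Writer exampleWriter(final FileSystem fs) throws IOException {
  Path familyDir = new Path("/tmp/hbase/testtable/1234567890/info");
  return StoreFile.getWriter(fs, familyDir,
      64 * 1024,                            // block size
      Compression.Algorithm.NONE,           // or GZ / LZO per the column family
      new HStoreKey.StoreKeyComparator(),   // plain store; meta/root stores use their own
      false);                               // no bloom filter
}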

@@ -141,19 +141,17 @@ implements ChangedReadersObserver {
    try {
      // Find the next viable row label (and timestamp).
      ViableRow viableRow = getNextViableRow();
      // Grab all the values that match this row/timestamp
      boolean insertedItem = false;
      if (viableRow.getRow() != null) {
        key.setRow(viableRow.getRow());
        key.setVersion(viableRow.getTimestamp());
        for (int i = 0; i < keys.length; i++) {
          // Fetch the data
          while ((keys[i] != null) &&
-             (HStoreKey.compareTwoRowKeys(this.keys[i].getRow(),
+             (this.store.rawcomparator.compareRows(this.keys[i].getRow(),
                viableRow.getRow()) == 0)) {
            // If we are doing a wild card match or there are multiple matchers
            // per column, we need to scan all the older versions of this row
            // to pick up the rest of the family members
@@ -162,8 +160,7 @@ implements ChangedReadersObserver {
                && (keys[i].getTimestamp() != viableRow.getTimestamp())) {
              break;
            }
            if(columnMatch(i)) {
              // We only want the first result for any specific family member
              if(!results.containsKey(keys[i].getColumn())) {
                results.put(keys[i].getColumn(),
@@ -179,10 +176,10 @@ implements ChangedReadersObserver {
          // Advance the current scanner beyond the chosen row, to
          // a valid timestamp, so we're ready next time.
          while ((keys[i] != null) &&
-             ((HStoreKey.compareTwoRowKeys(this.keys[i].getRow(),
-               viableRow.getRow()) <= 0)
-             || (keys[i].getTimestamp() > this.timestamp)
-             || (! columnMatch(i)))) {
+             ((this.store.rawcomparator.compareRows(this.keys[i].getRow(),
+               viableRow.getRow()) <= 0) ||
+             (keys[i].getTimestamp() > this.timestamp) ||
+             (! columnMatch(i)))) {
            getNext(i);
          }
        }
@@ -192,7 +189,7 @@ implements ChangedReadersObserver {
      this.lock.readLock().unlock();
    }
  }
  // Data stucture to hold next, viable row (and timestamp).
  class ViableRow {
    private final byte [] row;
@@ -239,8 +236,10 @@ implements ChangedReadersObserver {
          // column matches and the timestamp of the row is less than or equal
          // to this.timestamp, so we do not need to test that here
          && ((viableRow == null) ||
-           (HStoreKey.compareTwoRowKeys(this.keys[i].getRow(), viableRow) < 0) ||
-           ((HStoreKey.compareTwoRowKeys(this.keys[i].getRow(), viableRow) == 0) &&
+           (this.store.rawcomparator.compareRows(this.keys[i].getRow(),
+             viableRow) < 0) ||
+           ((this.store.rawcomparator.compareRows(this.keys[i].getRow(),
+             viableRow) == 0) &&
            (keys[i].getTimestamp() > viableTimestamp)))) {
          if (ttl == HConstants.FOREVER || now < keys[i].getTimestamp() + ttl) {
            viableRow = keys[i].getRow();
@@ -270,8 +269,7 @@ implements ChangedReadersObserver {
        return true;
      }
    } else {
-     if (!Store.getClosest(this.scanners[i],
-         new HStoreKey(firstRow).getBytes())) {
+     if (!Store.getClosest(this.scanners[i], HStoreKey.getBytes(firstRow))) {
        closeSubScanner(i);
        return true;
      }


@@ -153,8 +153,10 @@ class StoreScanner implements InternalScanner, ChangedReadersObserver {
    for (int i = 0; i < this.keys.length; i++) {
      if (scanners[i] != null &&
          (chosenRow == null ||
-         (HStoreKey.compareTwoRowKeys(this.keys[i].getRow(), chosenRow) < 0) ||
-         ((HStoreKey.compareTwoRowKeys(this.keys[i].getRow(), chosenRow) == 0) &&
+         (this.store.rawcomparator.compareRows(this.keys[i].getRow(),
+           chosenRow) < 0) ||
+         ((this.store.rawcomparator.compareRows(this.keys[i].getRow(),
+           chosenRow) == 0) &&
          (keys[i].getTimestamp() > chosenTimestamp)))) {
        chosenRow = keys[i].getRow();
        chosenTimestamp = keys[i].getTimestamp();
@@ -181,10 +183,9 @@ class StoreScanner implements InternalScanner, ChangedReadersObserver {
      // problem, could redo as bloom filter.
      Set<HStoreKey> deletes = new HashSet<HStoreKey>();
      for (int i = 0; i < scanners.length && !filtered; i++) {
-       while ((scanners[i] != null
-           && !filtered
-           && moreToFollow)
-           && (HStoreKey.compareTwoRowKeys(this.keys[i].getRow(), chosenRow) == 0)) {
+       while ((scanners[i] != null && !filtered && moreToFollow) &&
+           (this.store.rawcomparator.compareRows(this.keys[i].getRow(),
+             chosenRow) == 0)) {
          // If we are doing a wild card match or there are multiple
          // matchers per column, we need to scan all the older versions of
          // this row to pick up the rest of the family members
@@ -206,7 +207,7 @@ class StoreScanner implements InternalScanner, ChangedReadersObserver {
          if (HLogEdit.isDeleted(e.getValue().getValue())) {
            // Only first key encountered is added; deletes is a Set.
            deletes.add(new HStoreKey(hsk));
-         } else if (!deletes.contains(hsk) &&
+         } else if ((deletes.size() == 0 || !deletes.contains(hsk)) &&
              !filtered &&
              moreToFollow &&
              !results.containsKey(e.getKey())) {
@@ -233,7 +234,8 @@ class StoreScanner implements InternalScanner, ChangedReadersObserver {
          // If the current scanner is non-null AND has a lower-or-equal
          // row label, then its timestamp is bad. We need to advance it.
          while ((scanners[i] != null) &&
-             (HStoreKey.compareTwoRowKeys(this.keys[i].getRow(), chosenRow) <= 0)) {
+             (this.store.rawcomparator.compareRows(this.keys[i].getRow(),
+               chosenRow) <= 0)) {
            resultSets[i].clear();
            if (!scanners[i].next(keys[i], resultSets[i])) {
              closeScanner(i);


@@ -23,6 +23,7 @@ import java.util.ArrayList;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.rest.RESTConstants;
import org.apache.hadoop.hbase.rest.descriptors.RowUpdateDescriptor;
import org.apache.hadoop.hbase.rest.descriptors.ScannerDescriptor;
@@ -76,7 +77,7 @@ public class JsonRestParser implements IHBaseRestParser {
      byte[] name = Bytes.toBytes(strTemp);
      int maxVersions;
-     HColumnDescriptor.CompressionType cType;
+     String cType;
      boolean inMemory;
      boolean blockCacheEnabled;
      int maxValueLength;
@@ -96,10 +97,9 @@ public class JsonRestParser implements IHBaseRestParser {
      }
      try {
-       cType = HColumnDescriptor.CompressionType.valueOf(jsonObject
-           .getString("compression_type"));
+       cType = jsonObject.getString("compression_type").toUpperCase();
      } catch (JSONException e) {
-       cType = HColumnDescriptor.CompressionType.NONE;
+       cType = HColumnDescriptor.DEFAULT_COMPRESSION;
      }
      try {


@@ -28,6 +28,7 @@ import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
+import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.rest.RESTConstants;
import org.apache.hadoop.hbase.rest.descriptors.RowUpdateDescriptor;
import org.apache.hadoop.hbase.rest.descriptors.ScannerDescriptor;
@@ -95,7 +96,7 @@ public class XMLRestParser implements IHBaseRestParser {
    String colname = makeColumnName(name_node.getFirstChild().getNodeValue());
    int max_versions = HColumnDescriptor.DEFAULT_VERSIONS;
-   CompressionType compression = HColumnDescriptor.DEFAULT_COMPRESSION;
+   String compression = HColumnDescriptor.DEFAULT_COMPRESSION;
    boolean in_memory = HColumnDescriptor.DEFAULT_IN_MEMORY;
    boolean block_cache = HColumnDescriptor.DEFAULT_BLOCKCACHE;
    int max_cell_size = HColumnDescriptor.DEFAULT_LENGTH;
@@ -126,8 +127,8 @@ public class XMLRestParser implements IHBaseRestParser {
    NodeList compression_list = columnfamily
        .getElementsByTagName("compression");
    if (compression_list.getLength() > 0) {
-     compression = CompressionType.valueOf(compression_list.item(0)
-         .getFirstChild().getNodeValue());
+     compression = compression_list.item(0)
+         .getFirstChild().getNodeValue().toUpperCase();
    }
    NodeList in_memory_list = columnfamily.getElementsByTagName("in-memory");
@@ -163,8 +164,8 @@ public class XMLRestParser implements IHBaseRestParser {
    }
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(colname),
-       max_versions, compression, in_memory, block_cache, max_cell_size, ttl,
-       bloomfilter);
+       max_versions, compression, in_memory, block_cache,
+       max_cell_size, ttl, bloomfilter);
    NodeList metadataList = columnfamily.getElementsByTagName("metadata");
    for (int i = 0; i < metadataList.getLength(); i++) {


@@ -22,9 +22,9 @@ import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
import org.apache.hadoop.hbase.thrift.generated.TCell;
@@ -44,7 +44,8 @@ public class ThriftUtilities {
   */
  static public HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
  throws IllegalArgument {
-   CompressionType comp = CompressionType.valueOf(in.compression);
+   Compression.Algorithm comp =
+     Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
    boolean bloom = false;
    if (in.bloomFilterType.compareTo("NONE") != 0) {
      bloom = true;
@@ -54,7 +55,7 @@ public class ThriftUtilities {
      throw new IllegalArgument("column name is empty");
    }
    HColumnDescriptor col = new HColumnDescriptor(in.name,
-     in.maxVersions, comp, in.inMemory, in.blockCacheEnabled,
+     in.maxVersions, comp.getName(), in.inMemory, in.blockCacheEnabled,
      in.maxValueLength, in.timeToLive, bloom);
    return col;
  }
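// Side note (illustrative sketch, not part of the patch): the Thrift compression
// string is lower-cased, resolved against the new hfile Compression enum, and the
// enum's canonical name is what ends up stored in the HColumnDescriptor.
static String exampleCanonicalCompressionName() {
  Compression.Algorithm alg = Compression.getCompressionAlgorithmByName("gz");
  return alg.getName();
}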


@@ -116,6 +116,15 @@ public class Bytes {
    out.write(b, 0, b.length);
  }
public static int writeByteArray(final byte [] tgt, final int tgtOffset,
final byte [] src, final int srcOffset, final int srcLength) {
byte [] vint = vintToBytes(srcLength);
System.arraycopy(vint, 0, tgt, tgtOffset, vint.length);
int offset = tgtOffset + vint.length;
System.arraycopy(src, srcOffset, tgt, offset, srcLength);
return offset + srcLength;
}
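// Minimal usage sketch (not from the patch): copy a vint-length-prefixed byte
// array into a preallocated buffer and get back the offset just past the write.
// For arrays shorter than 128 bytes the vint prefix is a single byte.
static int exampleWriteByteArray() {
  byte [] row = Bytes.toBytes("row-0001");
  byte [] buf = new byte[1 + row.length];
  return Bytes.writeByteArray(buf, 0, row, 0, row.length);  // == 1 + row.length
}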
  /**
   * Reads a zero-compressed encoded long from input stream and returns it.
   * @param buffer Binary array
@@ -218,16 +227,99 @@ public class Bytes {
    return bb.array();
  }
/**
* @param vint Integer to make a vint of.
* @return Vint as bytes array.
*/
public static byte [] vintToBytes(final long vint) {
long i = vint;
int size = WritableUtils.getVIntSize(i);
byte [] result = new byte[size];
int offset = 0;
if (i >= -112 && i <= 127) {
result[offset] = ((byte)i);
return result;
}
offset++;
int len = -112;
if (i < 0) {
i ^= -1L; // take one's complement'
len = -120;
}
long tmp = i;
while (tmp != 0) {
tmp = tmp >> 8;
len--;
}
result[offset++] = (byte)len;
len = (len < -120) ? -(len + 120) : -(len + 112);
for (int idx = len; idx != 0; idx--) {
int shiftbits = (idx - 1) * 8;
long mask = 0xFFL << shiftbits;
result[offset++] = (byte)((i & mask) >> shiftbits);
}
return result;
}
/**
* @param buffer
* @return vint bytes as an integer.
*/
public static long bytesToVint(final byte [] buffer) {
int offset = 0;
byte firstByte = buffer[offset++];
int len = WritableUtils.decodeVIntSize(firstByte);
if (len == 1) {
return firstByte;
}
long i = 0;
for (int idx = 0; idx < len-1; idx++) {
byte b = buffer[offset++];
i = i << 8;
i = i | (b & 0xFF);
}
return (WritableUtils.isNegativeVInt(firstByte) ? (i ^ -1L) : i);
}
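// Quick round-trip sketch (illustrative only): for values in the single-byte vint
// range the new array-based helpers agree with WritableUtils' stream encoding.
static void exampleVintRoundTrip() {
  for (long v : new long[] {0L, 42L, 127L, -112L}) {
    byte [] enc = Bytes.vintToBytes(v);
    assert enc.length == WritableUtils.getVIntSize(v);
    assert Bytes.bytesToVint(enc) == v;
  }
}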
  /**
   * Converts a byte array to a long value
   * @param bytes
   * @return the long value
   */
  public static long toLong(byte[] bytes) {
-   if (bytes == null || bytes.length == 0) {
-     return -1L;
-   }
-   return ByteBuffer.wrap(bytes).getLong();
+   return toLong(bytes, 0);
  }
+ /**
+  * Converts a byte array to a long value
+  * @param bytes
+  * @return the long value
+  */
+ public static long toLong(byte[] bytes, int offset) {
+   return toLong(bytes, offset, SIZEOF_LONG);
+ }
+ /**
+  * Converts a byte array to a long value
+  * @param bytes
+  * @return the long value
+  */
+ public static long toLong(byte[] bytes, int offset, final int length) {
+   if (bytes == null || bytes.length == 0 ||
+       (offset + length > bytes.length)) {
+     return -1L;
+   }
+   long l = 0;
+   for(int i = offset; i < (offset + length); i++) {
+     l <<= 8;
+     l ^= (long)bytes[i] & 0xFF;
+   }
+   return l;
+ }
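// Illustrative only: reading a long out of a larger buffer at an offset, without
// wrapping the array in a ByteBuffer first.
static long exampleToLongAtOffset() {
  byte [] buf = Bytes.toBytes(1234567890123L);
  return Bytes.toLong(buf, 0, Bytes.SIZEOF_LONG);  // == 1234567890123L
}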
  /**
@@ -309,21 +401,29 @@ public class Bytes {
  }
  /**
-  * @param left
-  * @param right
-  * @param leftOffset Where to start comparing in the left buffer
-  * @param rightOffset Where to start comparing in the right buffer
-  * @param leftLength How much to compare from the left buffer
-  * @param rightLength How much to compare from the right buffer
+  * @param b1
+  * @param b2
+  * @param s1 Where to start comparing in the left buffer
+  * @param s2 Where to start comparing in the right buffer
+  * @param l1 How much to compare from the left buffer
+  * @param l2 How much to compare from the right buffer
   * @return 0 if equal, < 0 if left is less than right, etc.
   */
- public static int compareTo(final byte [] left, final int leftOffset,
-     final int leftLength, final byte [] right, final int rightOffset,
-     final int rightLength) {
-   return WritableComparator.compareBytes(left,leftOffset, leftLength,
-     right, rightOffset, rightLength);
+ public static int compareTo(byte[] b1, int s1, int l1,
+     byte[] b2, int s2, int l2) {
+   // Bring WritableComparator code local
+   int end1 = s1 + l1;
+   int end2 = s2 + l2;
+   for (int i = s1, j = s2; i < end1 && j < end2; i++, j++) {
+     int a = (b1[i] & 0xff);
+     int b = (b2[j] & 0xff);
+     if (a != b) {
+       return a - b;
+     }
+   }
+   return l1 - l2;
  }
  /**
   * @param left
   * @param right


@@ -0,0 +1,32 @@
/**
* Copyright 2009 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
import junit.framework.TestCase;
public class TestBytes extends TestCase {
public void testToLong() throws Exception {
long [] longs = {-1l, 123l, 122232323232l};
for (int i = 0; i < longs.length; i++) {
byte [] b = Bytes.toBytes(longs[i]);
assertEquals(longs[i], Bytes.toLong(b));
}
}
}


@@ -22,25 +22,25 @@ package org.apache.hadoop.hbase;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
+import java.util.Iterator;
import java.util.Map;
import java.util.SortedMap;
-import java.util.Iterator;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
-import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Scanner;
+import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
/**
 * Abstract base class for test cases. Performs all static initialization
@@ -93,8 +93,7 @@ public abstract class HBaseTestCase extends TestCase {
  private void init() {
    conf = new HBaseConfiguration();
    try {
-     START_KEY =
-       new String(START_KEY_BYTES, HConstants.UTF8_ENCODING) + PUNCTUATION;
+     START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_ENCODING);
    } catch (UnsupportedEncodingException e) {
      LOG.fatal("error during initialization", e);
      fail();
@@ -191,14 +190,14 @@ public abstract class HBaseTestCase extends TestCase {
      final int versions) {
    HTableDescriptor htd = new HTableDescriptor(name);
    htd.addFamily(new HColumnDescriptor(COLFAMILY_NAME1, versions,
-     CompressionType.NONE, false, false, Integer.MAX_VALUE,
-     HConstants.FOREVER, false));
+     HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
+     Integer.MAX_VALUE, HConstants.FOREVER, false));
    htd.addFamily(new HColumnDescriptor(COLFAMILY_NAME2, versions,
-     CompressionType.NONE, false, false, Integer.MAX_VALUE,
-     HConstants.FOREVER, false));
+     HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
+     Integer.MAX_VALUE, HConstants.FOREVER, false));
    htd.addFamily(new HColumnDescriptor(COLFAMILY_NAME3, versions,
-     CompressionType.NONE, false, false, Integer.MAX_VALUE,
-     HConstants.FOREVER, false));
+     HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
+     Integer.MAX_VALUE, HConstants.FOREVER, false));
    return htd;
  }
@@ -279,9 +278,7 @@ public abstract class HBaseTestCase extends TestCase {
    EXIT: for (char c = (char)startKeyBytes[0]; c <= LAST_CHAR; c++) {
      for (char d = secondCharStart; d <= LAST_CHAR; d++) {
        for (char e = thirdCharStart; e <= LAST_CHAR; e++) {
-         byte [] bytes = new byte [] {(byte)c, (byte)d, (byte)e};
-         String s = Bytes.toString(bytes) + PUNCTUATION;
-         byte [] t = Bytes.toBytes(s);
+         byte [] t = new byte [] {(byte)c, (byte)d, (byte)e};
          if (endKey != null && endKey.length > 0
              && Bytes.compareTo(endKey, t) <= 0) {
            break EXIT;


@@ -30,16 +30,21 @@ import java.util.Random;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Scanner;
+import org.apache.hadoop.hbase.filter.PageRowFilter;
+import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
@@ -52,8 +57,6 @@ import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
/**
@@ -88,6 +91,7 @@ public class PerformanceEvaluation implements HConstants {
  }
  private static final String RANDOM_READ = "randomRead";
+ private static final String RANDOM_SEEK_SCAN = "randomSeekScan";
  private static final String RANDOM_READ_MEM = "randomReadMem";
  private static final String RANDOM_WRITE = "randomWrite";
  private static final String SEQUENTIAL_READ = "sequentialRead";
@@ -96,6 +100,7 @@ public class PerformanceEvaluation implements HConstants {
  private static final List<String> COMMANDS =
    Arrays.asList(new String [] {RANDOM_READ,
+     RANDOM_SEEK_SCAN,
      RANDOM_READ_MEM,
      RANDOM_WRITE,
      SEQUENTIAL_READ,
@@ -406,7 +411,36 @@ public class PerformanceEvaluation implements HConstants {
   */
  abstract String getTestName();
}
class RandomSeekScanTest extends Test {
RandomSeekScanTest(final HBaseConfiguration conf, final int startRow,
final int perClientRunRows, final int totalRows, final Status status) {
super(conf, startRow, perClientRunRows, totalRows, status);
}
void testRow(@SuppressWarnings("unused") final int i) throws IOException {
Scanner s = this.table.getScanner(new byte [][] {COLUMN_NAME},
getRandomRow(this.rand, this.totalRows),
new WhileMatchRowFilter(new PageRowFilter(120)));
int count = 0;
for (RowResult rr = null; (rr = s.next()) != null;) {
// LOG.info("" + count++ + " " + rr.toString());
}
s.close();
}
@Override
protected int getReportingPeriod() {
//
return this.perClientRunRows / 100;
}
@Override
String getTestName() {
return "randomSeekScanTest";
}
}
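// Hedged usage note (not from the patch): the new command plugs into the existing
// PerformanceEvaluation driver, so it should be runnable the same way as the other
// tests, e.g. something like:
//   bin/hbase org.apache.hadoop.hbase.PerformanceEvaluation randomSeekScan 1
// where the trailing argument is the usual number of clients.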
class RandomReadTest extends Test {
  RandomReadTest(final HBaseConfiguration conf, final int startRow,
    final int perClientRunRows, final int totalRows, final Status status) {
@@ -581,6 +615,10 @@ public class PerformanceEvaluation implements HConstants {
      Test t = new SequentialWriteTest(this.conf, startRow, perClientRunRows,
        totalRows, status);
      totalElapsedTime = t.test();
+   } else if (cmd.equals(RANDOM_SEEK_SCAN)) {
+     Test t = new RandomSeekScanTest(this.conf, startRow, perClientRunRows,
+       totalRows, status);
+     totalElapsedTime = t.test();
    } else {
      new IllegalArgumentException("Invalid command value: " + cmd);
    }
@@ -671,6 +709,7 @@ public class PerformanceEvaluation implements HConstants {
    System.err.println(" randomRead Run random read test");
    System.err.println(" randomReadMem Run random read test where table " +
      "is in memory");
+   System.err.println(" randomSeekScan Run random seek and scan 100 test");
    System.err.println(" randomWrite Run random write test");
    System.err.println(" sequentialRead Run sequential read test");
    System.err.println(" sequentialWrite Run sequential write test");


@@ -19,10 +19,11 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.util.Set;
+import java.util.TreeSet;
import junit.framework.TestCase;
-import org.apache.hadoop.hbase.HStoreKey.StoreKeyByteComparator;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
@@ -38,6 +39,184 @@ public class TestHStoreKey extends TestCase {
    super.tearDown();
  }
public void testMoreComparisons() throws Exception {
// Root compares
HStoreKey a = new HStoreKey(".META.,,99999999999999");
HStoreKey b = new HStoreKey(".META.,,1");
HStoreKey.StoreKeyComparator c = new HStoreKey.RootStoreKeyComparator();
assertTrue(c.compare(b.getBytes(), a.getBytes()) < 0);
HStoreKey aa = new HStoreKey(".META.,,1");
HStoreKey bb = new HStoreKey(".META.,,1", "info:regioninfo", 1235943454602L);
assertTrue(c.compare(aa.getBytes(), bb.getBytes()) < 0);
// Meta compares
HStoreKey aaa = new HStoreKey("TestScanMultipleVersions,row_0500,1236020145502");
HStoreKey bbb = new HStoreKey("TestScanMultipleVersions,,99999999999999");
c = new HStoreKey.MetaStoreKeyComparator();
assertTrue(c.compare(bbb.getBytes(), aaa.getBytes()) < 0);
HStoreKey aaaa = new HStoreKey("TestScanMultipleVersions,,1236023996656",
"info:regioninfo", 1236024396271L);
assertTrue(c.compare(aaaa.getBytes(), bbb.getBytes()) < 0);
HStoreKey x = new HStoreKey("TestScanMultipleVersions,row_0500,1236034574162",
"", 9223372036854775807L);
HStoreKey y = new HStoreKey("TestScanMultipleVersions,row_0500,1236034574162",
"info:regioninfo", 1236034574912L);
assertTrue(c.compare(x.getBytes(), y.getBytes()) < 0);
comparisons(new HStoreKey.HStoreKeyRootComparator());
comparisons(new HStoreKey.HStoreKeyMetaComparator());
comparisons(new HStoreKey.HStoreKeyComparator());
metacomparisons(new HStoreKey.HStoreKeyRootComparator());
metacomparisons(new HStoreKey.HStoreKeyMetaComparator());
}
/**
* Tests cases where rows keys have characters below the ','.
* See HBASE-832
* @throws IOException
*/
public void testHStoreKeyBorderCases() throws IOException {
HStoreKey rowA = new HStoreKey("testtable,www.hbase.org/,1234",
"", Long.MAX_VALUE);
byte [] rowABytes = Writables.getBytes(rowA);
HStoreKey rowB = new HStoreKey("testtable,www.hbase.org/%20,99999",
"", Long.MAX_VALUE);
byte [] rowBBytes = Writables.getBytes(rowB);
// This is a plain compare on the row. It gives wrong answer for meta table
// row entry.
assertTrue(rowA.compareTo(rowB) > 0);
HStoreKey.MetaStoreKeyComparator c =
new HStoreKey.MetaStoreKeyComparator();
assertTrue(c.compare(rowABytes, rowBBytes) < 0);
rowA = new HStoreKey("testtable,,1234", "", Long.MAX_VALUE);
rowB = new HStoreKey("testtable,$www.hbase.org/,99999", "", Long.MAX_VALUE);
assertTrue(rowA.compareTo(rowB) > 0);
assertTrue(c.compare( Writables.getBytes(rowA), Writables.getBytes(rowB)) < 0);
rowA = new HStoreKey(".META.,testtable,www.hbase.org/,1234,4321", "",
Long.MAX_VALUE);
rowB = new HStoreKey(".META.,testtable,www.hbase.org/%20,99999,99999", "",
Long.MAX_VALUE);
assertTrue(rowA.compareTo(rowB) > 0);
HStoreKey.RootStoreKeyComparator rootComparator =
new HStoreKey.RootStoreKeyComparator();
assertTrue(rootComparator.compare( Writables.getBytes(rowA),
Writables.getBytes(rowB)) < 0);
}
private void metacomparisons(final HStoreKey.HStoreKeyComparator c) {
assertTrue(c.compare(new HStoreKey(".META.,a,,0,1"),
new HStoreKey(".META.,a,,0,1")) == 0);
assertTrue(c.compare(new HStoreKey(".META.,a,,0,1"),
new HStoreKey(".META.,a,,0,2")) < 0);
assertTrue(c.compare(new HStoreKey(".META.,a,,0,2"),
new HStoreKey(".META.,a,,0,1")) > 0);
}
private void comparisons(final HStoreKey.HStoreKeyComparator c) {
assertTrue(c.compare(new HStoreKey(".META.,,1"),
new HStoreKey(".META.,,1")) == 0);
assertTrue(c.compare(new HStoreKey(".META.,,1"),
new HStoreKey(".META.,,2")) < 0);
assertTrue(c.compare(new HStoreKey(".META.,,2"),
new HStoreKey(".META.,,1")) > 0);
}
@SuppressWarnings("unchecked")
public void testBinaryKeys() throws Exception {
Set<HStoreKey> set = new TreeSet<HStoreKey>(new HStoreKey.HStoreKeyComparator());
HStoreKey [] keys = {new HStoreKey("aaaaa,\u0000\u0000,2", getName(), 2),
new HStoreKey("aaaaa,\u0001,3", getName(), 3),
new HStoreKey("aaaaa,,1", getName(), 1),
new HStoreKey("aaaaa,\u1000,5", getName(), 5),
new HStoreKey("aaaaa,a,4", getName(), 4),
new HStoreKey("a,a,0", getName(), 0),
};
// Add to set with bad comparator
for (int i = 0; i < keys.length; i++) {
set.add(keys[i]);
}
// This will output the keys incorrectly.
boolean assertion = false;
int count = 0;
try {
for (HStoreKey k: set) {
assertTrue(count++ == k.getTimestamp());
}
} catch (junit.framework.AssertionFailedError e) {
// Expected
assertion = true;
}
assertTrue(assertion);
// Make set with good comparator
set = new TreeSet<HStoreKey>(new HStoreKey.HStoreKeyMetaComparator());
for (int i = 0; i < keys.length; i++) {
set.add(keys[i]);
}
count = 0;
for (HStoreKey k: set) {
assertTrue(count++ == k.getTimestamp());
}
// Make up -ROOT- table keys.
HStoreKey [] rootKeys = {
new HStoreKey(".META.,aaaaa,\u0000\u0000,0,2", getName(), 2),
new HStoreKey(".META.,aaaaa,\u0001,0,3", getName(), 3),
new HStoreKey(".META.,aaaaa,,0,1", getName(), 1),
new HStoreKey(".META.,aaaaa,\u1000,0,5", getName(), 5),
new HStoreKey(".META.,aaaaa,a,0,4", getName(), 4),
new HStoreKey(".META.,,0", getName(), 0),
};
// This will output the keys incorrectly.
set = new TreeSet<HStoreKey>(new HStoreKey.HStoreKeyMetaComparator());
// Add to set with bad comparator
for (int i = 0; i < keys.length; i++) {
set.add(rootKeys[i]);
}
assertion = false;
count = 0;
try {
for (HStoreKey k: set) {
assertTrue(count++ == k.getTimestamp());
}
} catch (junit.framework.AssertionFailedError e) {
// Expected
assertion = true;
}
// Now with right comparator
set = new TreeSet<HStoreKey>(new HStoreKey.HStoreKeyRootComparator());
// Add to set with bad comparator
for (int i = 0; i < keys.length; i++) {
set.add(rootKeys[i]);
}
count = 0;
for (HStoreKey k: set) {
assertTrue(count++ == k.getTimestamp());
}
}
public void testSerialization() throws IOException {
HStoreKey hsk = new HStoreKey(getName(), getName(), 123);
byte [] b = hsk.getBytes();
HStoreKey hsk2 = HStoreKey.create(b);
assertTrue(hsk.equals(hsk2));
// Test getBytes with empty column
hsk = new HStoreKey(getName());
assertTrue(Bytes.equals(hsk.getBytes(),
HStoreKey.getBytes(Bytes.toBytes(getName()), null,
HConstants.LATEST_TIMESTAMP)));
}
public void testGetBytes() throws IOException {
long now = System.currentTimeMillis();
HStoreKey hsk = new HStoreKey("one", "two", now);
byte [] writablesBytes = Writables.getBytes(hsk);
byte [] selfSerializationBytes = hsk.getBytes();
Bytes.equals(writablesBytes, selfSerializationBytes);
}
  public void testByteBuffer() throws Exception {
    final long ts = 123;
    final byte [] row = Bytes.toBytes("row");
@@ -61,7 +240,8 @@ public class TestHStoreKey extends TestCase {
    byte [] nowBytes = Writables.getBytes(now);
    HStoreKey future = new HStoreKey(a, a, timestamp + 10);
    byte [] futureBytes = Writables.getBytes(future);
-   StoreKeyByteComparator comparator = new HStoreKey.StoreKeyByteComparator();
+   HStoreKey.StoreKeyComparator comparator =
+     new HStoreKey.StoreKeyComparator();
    assertTrue(past.compareTo(now) > 0);
    assertTrue(comparator.compare(pastBytes, nowBytes) > 0);
    assertTrue(now.compareTo(now) == 0);
@@ -84,45 +264,4 @@ public class TestHStoreKey extends TestCase {
    assertTrue(nocolumn.compareTo(withcolumn) < 0);
    assertTrue(comparator.compare(nocolumnBytes, withcolumnBytes) < 0);
  }
// /**
// * Tests cases where rows keys have characters below the ','.
// * See HBASE-832
// * @throws IOException
// */
// public void testHStoreKeyBorderCases() throws IOException {
// HRegionInfo info = new HRegionInfo(new HTableDescriptor("testtable"),
// HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
//
// HStoreKey rowA = new HStoreKey("testtable,www.hbase.org/,1234",
// "", Long.MAX_VALUE, info);
// byte [] rowABytes = Writables.getBytes(rowA);
// HStoreKey rowB = new HStoreKey("testtable,www.hbase.org/%20,99999",
// "", Long.MAX_VALUE, info);
// byte [] rowBBytes = Writables.getBytes(rowB);
// assertTrue(rowA.compareTo(rowB) > 0);
// HStoreKey.Comparator comparator = new HStoreKey.PlainStoreKeyComparator();
// assertTrue(comparator.compare(rowABytes, rowBBytes) > 0);
//
// rowA = new HStoreKey("testtable,www.hbase.org/,1234",
// "", Long.MAX_VALUE, HRegionInfo.FIRST_META_REGIONINFO);
// rowB = new HStoreKey("testtable,www.hbase.org/%20,99999",
// "", Long.MAX_VALUE, HRegionInfo.FIRST_META_REGIONINFO);
// assertTrue(rowA.compareTo(rowB) < 0);
// assertTrue(comparator.compare(rowABytes, rowBBytes) < 0);
//
// rowA = new HStoreKey("testtable,,1234",
// "", Long.MAX_VALUE, HRegionInfo.FIRST_META_REGIONINFO);
// rowB = new HStoreKey("testtable,$www.hbase.org/,99999",
// "", Long.MAX_VALUE, HRegionInfo.FIRST_META_REGIONINFO);
// assertTrue(rowA.compareTo(rowB) < 0);
// assertTrue(comparator.compare(rowABytes, rowBBytes) < 0);
//
// rowA = new HStoreKey(".META.,testtable,www.hbase.org/,1234,4321",
// "", Long.MAX_VALUE, HRegionInfo.ROOT_REGIONINFO);
// rowB = new HStoreKey(".META.,testtable,www.hbase.org/%20,99999,99999",
// "", Long.MAX_VALUE, HRegionInfo.ROOT_REGIONINFO);
// assertTrue(rowA.compareTo(rowB) > 0);
// assertTrue(comparator.compare(rowABytes, rowBBytes) > 0);
// }
} }


@@ -66,7 +66,7 @@ public class TestBatchUpdate extends HBaseClusterTestCase {
    desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
    desc.addFamily(new HColumnDescriptor(SMALLFAM,
      HColumnDescriptor.DEFAULT_VERSIONS,
      HColumnDescriptor.DEFAULT_COMPRESSION,
      HColumnDescriptor.DEFAULT_IN_MEMORY,
      HColumnDescriptor.DEFAULT_BLOCKCACHE, SMALL_LENGTH,
      HColumnDescriptor.DEFAULT_TTL, HColumnDescriptor.DEFAULT_BLOOMFILTER));
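// Illustrative only (not part of this test): with the compression argument now a
// plain String, a family that actually wants compression is declared along these
// lines. The "GZ" name is an assumption based on the new gz/lzo/none choices.
static HColumnDescriptor exampleCompressedFamily() {
  return new HColumnDescriptor(Bytes.toBytes("compressed:"),
      HColumnDescriptor.DEFAULT_VERSIONS, "GZ",
      HColumnDescriptor.DEFAULT_IN_MEMORY, HColumnDescriptor.DEFAULT_BLOCKCACHE,
      HColumnDescriptor.DEFAULT_LENGTH, HColumnDescriptor.DEFAULT_TTL,
      HColumnDescriptor.DEFAULT_BLOOMFILTER);
}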


@@ -89,4 +89,4 @@ public class TestStopRowFilter extends TestCase {
    assertFalse("Filter a null", filter.filterRowKey(null));
  }
}


@@ -35,10 +35,10 @@ import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.RawComparator;
/**
 * test hfile features.
@@ -129,7 +129,8 @@ public class TestHFile extends TestCase {
  void basicWithSomeCodec(String codec) throws IOException {
    Path ncTFile = new Path(ROOT_DIR, "basic.hfile");
    FSDataOutputStream fout = createFSOutput(ncTFile);
-   Writer writer = new Writer(fout, minBlockSize, codec, null);
+   Writer writer = new Writer(fout, minBlockSize,
+     Compression.getCompressionAlgorithmByName(codec), null, false);
    LOG.info(writer);
    writeRecords(writer);
    fout.close();
@@ -192,7 +193,8 @@ public class TestHFile extends TestCase {
  private void metablocks(final String compress) throws Exception {
    Path mFile = new Path(ROOT_DIR, "meta.tfile");
    FSDataOutputStream fout = createFSOutput(mFile);
-   Writer writer = new Writer(fout, minBlockSize, compress, null);
+   Writer writer = new Writer(fout, minBlockSize,
+     Compression.getCompressionAlgorithmByName(compress), null, false);
    someTestingWithMetaBlock(writer);
    writer.close();
    fout.close();
@@ -227,8 +229,8 @@ public class TestHFile extends TestCase {
  public void testComparator() throws IOException {
    Path mFile = new Path(ROOT_DIR, "meta.tfile");
    FSDataOutputStream fout = createFSOutput(mFile);
-   Writer writer = new Writer(fout, minBlockSize, "none",
-     new RawComparator<byte []>() {
+   Writer writer = new Writer(fout, minBlockSize, null,
+     new HStoreKey.StoreKeyComparator() {
      @Override
      public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
          int l2) {
@@ -239,7 +241,7 @@ public class TestHFile extends TestCase {
      public int compare(byte[] o1, byte[] o2) {
        return compare(o1, 0, o1.length, o2, 0, o2.length);
      }
-   });
+   }, false);
    writer.append("3".getBytes(), "0".getBytes());
    writer.append("2".getBytes(), "0".getBytes());
    writer.append("1".getBytes(), "0".getBytes());


@@ -168,7 +168,7 @@ public class TestBloomFilters extends HBaseClusterTestCase {
    desc.addFamily(
      new HColumnDescriptor(CONTENTS, // Column name
        1, // Max versions
-       HColumnDescriptor.CompressionType.NONE, // no compression
+       HColumnDescriptor.DEFAULT_COMPRESSION, // no compression
        HColumnDescriptor.DEFAULT_IN_MEMORY, // not in memory
        HColumnDescriptor.DEFAULT_BLOCKCACHE,
        HColumnDescriptor.DEFAULT_LENGTH,


@@ -186,7 +186,7 @@ public class TestCompaction extends HBaseTestCase {
  private void createSmallerStoreFile(final HRegion region) throws IOException {
    HRegionIncommon loader = new HRegionIncommon(region);
    addContent(loader, Bytes.toString(COLUMN_FAMILY),
-     ("bbb" + PUNCTUATION).getBytes(), null);
+     ("bbb").getBytes(), null);
    loader.flushcache();
  }
}


@@ -28,12 +28,17 @@ import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.filter.StopRowFilter;
+import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -55,10 +60,20 @@ public class TestScanner extends HBaseTestCase {
    HConstants.COL_STARTCODE
  };
- private static final byte [] ROW_KEY =
-   HRegionInfo.ROOT_REGIONINFO.getRegionName();
- private static final HRegionInfo REGION_INFO =
-   HRegionInfo.ROOT_REGIONINFO;
+ static final HTableDescriptor TESTTABLEDESC =
+   new HTableDescriptor("testscanner");
+ static {
+   TESTTABLEDESC.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY,
+     10, // Ten is arbitrary number. Keep versions to help debuggging.
+     Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
+     Integer.MAX_VALUE, HConstants.FOREVER, false));
+ }
+ /** HRegionInfo for root region */
+ public static final HRegionInfo REGION_INFO =
+   new HRegionInfo(TESTTABLEDESC, HConstants.EMPTY_BYTE_ARRAY,
+     HConstants.EMPTY_BYTE_ARRAY);
+ private static final byte [] ROW_KEY = REGION_INFO.getRegionName();
  private static final long START_CODE = Long.MAX_VALUE;
@@ -75,141 +90,23 @@ public class TestScanner extends HBaseTestCase {
    super.setUp();
  }
/** Compare the HRegionInfo we read from HBase to what we stored */
private void validateRegionInfo(byte [] regionBytes) throws IOException {
HRegionInfo info =
(HRegionInfo) Writables.getWritable(regionBytes, new HRegionInfo());
assertEquals(REGION_INFO.getRegionId(), info.getRegionId());
assertEquals(0, info.getStartKey().length);
assertEquals(0, info.getEndKey().length);
assertEquals(0, Bytes.compareTo(info.getRegionName(), REGION_INFO.getRegionName()));
assertEquals(0, info.getTableDesc().compareTo(REGION_INFO.getTableDesc()));
}
/** Use a scanner to get the region info and then validate the results */
private void scan(boolean validateStartcode, String serverName)
throws IOException {
InternalScanner scanner = null;
TreeMap<byte [], Cell> results =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
HStoreKey key = new HStoreKey();
byte [][][] scanColumns = {
COLS,
EXPLICIT_COLS
};
for(int i = 0; i < scanColumns.length; i++) {
try {
scanner = r.getScanner(scanColumns[i], FIRST_ROW,
System.currentTimeMillis(), null);
while (scanner.next(key, results)) {
assertTrue(results.containsKey(HConstants.COL_REGIONINFO));
byte [] val = results.get(HConstants.COL_REGIONINFO).getValue();
validateRegionInfo(val);
if(validateStartcode) {
assertTrue(results.containsKey(HConstants.COL_STARTCODE));
val = results.get(HConstants.COL_STARTCODE).getValue();
assertNotNull(val);
assertFalse(val.length == 0);
long startCode = Bytes.toLong(val);
assertEquals(START_CODE, startCode);
}
if(serverName != null) {
assertTrue(results.containsKey(HConstants.COL_SERVER));
val = results.get(HConstants.COL_SERVER).getValue();
assertNotNull(val);
assertFalse(val.length == 0);
String server = Bytes.toString(val);
assertEquals(0, server.compareTo(serverName));
}
results.clear();
}
} finally {
InternalScanner s = scanner;
scanner = null;
if(s != null) {
s.close();
}
}
}
}
/** Use get to retrieve the HRegionInfo and validate it */
private void getRegionInfo() throws IOException {
byte [] bytes = region.get(ROW_KEY, HConstants.COL_REGIONINFO).getValue();
validateRegionInfo(bytes);
}
/**
* HBase-910.
* @throws Exception
*/
public void testScanAndConcurrentFlush() throws Exception {
this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
HRegionIncommon hri = new HRegionIncommon(r);
try {
LOG.info("Added: " +
addContent(hri, Bytes.toString(HConstants.COL_REGIONINFO)));
int count = count(hri, -1);
assertEquals(count, count(hri, 100));
assertEquals(count, count(hri, 0));
assertEquals(count, count(hri, count - 1));
} finally {
this.r.close();
this.r.getLog().closeAndDelete();
shutdownDfs(cluster);
}
}
/*
* @param hri Region
* @param flushIndex At what row we start the flush.
* @return Count of rows found.
* @throws IOException
*/
private int count(final HRegionIncommon hri, final int flushIndex)
throws IOException {
LOG.info("Taking out counting scan");
ScannerIncommon s = hri.getScanner(EXPLICIT_COLS,
HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP);
HStoreKey key = new HStoreKey();
SortedMap<byte [], Cell> values =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
int count = 0;
while (s.next(key, values)) {
count++;
if (flushIndex == count) {
LOG.info("Starting flush at flush index " + flushIndex);
hri.flushcache();
LOG.info("Finishing flush");
}
}
s.close();
LOG.info("Found " + count + " items");
return count;
}
  /** The test!
   * @throws IOException
   */
  public void testScanner() throws IOException {
    try {
-     r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
+     r = createNewHRegion(TESTTABLEDESC, null, null);
      region = new HRegionIncommon(r);
      // Write information to the meta table
-     BatchUpdate batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
+     BatchUpdate batchUpdate =
+       new BatchUpdate(ROW_KEY, System.currentTimeMillis());
      ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
      DataOutputStream s = new DataOutputStream(byteStream);
-     HRegionInfo.ROOT_REGIONINFO.write(s);
+     REGION_INFO.write(s);
      batchUpdate.put(HConstants.COL_REGIONINFO, byteStream.toByteArray());
      region.commit(batchUpdate);
@@ -313,4 +210,157 @@ public class TestScanner extends HBaseTestCase {
      shutdownDfs(cluster);
    }
  }
/** Compare the HRegionInfo we read from HBase to what we stored */
private void validateRegionInfo(byte [] regionBytes) throws IOException {
HRegionInfo info =
(HRegionInfo) Writables.getWritable(regionBytes, new HRegionInfo());
assertEquals(REGION_INFO.getRegionId(), info.getRegionId());
assertEquals(0, info.getStartKey().length);
assertEquals(0, info.getEndKey().length);
assertEquals(0, Bytes.compareTo(info.getRegionName(), REGION_INFO.getRegionName()));
assertEquals(0, info.getTableDesc().compareTo(REGION_INFO.getTableDesc()));
}
/** Use a scanner to get the region info and then validate the results */
private void scan(boolean validateStartcode, String serverName)
throws IOException {
InternalScanner scanner = null;
TreeMap<byte [], Cell> results =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
HStoreKey key = new HStoreKey();
byte [][][] scanColumns = {
COLS,
EXPLICIT_COLS
};
for(int i = 0; i < scanColumns.length; i++) {
try {
scanner = r.getScanner(scanColumns[i], FIRST_ROW,
System.currentTimeMillis(), null);
while (scanner.next(key, results)) {
assertTrue(results.containsKey(HConstants.COL_REGIONINFO));
byte [] val = results.get(HConstants.COL_REGIONINFO).getValue();
validateRegionInfo(val);
if(validateStartcode) {
assertTrue(results.containsKey(HConstants.COL_STARTCODE));
val = results.get(HConstants.COL_STARTCODE).getValue();
assertNotNull(val);
assertFalse(val.length == 0);
long startCode = Bytes.toLong(val);
assertEquals(START_CODE, startCode);
}
if(serverName != null) {
assertTrue(results.containsKey(HConstants.COL_SERVER));
val = results.get(HConstants.COL_SERVER).getValue();
assertNotNull(val);
assertFalse(val.length == 0);
String server = Bytes.toString(val);
assertEquals(0, server.compareTo(serverName));
}
results.clear();
}
} finally {
InternalScanner s = scanner;
scanner = null;
if(s != null) {
s.close();
}
}
}
}
/** Use get to retrieve the HRegionInfo and validate it */
private void getRegionInfo() throws IOException {
byte [] bytes = region.get(ROW_KEY, HConstants.COL_REGIONINFO).getValue();
validateRegionInfo(bytes);
}
/**
* Test basic stop row filter works.
*/
public void testStopRow() throws Exception {
byte [] startrow = Bytes.toBytes("bbb");
byte [] stoprow = Bytes.toBytes("ccc");
try {
this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
addContent(this.r, HConstants.COLUMN_FAMILY);
InternalScanner s = r.getScanner(HConstants.COLUMN_FAMILY_ARRAY,
startrow, HConstants.LATEST_TIMESTAMP,
new WhileMatchRowFilter(new StopRowFilter(stoprow)));
HStoreKey key = new HStoreKey();
SortedMap<byte [], Cell> results =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
int count = 0;
for (boolean first = true; s.next(key, results);) {
if (first) {
assertTrue(Bytes.BYTES_COMPARATOR.compare(startrow, key.getRow()) == 0);
first = false;
}
count++;
}
assertTrue(Bytes.BYTES_COMPARATOR.compare(stoprow, key.getRow()) > 0);
// We got something back.
assertTrue(count > 10);
s.close();
} finally {
this.r.close();
this.r.getLog().closeAndDelete();
shutdownDfs(this.cluster);
}
}
/**
* HBase-910.
* @throws Exception
*/
public void testScanAndConcurrentFlush() throws Exception {
this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
HRegionIncommon hri = new HRegionIncommon(r);
try {
LOG.info("Added: " +
addContent(hri, Bytes.toString(HConstants.COL_REGIONINFO)));
int count = count(hri, -1);
assertEquals(count, count(hri, 100));
assertEquals(count, count(hri, 0));
assertEquals(count, count(hri, count - 1));
} finally {
this.r.close();
this.r.getLog().closeAndDelete();
shutdownDfs(cluster);
}
}
/*
* @param hri Region
* @param flushIndex At what row we start the flush.
* @return Count of rows found.
* @throws IOException
*/
private int count(final HRegionIncommon hri, final int flushIndex)
throws IOException {
LOG.info("Taking out counting scan");
ScannerIncommon s = hri.getScanner(EXPLICIT_COLS,
HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP);
HStoreKey key = new HStoreKey();
SortedMap<byte [], Cell> values =
new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
int count = 0;
while (s.next(key, values)) {
count++;
if (flushIndex == count) {
LOG.info("Starting flush at flush index " + flushIndex);
hri.flushcache();
LOG.info("Finishing flush");
}
}
s.close();
LOG.info("Found " + count + " items");
return count;
}
}


@@ -70,7 +70,7 @@ public class TestStoreFile extends HBaseTestCase {
    // Make up a directory hierarchy that has a regiondir and familyname.
    HFile.Writer writer = StoreFile.getWriter(this.fs,
      new Path(new Path(this.testDir, "regionname"), "familyname"),
-     2 * 1024, null, null);
+     2 * 1024, null, null, false);
    writeStoreFile(writer);
    checkHalfHFile(new StoreFile(this.fs, writer.getPath()));
  }
@@ -107,7 +107,8 @@ public class TestStoreFile extends HBaseTestCase {
    Path storedir = new Path(new Path(this.testDir, "regionname"), "familyname");
    Path dir = new Path(storedir, "1234567890");
    // Make a store file and write data to it.
-   HFile.Writer writer = StoreFile.getWriter(this.fs, dir, 8 * 1024, null, null);
+   HFile.Writer writer = StoreFile.getWriter(this.fs, dir, 8 * 1024, null,
+     null, false);
    writeStoreFile(writer);
    StoreFile hsf = new StoreFile(this.fs, writer.getPath());
    HFile.Reader reader = hsf.getReader();


@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TimestampTestBase;
-import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
import org.apache.hadoop.hbase.util.Bytes;
/**
@@ -79,8 +78,8 @@ public class TestTimestamp extends HBaseClusterTestCase {
  private HRegion createRegion() throws IOException {
    HTableDescriptor htd = createTableDescriptor(getName());
    htd.addFamily(new HColumnDescriptor(COLUMN, VERSIONS,
-     CompressionType.NONE, false, false, Integer.MAX_VALUE,
-     HConstants.FOREVER, false));
+     HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
+     Integer.MAX_VALUE, HConstants.FOREVER, false));
    return createNewHRegion(htd, null, null);
  }
}