HBASE-868 and HBASE-871 Incrementing binary rows cause strange behavior once table splits AND Major compaction periodicity should be specifyable at the column family level, not cluster wide
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@692963 13f79535-47bb-0310-9956-ffa450edef68
parent 2e13c047ab
commit 3b540886df
@@ -58,6 +58,8 @@ Release 0.18.0 - Unreleased
   HBASE-864   Deadlock in regionserver
   HBASE-865   Fix javadoc warnings (Rong-En Fan via Jim Kellerman)
   HBASE-872   Getting exceptions in shell when creating/disabling tables
+  HBASE-868   Incrementing binary rows cause strange behavior once table
+              splits (Jonathan Gray via Stack)

  IMPROVEMENTS
   HBASE-801   When a table haven't disable, shell could response in a "user
@@ -78,6 +80,8 @@ Release 0.18.0 - Unreleased
              (Sishen Freecity via Stack)
   HBASE-874   deleting a table kills client rpc; no subsequent communication if
              shell or thrift server, etc. (Jonathan Gray via Jim Kellerman)
+  HBASE-871   Major compaction periodicity should be specifyable at the column
+              family level, not cluster wide (Jonathan Gray via Stack)

  NEW FEATURES
   HBASE-787   Postgresql to HBase table replication example (Tim Sell via Stack)

@@ -92,6 +92,9 @@ public interface HConstants {

   /** Parameter name for how often threads should wake up */
   static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";

+  /** Parameter name for how often a region should perform a major compaction */
+  static final String MAJOR_COMPACTION_PERIOD = "hbase.hregion.majorcompaction";
+
   /** Parameter name for HBase instance root directory */
   static final String HBASE_DIR = "hbase.rootdir";

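HBASE-871 hoists the `hbase.hregion.majorcompaction` property name into a constant. A minimal sketch of the cluster-wide lookup HStore now performs; the class name here is hypothetical, and 86400000 ms (one day) is the default the HStore constructor passes:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

// Hypothetical example class; not part of this patch.
public class MajorCompactionPeriodExample {
  public static void main(String[] args) {
    // Reads hbase-default.xml/hbase-site.xml from the classpath.
    HBaseConfiguration conf = new HBaseConfiguration();
    // Same lookup HStore's constructor performs after this change.
    long period = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 86400000L);
    System.out.println("major compaction period (ms): " + period);
  }
}
```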
@@ -23,6 +23,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;

+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JenkinsHash;
 import org.apache.hadoop.io.VersionedWritable;
@@ -397,12 +398,12 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
     }

     // Compare start keys.
-    result = Bytes.compareTo(this.startKey, other.startKey);
+    result = HStoreKey.compareTwoRowKeys(other, this.startKey, other.startKey);
     if (result != 0) {
       return result;
     }

     // Compare end keys.
-    return Bytes.compareTo(this.endKey, other.endKey);
+    return HStoreKey.compareTwoRowKeys(other, this.endKey, other.endKey);
   }
 }

@@ -26,6 +26,7 @@ import java.io.IOException;

 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.WritableComparator;

 /**
  * A Key for a stored row.
@@ -73,6 +74,29 @@ public class HStoreKey implements WritableComparable
     this(row, Long.MAX_VALUE);
   }

+  /**
+   * Create an HStoreKey specifying the row and region info.
+   * The column defaults to the empty string and the timestamp to
+   * Long.MAX_VALUE.
+   *
+   * @param row row key
+   * @param hri HRegionInfo
+   */
+  public HStoreKey(final byte [] row, final HRegionInfo hri) {
+    this(row, HConstants.EMPTY_BYTE_ARRAY, hri);
+  }
+
+  /**
+   * Create an HStoreKey specifying the row, timestamp and region info.
+   * The column defaults to the empty string.
+   *
+   * @param row row key
+   * @param timestamp timestamp value
+   * @param hri HRegionInfo
+   */
+  public HStoreKey(final byte [] row, long timestamp, final HRegionInfo hri) {
+    this(row, HConstants.EMPTY_BYTE_ARRAY, timestamp, hri);
+  }
+
   /**
    * Create an HStoreKey specifying the row and timestamp
    * The column and table names default to the empty string
@@ -188,7 +212,7 @@ public class HStoreKey implements WritableComparable {
    * @param other the source key
    */
   public HStoreKey(HStoreKey other) {
-    this(other.row, other.column, other.timestamp);
+    this(other.row, other.column, other.timestamp, other.regionInfo);
   }

   /**
@@ -257,7 +281,7 @@ public class HStoreKey implements WritableComparable {
    * @see #matchesRowFamily(HStoreKey)
    */
   public boolean matchesRowCol(HStoreKey other) {
-    return Bytes.equals(this.row, other.row) &&
+    return HStoreKey.equalsTwoRowKeys(this.regionInfo, this.row, other.row) &&
       Bytes.equals(column, other.column);
   }

@@ -271,7 +295,7 @@ public class HStoreKey implements WritableComparable {
    * @see #matchesRowFamily(HStoreKey)
    */
   public boolean matchesWithoutColumn(HStoreKey other) {
-    return Bytes.equals(this.row, other.row) &&
+    return equalsTwoRowKeys(this.regionInfo, this.row, other.row) &&
       this.timestamp >= other.getTimestamp();
   }

@@ -286,7 +310,7 @@ public class HStoreKey implements WritableComparable {
    */
   public boolean matchesRowFamily(HStoreKey that) {
     int delimiterIndex = getFamilyDelimiterIndex(this.column);
-    return Bytes.equals(this.row, that.row) &&
+    return equalsTwoRowKeys(this.regionInfo, this.row, that.row) &&
       Bytes.compareTo(this.column, 0, delimiterIndex, that.column, 0,
         delimiterIndex) == 0;
   }
@@ -317,15 +341,19 @@ public class HStoreKey implements WritableComparable {

   /** {@inheritDoc} */
   public int compareTo(Object o) {
-    HStoreKey other = (HStoreKey)o;
-    int result = compareTwoRowKeys(this.regionInfo, this.row, other.row);
+    return compareTo(this.regionInfo, this, (HStoreKey)o);
+  }
+
+  static int compareTo(final HRegionInfo hri, final HStoreKey left,
+      final HStoreKey right) {
+    int result = compareTwoRowKeys(hri, left.getRow(), right.getRow());
     if (result != 0) {
       return result;
     }
-    result = this.column == null && other.column == null? 0:
-      this.column == null && other.column != null? -1:
-      this.column != null && other.column == null? 1:
-      Bytes.compareTo(this.column, other.column);
+    result = left.getColumn() == null && right.getColumn() == null? 0:
+      left.getColumn() == null && right.getColumn() != null? -1:
+      left.getColumn() != null && right.getColumn() == null? 1:
+      Bytes.compareTo(left.getColumn(), right.getColumn());
     if (result != 0) {
       return result;
     }
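The ordering this static helper produces is worth spelling out: rows ascend, null columns sort first, and timestamps sort descending so the newest version of a cell is encountered first. A minimal sketch using the public instance compareTo, which now delegates to the helper (the class name is hypothetical; FIRST_META_REGIONINFO just supplies a region info to construct keys with):

```java
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical example class; not part of this patch.
public class HStoreKeyOrderExample {
  public static void main(String[] args) {
    HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
    HStoreKey older = new HStoreKey(Bytes.toBytes("row"), Bytes.toBytes("f:c"), 10L, hri);
    HStoreKey newer = new HStoreKey(Bytes.toBytes("row"), Bytes.toBytes("f:c"), 20L, hri);
    // Timestamps sort descending: the newer cell compares as "smaller",
    // so scans trip over the freshest version first.
    System.out.println(newer.compareTo(older) < 0); // prints true
  }
}
```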
@@ -333,9 +361,9 @@ public class HStoreKey implements WritableComparable {
     // wrong but it is intentional. This way, newer timestamps are first
     // found when we iterate over a memcache and newer versions are the
     // first we trip over when reading from a store file.
-    if (this.timestamp < other.timestamp) {
+    if (left.getTimestamp() < right.getTimestamp()) {
       result = 1;
-    } else if (this.timestamp > other.timestamp) {
+    } else if (left.getTimestamp() > right.getTimestamp()) {
       result = -1;
     }
     return result;
@@ -482,9 +510,8 @@ public class HStoreKey implements WritableComparable {
       if(rowCompare == 0)
         rowCompare = Bytes.compareTo(keysA[1], KeysB[1]);
       return rowCompare;
-    } else {
-      return Bytes.compareTo(rowA, rowB);
     }
+    return Bytes.compareTo(rowA, rowB);
   }

   /**
@@ -513,10 +540,14 @@ public class HStoreKey implements WritableComparable {
         break;
       }
     }
-    byte [] row = new byte[offset];
-    System.arraycopy(rowKey, 0, row, 0, offset);
-    byte [] timestamp = new byte[rowKey.length - offset - 1];
-    System.arraycopy(rowKey, offset+1, timestamp, 0, rowKey.length - offset - 1);
+    byte [] row = rowKey;
+    byte [] timestamp = HConstants.EMPTY_BYTE_ARRAY;
+    if (offset != -1) {
+      row = new byte[offset];
+      System.arraycopy(rowKey, 0, row, 0, offset);
+      timestamp = new byte[rowKey.length - offset - 1];
+      System.arraycopy(rowKey, offset+1, timestamp, 0, rowKey.length - offset - 1);
+    }
     byte[][] elements = new byte[2][];
     elements[0] = row;
     elements[1] = timestamp;
@@ -538,4 +569,20 @@ public class HStoreKey implements WritableComparable {
     this.column = Bytes.readByteArray(in);
     this.timestamp = in.readLong();
   }
+
+  /**
+   * Passed as comparator for memcache and for store files. See HBASE-868.
+   */
+  public static class HStoreKeyWritableComparator extends WritableComparator {
+    private final HRegionInfo hri;
+
+    public HStoreKeyWritableComparator(final HRegionInfo hri) {
+      super(HStoreKey.class);
+      this.hri = hri;
+    }
+
+    public int compare(final WritableComparable left, final WritableComparable right) {
+      return compareTo(this.hri, (HStoreKey)left, (HStoreKey)right);
+    }
+  }
 }

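The comparator exists so that sorted structures keyed by HStoreKey can honor region-aware row ordering. A minimal usage sketch mirroring what Memcache does further down in this patch (the class name is hypothetical):

```java
import java.util.Collections;
import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HStoreKey;

// Hypothetical example class; not part of this patch.
public class ComparatorUsageExample {
  public static void main(String[] args) {
    // For a .META. region, row keys (table,startrow,timestamp) are compared
    // by their parts instead of as raw bytes; see compareTwoRowKeys.
    SortedMap<HStoreKey, byte[]> edits = Collections.synchronizedSortedMap(
      new TreeMap<HStoreKey, byte[]>(
        new HStoreKey.HStoreKeyWritableComparator(
          HRegionInfo.FIRST_META_REGIONINFO)));
    System.out.println(edits.isEmpty()); // prints true
  }
}
```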
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MasterNotRunningException;
@@ -323,7 +324,7 @@ public class HConnectionManager implements HConstants {
         if (currentRegion != null) {
           byte[] endKey = currentRegion.getEndKey();
           if (endKey == null ||
-              Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)) {
+              HStoreKey.equalsTwoRowKeys(currentRegion, endKey, HConstants.EMPTY_BYTE_ARRAY)) {
             // We have reached the end of the table and we're done
             break;
           }
@@ -636,8 +637,10 @@ public class HConnectionManager implements HConstants {
         // this one. the exception case is when the endkey is EMPTY_START_ROW,
         // signifying that the region we're checking is actually the last
         // region in the table.
-        if (Bytes.equals(endKey, HConstants.EMPTY_END_ROW) ||
-            Bytes.compareTo(endKey, row) > 0) {
+        if (HStoreKey.equalsTwoRowKeys(possibleRegion.getRegionInfo(),
+            endKey, HConstants.EMPTY_END_ROW) ||
+            HStoreKey.compareTwoRowKeys(possibleRegion.getRegionInfo(),
+            endKey, row) > 0) {
           return possibleRegion;
         }
       }
@@ -684,7 +687,8 @@ public class HConnectionManager implements HConstants {

         // by nature of the map, we know that the start key has to be <
         // otherwise it wouldn't be in the headMap.
-        if (Bytes.compareTo(endKey, row) <= 0) {
+        if (HStoreKey.compareTwoRowKeys(possibleRegion.getRegionInfo(),
+            endKey, row) <= 0) {
           // delete any matching entry
           HRegionLocation rl =
             tableLocations.remove(matchingRegions.lastKey());

@@ -5,6 +5,7 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -47,9 +48,10 @@ class MetaScanner implements HConstants {
       HRegionInfo.createRegionName(tableName, null, ZEROES);

     // Scan over each meta region
+    ScannerCallable callable = null;
     do {
-      ScannerCallable callable = new ScannerCallable(connection,
-        META_TABLE_NAME, COLUMN_FAMILY_ARRAY, startRow, LATEST_TIMESTAMP, null);
+      callable = new ScannerCallable(connection, META_TABLE_NAME,
+        COLUMN_FAMILY_ARRAY, startRow, LATEST_TIMESTAMP, null);
       // Open scanner
       connection.getRegionServerWithRetries(callable);
       try {
@@ -67,7 +69,7 @@ class MetaScanner implements HConstants {
         callable.setClose();
         connection.getRegionServerWithRetries(callable);
       }
-    } while (Bytes.compareTo(startRow, LAST_ROW) != 0);
+    } while (HStoreKey.compareTwoRowKeys(callable.getHRegionInfo(), startRow, LAST_ROW) != 0);
   }

   /**

@@ -20,7 +20,9 @@
 package org.apache.hadoop.hbase.master;

 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.util.Bytes;

@@ -90,7 +92,8 @@ public class MetaRegion implements Comparable<MetaRegion> {
   public int compareTo(MetaRegion other) {
     int result = Bytes.compareTo(this.regionName, other.getRegionName());
     if(result == 0) {
-      result = Bytes.compareTo(this.startKey, other.getStartKey());
+      result = HStoreKey.compareTwoRowKeys(HRegionInfo.FIRST_META_REGIONINFO,
+        this.startKey, other.getStartKey());
       if (result == 0) {
         // Might be on different host?
         result = this.server.compareTo(other.server);
@@ -98,4 +101,4 @@ public class MetaRegion implements Comparable<MetaRegion> {
       }
     }
     return result;
   }
 }

@@ -109,7 +109,7 @@ public class HRegion implements HConstants {
   static final Log LOG = LogFactory.getLog(HRegion.class);
   final AtomicBoolean closed = new AtomicBoolean(false);
   private final RegionHistorian historian;

   /**
    * Merge two HRegions. The regions must be adjacent and must not overlap.
    *
@@ -132,12 +132,14 @@ public class HRegion implements HConstants {
       }
       // A's start key is null but B's isn't. Assume A comes before B
     } else if ((srcB.getStartKey() == null) // A is not null but B is
-        || (Bytes.compareTo(srcA.getStartKey(), srcB.getStartKey()) > 0)) { // A > B
+        || (HStoreKey.compareTwoRowKeys(srcA.getRegionInfo(),
+            srcA.getStartKey(), srcB.getStartKey()) > 0)) { // A > B
       a = srcB;
       b = srcA;
     }

-    if (!Bytes.equals(a.getEndKey(), b.getStartKey())) {
+    if (!HStoreKey.equalsTwoRowKeys(srcA.getRegionInfo(),
+        a.getEndKey(), b.getStartKey())) {
       throw new IOException("Cannot merge non-adjacent regions");
     }
     return merge(a, b);
@@ -181,13 +183,19 @@ public class HRegion implements HConstants {
     HTableDescriptor tabledesc = a.getTableDesc();
     HLog log = a.getLog();
     Path basedir = a.getBaseDir();
-    final byte [] startKey = Bytes.equals(a.getStartKey(), EMPTY_BYTE_ARRAY) ||
-      Bytes.equals(b.getStartKey(), EMPTY_BYTE_ARRAY) ? EMPTY_BYTE_ARRAY :
-      Bytes.compareTo(a.getStartKey(), b.getStartKey()) <= 0 ?
+    final byte [] startKey = HStoreKey.equalsTwoRowKeys(a.getRegionInfo(),
+        a.getStartKey(), EMPTY_BYTE_ARRAY) ||
+      HStoreKey.equalsTwoRowKeys(a.getRegionInfo(),
+        b.getStartKey(), EMPTY_BYTE_ARRAY) ? EMPTY_BYTE_ARRAY :
+      HStoreKey.compareTwoRowKeys(a.getRegionInfo(), a.getStartKey(),
+        b.getStartKey()) <= 0 ?
         a.getStartKey() : b.getStartKey();
-    final byte [] endKey = Bytes.equals(a.getEndKey(), EMPTY_BYTE_ARRAY) ||
-      Bytes.equals(b.getEndKey(), EMPTY_BYTE_ARRAY) ? EMPTY_BYTE_ARRAY :
-      Bytes.compareTo(a.getEndKey(), b.getEndKey()) <= 0 ?
+    final byte [] endKey = HStoreKey.equalsTwoRowKeys(a.getRegionInfo(),
+        a.getEndKey(), EMPTY_BYTE_ARRAY) ||
+      HStoreKey.equalsTwoRowKeys(b.getRegionInfo(), b.getEndKey(),
+        EMPTY_BYTE_ARRAY) ? EMPTY_BYTE_ARRAY :
+      HStoreKey.compareTwoRowKeys(a.getRegionInfo(), a.getEndKey(),
+        b.getEndKey()) <= 0 ?
         b.getEndKey() : a.getEndKey();

     HRegionInfo newRegionInfo = new HRegionInfo(tabledesc, startKey, endKey);
@@ -232,7 +240,7 @@ public class HRegion implements HConstants {
     }
     for (HStoreFile hsf: srcFiles) {
       HStoreFile dst = new HStoreFile(conf, fs, basedir,
-        newRegionInfo.getEncodedName(), colFamily, -1, null);
+        newRegionInfo, colFamily, -1, null);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Renaming " + hsf + " to " + dst);
       }
@@ -718,12 +726,12 @@ public class HRegion implements HConstants {
     // Add start/end key checking: hbase-428.
     byte [] startKey = this.regionInfo.getStartKey();
     byte [] endKey = this.regionInfo.getEndKey();
-    if (Bytes.equals(startKey, midKey)) {
+    if (HStoreKey.equalsTwoRowKeys(this.regionInfo, startKey, midKey)) {
       LOG.debug("Startkey (" + startKey + ") and midkey + (" +
         midKey + ") are same, not splitting");
       return null;
     }
-    if (Bytes.equals(midKey, endKey)) {
+    if (HStoreKey.equalsTwoRowKeys(this.regionInfo, midKey, endKey)) {
       LOG.debug("Endkey and midkey are same, not splitting");
       return null;
     }
@@ -769,15 +777,15 @@ public class HRegion implements HConstants {
     // A reference to the bottom half of the hsf store file.
     HStoreFile.Reference aReference = new HStoreFile.Reference(
       this.regionInfo.getEncodedName(), h.getFileId(),
-      new HStoreKey(midKey), HStoreFile.Range.bottom);
+      new HStoreKey(midKey, this.regionInfo), HStoreFile.Range.bottom);
     HStoreFile a = new HStoreFile(this.conf, fs, splits,
-      regionAInfo.getEncodedName(), h.getColFamily(), -1, aReference);
+      regionAInfo, h.getColFamily(), -1, aReference);
     // Reference to top half of the hsf store file.
     HStoreFile.Reference bReference = new HStoreFile.Reference(
       this.regionInfo.getEncodedName(), h.getFileId(),
       new HStoreKey(midKey), HStoreFile.Range.top);
     HStoreFile b = new HStoreFile(this.conf, fs, splits,
-      regionBInfo.getEncodedName(), h.getColFamily(), -1, bReference);
+      regionBInfo, h.getColFamily(), -1, bReference);
     h.splitStoreFile(a, b, this.fs);
   }
@@ -1142,7 +1150,7 @@ public class HRegion implements HConstants {
     checkRow(row);
     checkColumn(column);
     // Don't need a row lock for a simple get
-    HStoreKey key = new HStoreKey(row, column, timestamp);
+    HStoreKey key = new HStoreKey(row, column, timestamp, this.regionInfo);
     Cell[] result = getStore(column).get(key, numVersions);
     // Guarantee that we return null instead of a zero-length array,
     // if there are no results to return.
@@ -1178,7 +1186,7 @@ public class HRegion implements HConstants {
         checkColumn(column);
       }
     }
-    HStoreKey key = new HStoreKey(row, ts);
+    HStoreKey key = new HStoreKey(row, ts, this.regionInfo);
     Integer lid = getLock(lockid, row);
     HashSet<HStore> storeSet = new HashSet<HStore>();
     try {
@@ -1242,14 +1250,14 @@ public class HRegion implements HConstants {
       byte [] closestKey = store.getRowKeyAtOrBefore(row);
       // if it happens to be an exact match, we can stop looping
       if (HStoreKey.equalsTwoRowKeys(regionInfo, row, closestKey)) {
-        key = new HStoreKey(closestKey);
+        key = new HStoreKey(closestKey, this.regionInfo);
         break;
       }
       // otherwise, we need to check if it's the max and move to the next
       if (closestKey != null
           && (key == null || HStoreKey.compareTwoRowKeys(
             regionInfo, closestKey, key.getRow()) > 0) ) {
-        key = new HStoreKey(closestKey);
+        key = new HStoreKey(closestKey, this.regionInfo);
       }
     }
     if (key == null) {
@@ -1401,7 +1409,8 @@ public class HRegion implements HConstants {
     try {
       List<byte []> deletes = null;
       for (BatchOperation op: b) {
-        HStoreKey key = new HStoreKey(row, op.getColumn(), commitTime);
+        HStoreKey key = new HStoreKey(row, op.getColumn(), commitTime,
+          this.regionInfo);
         byte[] val = null;
         if (op.isPut()) {
           val = op.getValue();
@@ -1524,7 +1533,7 @@ public class HRegion implements HConstants {
     long now = System.currentTimeMillis();
     try {
       for (HStore store : stores.values()) {
-        List<HStoreKey> keys = store.getKeys(new HStoreKey(row, ts),
+        List<HStoreKey> keys = store.getKeys(new HStoreKey(row, ts, this.regionInfo),
           ALL_VERSIONS, now);
         TreeMap<HStoreKey, byte []> edits = new TreeMap<HStoreKey, byte []>();
         for (HStoreKey key: keys) {
@@ -1557,8 +1566,8 @@ public class HRegion implements HConstants {
     // find the HStore for the column family
     HStore store = getStore(family);
     // find all the keys that match our criteria
-    List<HStoreKey> keys = store.getKeys(new HStoreKey(row, timestamp),
-      ALL_VERSIONS, now);
+    List<HStoreKey> keys = store.getKeys(new HStoreKey(row, timestamp,
+      this.regionInfo), ALL_VERSIONS, now);
     // delete all the cells
     TreeMap<HStoreKey, byte []> edits = new TreeMap<HStoreKey, byte []>();
     for (HStoreKey key: keys) {
@@ -1585,7 +1594,7 @@ public class HRegion implements HConstants {
       final long ts, final int versions)
   throws IOException {
     checkReadOnly();
-    HStoreKey origin = new HStoreKey(row, column, ts);
+    HStoreKey origin = new HStoreKey(row, column, ts, this.regionInfo);
     Set<HStoreKey> keys = getKeys(origin, versions);
     if (keys.size() > 0) {
       TreeMap<HStoreKey, byte []> edits = new TreeMap<HStoreKey, byte []>();
@@ -1881,7 +1890,7 @@ public class HRegion implements HConstants {
         }
       }
     }

     /** {@inheritDoc} */
     @Override
     public boolean equals(Object o) {
@@ -1958,7 +1967,7 @@ public class HRegion implements HConstants {
       this.resultSets = new TreeMap[scanners.length];
       this.keys = new HStoreKey[scanners.length];
       for (int i = 0; i < scanners.length; i++) {
-        keys[i] = new HStoreKey();
+        keys[i] = new HStoreKey(HConstants.EMPTY_BYTE_ARRAY, regionInfo);
         resultSets[i] = new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
         if(scanners[i] != null && !scanners[i].next(keys[i], resultSets[i])) {
           closeScanner(i);
@@ -1983,9 +1992,11 @@ public class HRegion implements HConstants {
       long chosenTimestamp = -1;
       for (int i = 0; i < this.keys.length; i++) {
         if (scanners[i] != null &&
-            (chosenRow == null ||
-            (Bytes.compareTo(keys[i].getRow(), chosenRow) < 0) ||
-            ((Bytes.compareTo(keys[i].getRow(), chosenRow) == 0) &&
+            (chosenRow == null ||
+            (HStoreKey.compareTwoRowKeys(regionInfo,
+              keys[i].getRow(), chosenRow) < 0) ||
+            ((HStoreKey.compareTwoRowKeys(regionInfo, keys[i].getRow(),
+              chosenRow) == 0) &&
             (keys[i].getTimestamp() > chosenTimestamp)))) {
           chosenRow = keys[i].getRow();
           chosenTimestamp = keys[i].getTimestamp();
@@ -2002,7 +2013,7 @@ public class HRegion implements HConstants {

       for (int i = 0; i < scanners.length; i++) {
         if (scanners[i] != null &&
-            Bytes.compareTo(keys[i].getRow(), chosenRow) == 0) {
+            HStoreKey.compareTwoRowKeys(regionInfo, keys[i].getRow(), chosenRow) == 0) {
           // NOTE: We used to do results.putAll(resultSets[i]);
           // but this had the effect of overwriting newer
           // values with older ones. So now we only insert
@@ -2024,7 +2035,7 @@ public class HRegion implements HConstants {
         // If the current scanner is non-null AND has a lower-or-equal
         // row label, then its timestamp is bad. We need to advance it.
         while ((scanners[i] != null) &&
-            (Bytes.compareTo(keys[i].getRow(), chosenRow) <= 0)) {
+            (HStoreKey.compareTwoRowKeys(regionInfo, keys[i].getRow(), chosenRow) <= 0)) {
          resultSets[i].clear();
          if (!scanners[i].next(keys[i], resultSets[i])) {
            closeScanner(i);
@@ -2206,7 +2217,8 @@ public class HRegion implements HConstants {
     byte [] row = r.getRegionName();
     Integer lid = meta.obtainRowLock(row);
     try {
-      HStoreKey key = new HStoreKey(row, COL_REGIONINFO, System.currentTimeMillis());
+      HStoreKey key = new HStoreKey(row, COL_REGIONINFO,
+        System.currentTimeMillis(), r.getRegionInfo());
       TreeMap<HStoreKey, byte[]> edits = new TreeMap<HStoreKey, byte[]>();
       edits.put(key, Writables.getBytes(r.getRegionInfo()));
       meta.update(edits);
@@ -2307,9 +2319,9 @@ public class HRegion implements HConstants {
    */
   public static boolean rowIsInRange(HRegionInfo info, final byte [] row) {
     return ((info.getStartKey().length == 0) ||
-      (Bytes.compareTo(info.getStartKey(), row) <= 0)) &&
+      (HStoreKey.compareTwoRowKeys(info, info.getStartKey(), row) <= 0)) &&
       ((info.getEndKey().length == 0) ||
-      (Bytes.compareTo(info.getEndKey(), row) > 0));
+      (HStoreKey.compareTwoRowKeys(info, info.getEndKey(), row) > 0));
   }

   /**

@@ -188,7 +188,12 @@ public class HStore implements HConstants {
     }
     this.desiredMaxFileSize = maxFileSize;

-    this.majorCompactionTime = conf.getLong("hbase.hregion.majorcompaction", 86400000);
+    this.majorCompactionTime = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 86400000);
+    if (family.getValue(HConstants.MAJOR_COMPACTION_PERIOD) != null) {
+      String strCompactionTime = family.getValue(HConstants.MAJOR_COMPACTION_PERIOD);
+      this.majorCompactionTime = (new Long(strCompactionTime)).longValue();
+    }
+
     this.maxFilesToCompact = conf.getInt("hbase.hstore.compaction.max", 10);
     this.storeSize = 0L;
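This hunk is the heart of HBASE-871: a per-family table attribute, when present, overrides the site-wide period. A hedged sketch of setting that attribute from the client side; it assumes HColumnDescriptor offers a setValue counterpart to the getValue call above, and that the one-argument constructors behave as in the 0.18-era API:

```java
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;

// Hypothetical example class; not part of this patch.
public class PerFamilyCompactionExample {
  public static void main(String[] args) {
    HTableDescriptor table = new HTableDescriptor("mytable");
    HColumnDescriptor family = new HColumnDescriptor("info"); // assumed ctor form
    // Per-family override of hbase.hregion.majorcompaction: HStore reads
    // family.getValue(MAJOR_COMPACTION_PERIOD) after the site-wide default.
    family.setValue(HConstants.MAJOR_COMPACTION_PERIOD, // assumed setter
      Long.toString(12 * 60 * 60 * 1000L)); // twelve hours, in milliseconds
    table.addFamily(family);
  }
}
```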
@@ -319,7 +324,8 @@ public class HStore implements HConstants {
           || !HStoreKey.matchingFamily(family.getName(), column)) {
         continue;
       }
-      HStoreKey k = new HStoreKey(key.getRow(), column, val.getTimestamp());
+      HStoreKey k = new HStoreKey(key.getRow(), column, val.getTimestamp(),
+        this.info);
       reconstructedCache.put(k, val.getVal());
       editsCount++;
       // Every 2k edits, tell the reporter we're making progress.
@@ -390,7 +396,7 @@ public class HStore implements HConstants {
       if (isReference) {
         reference = HStoreFile.readSplitInfo(p, fs);
       }
-      curfile = new HStoreFile(conf, fs, basedir, info.getEncodedName(),
+      curfile = new HStoreFile(conf, fs, basedir, this.info,
         family.getName(), fid, reference);
       long storeSeqId = -1;
       try {
@@ -424,7 +430,9 @@ public class HStore implements HConstants {
         // Try fixing this file.. if we can. Use the hbase version of fix.
         // Need to remove the old index file first else fix won't go ahead.
         this.fs.delete(new Path(mapfile, MapFile.INDEX_FILE_NAME), false);
-        long count = MapFile.fix(this.fs, mapfile, HStoreFile.HbaseMapFile.KEY_CLASS,
+        // TODO: This is going to fail if we are to rebuild a file from
+        // meta because it won't have right comparator: HBASE-848.
+        long count = MapFile.fix(this.fs, mapfile, HStoreKey.class,
           HStoreFile.HbaseMapFile.VALUE_CLASS, false, this.conf);
         if (LOG.isDebugEnabled()) {
           LOG.debug("Fixed index on " + mapfile.toString() + "; had " +
@@ -589,7 +597,7 @@ public class HStore implements HConstants {
     long now = System.currentTimeMillis();
     // A. Write the Maps out to the disk
     HStoreFile flushedFile = new HStoreFile(conf, fs, basedir,
-      info.getEncodedName(), family.getName(), -1L, null);
+      this.info, family.getName(), -1L, null);
     MapFile.Writer out = flushedFile.getWriter(this.fs, this.compression,
       this.family.isBloomfilter(), cache.size());
     out.setIndexInterval(family.getMapFileIndexInterval());
@@ -873,8 +881,7 @@ public class HStore implements HConstants {

     // Step through them, writing to the brand-new MapFile
     HStoreFile compactedOutputFile = new HStoreFile(conf, fs,
-      this.compactionDir, info.getEncodedName(), family.getName(),
-      -1L, null);
+      this.compactionDir, this.info, family.getName(), -1L, null);
     if (LOG.isDebugEnabled()) {
       LOG.debug("started compaction of " + rdrs.size() + " files into " +
         FSUtils.getPath(compactedOutputFile.getMapFilePath()));
@@ -962,7 +969,7 @@ public class HStore implements HConstants {
         }
       }
       HStoreKey sk = keys[smallestKey];
-      if (Bytes.equals(lastRow, sk.getRow())
+      if (HStoreKey.equalsTwoRowKeys(info, lastRow, sk.getRow())
           && Bytes.equals(lastColumn, sk.getColumn())) {
         timesSeen++;
       } else {
@@ -1045,7 +1052,7 @@ public class HStore implements HConstants {
     try {
       // 1. Moving the new MapFile into place.
       HStoreFile finalCompactedFile = new HStoreFile(conf, fs, basedir,
-        info.getEncodedName(), family.getName(), -1, null);
+        this.info, family.getName(), -1, null);
       if (LOG.isDebugEnabled()) {
         LOG.debug("moving " + FSUtils.getPath(compactedFile.getMapFilePath()) +
           " to " + FSUtils.getPath(finalCompactedFile.getMapFilePath()));
@@ -1207,7 +1214,7 @@ public class HStore implements HConstants {
           }
         }
       }
-    } else if (Bytes.compareTo(key.getRow(), readkey.getRow()) < 0) {
+    } else if (HStoreKey.compareTwoRowKeys(info, key.getRow(), readkey.getRow()) < 0) {
       // if we've crossed into the next row, then we can just stop
       // iterating
       break;
@@ -1774,7 +1781,7 @@ public class HStore implements HConstants {
   }

   static HStoreKey stripTimestamp(HStoreKey key) {
-    return new HStoreKey(key.getRow(), key.getColumn());
+    return new HStoreKey(key.getRow(), key.getColumn(), key.getHRegionInfo());
   }

   /*
@@ -1789,7 +1796,7 @@ public class HStore implements HConstants {
     // if the origin's column is empty, then we're matching any column
     if (Bytes.equals(origin.getColumn(), HConstants.EMPTY_BYTE_ARRAY)) {
       // if the row matches, then...
-      if (Bytes.equals(target.getRow(), origin.getRow())) {
+      if (HStoreKey.equalsTwoRowKeys(info, target.getRow(), origin.getRow())) {
         // check the timestamp
         return target.getTimestamp() <= origin.getTimestamp();
       }
@@ -1810,7 +1817,7 @@ public class HStore implements HConstants {
     // if the origin's column is empty, then we're matching any column
     if (Bytes.equals(origin.getColumn(), HConstants.EMPTY_BYTE_ARRAY)) {
       // if the row matches, then...
-      return Bytes.equals(target.getRow(), origin.getRow());
+      return HStoreKey.equalsTwoRowKeys(info, target.getRow(), origin.getRow());
     }
     // otherwise, we want to match on row and column
     return target.matchesRowCol(origin);
@@ -1869,8 +1876,8 @@ public class HStore implements HConstants {
       if (mk != null) {
         // if the midkey is the same as the first and last keys, then we cannot
         // (ever) split this region.
-        if (Bytes.equals(mk.getRow(), firstKey.getRow()) &&
-            Bytes.equals(mk.getRow(), lastKey.getRow())) {
+        if (HStoreKey.equalsTwoRowKeys(info, mk.getRow(), firstKey.getRow()) &&
+            HStoreKey.equalsTwoRowKeys(info, mk.getRow(), lastKey.getRow())) {
           return null;
         }
         return new StoreSize(maxSize, mk.getRow());
@@ -1957,4 +1964,8 @@ public class HStore implements HConstants {
       return key;
     }
   }
+
+  HRegionInfo getHRegionInfo() {
+    return this.info;
+  }
 }

@@ -119,25 +119,28 @@ public class HStoreFile implements HConstants {
   private final HBaseConfiguration conf;
   private final FileSystem fs;
   private final Reference reference;
+  private final HRegionInfo hri;

   /**
    * Constructor that fully initializes the object
    * @param conf Configuration object
    * @param basedir qualified path that is parent of region directory
-   * @param encodedRegionName file name friendly name of the region
    * @param colFamily name of the column family
    * @param fileId file identifier
    * @param ref Reference to another HStoreFile.
+   * @param hri The region info for this file (HACK HBASE-868). TODO: Fix.
    * @throws IOException
    */
   HStoreFile(HBaseConfiguration conf, FileSystem fs, Path basedir,
-      int encodedRegionName, byte [] colFamily, long fileId,
-      final Reference ref) throws IOException {
+      final HRegionInfo hri, byte [] colFamily, long fileId,
+      final Reference ref)
+  throws IOException {
     this.conf = conf;
     this.fs = fs;
     this.basedir = basedir;
-    this.encodedRegionName = encodedRegionName;
+    this.encodedRegionName = hri.getEncodedName();
     this.colFamily = colFamily;
+    this.hri = hri;

     long id = fileId;
     if (id == -1) {
@@ -431,7 +434,7 @@ public class HStoreFile implements HConstants {
         "HStoreFile reference");
     }
     return new BloomFilterMapFile.Writer(conf, fs,
-      getMapFilePath().toString(), compression, bloomFilter, nrows);
+      getMapFilePath().toString(), compression, bloomFilter, nrows, this.hri);
   }

   /**
@@ -584,7 +587,6 @@ public class HStoreFile implements HConstants {
    * Hbase customizations of MapFile.
    */
   static class HbaseMapFile extends MapFile {
-    static final Class<? extends Writable> KEY_CLASS = HStoreKey.class;
     static final Class<? extends Writable> VALUE_CLASS =
       ImmutableBytesWritable.class;

@@ -672,9 +674,10 @@ public class HStoreFile implements HConstants {
      * @throws IOException
      */
     public HbaseWriter(Configuration conf, FileSystem fs, String dirName,
-        SequenceFile.CompressionType compression)
+        SequenceFile.CompressionType compression, final HRegionInfo hri)
     throws IOException {
-      super(conf, fs, dirName, KEY_CLASS, VALUE_CLASS, compression);
+      super(conf, fs, dirName, new HStoreKey.HStoreKeyWritableComparator(hri),
+        VALUE_CLASS, compression);
       // Default for mapfiles is 128. Makes random reads faster if we
       // have more keys indexed and we're not 'next'-ing around in the
       // mapfile.
@@ -788,14 +791,15 @@ public class HStoreFile implements HConstants {
      * @param compression
      * @param filter
      * @param nrows
+     * @param hri
      * @throws IOException
      */
     @SuppressWarnings("unchecked")
     public Writer(Configuration conf, FileSystem fs, String dirName,
         SequenceFile.CompressionType compression, final boolean filter,
-        int nrows)
+        int nrows, final HRegionInfo hri)
     throws IOException {
-      super(conf, fs, dirName, compression);
+      super(conf, fs, dirName, compression, hri);
       this.dirName = dirName;
       this.fs = fs;
       if (filter) {

@@ -119,8 +119,10 @@ class HStoreScanner implements InternalScanner {
     for (int i = 0; i < this.keys.length; i++) {
       if (scanners[i] != null &&
           (chosenRow == null ||
-          (Bytes.compareTo(keys[i].getRow(), chosenRow) < 0) ||
-          ((Bytes.compareTo(keys[i].getRow(), chosenRow) == 0) &&
+          (HStoreKey.compareTwoRowKeys(store.getHRegionInfo(),
+            keys[i].getRow(), chosenRow) < 0) ||
+          ((HStoreKey.compareTwoRowKeys(store.getHRegionInfo(),
+            keys[i].getRow(), chosenRow) == 0) &&
           (keys[i].getTimestamp() > chosenTimestamp)))) {
         chosenRow = keys[i].getRow();
         chosenTimestamp = keys[i].getTimestamp();
@@ -150,7 +152,8 @@ class HStoreScanner implements InternalScanner {
       while ((scanners[i] != null
           && !filtered
          && moreToFollow)
-          && (Bytes.compareTo(keys[i].getRow(), chosenRow) == 0)) {
+          && (HStoreKey.compareTwoRowKeys(store.getHRegionInfo(),
+            keys[i].getRow(), chosenRow) == 0)) {
         // If we are doing a wild card match or there are multiple
         // matchers per column, we need to scan all the older versions of
         // this row to pick up the rest of the family members
@@ -165,7 +168,7 @@ class HStoreScanner implements InternalScanner {
           // values with older ones. So now we only insert
           // a result if the map does not contain the key.
           HStoreKey hsk = new HStoreKey(key.getRow(), HConstants.EMPTY_BYTE_ARRAY,
-            key.getTimestamp());
+            key.getTimestamp(), this.store.getHRegionInfo());
           for (Map.Entry<byte [], Cell> e : resultSets[i].entrySet()) {
             hsk.setColumn(e.getKey());
             if (HLogEdit.isDeleted(e.getValue().getValue())) {
@@ -202,7 +205,8 @@ class HStoreScanner implements InternalScanner {
       // If the current scanner is non-null AND has a lower-or-equal
       // row label, then its timestamp is bad. We need to advance it.
       while ((scanners[i] != null) &&
-          (Bytes.compareTo(keys[i].getRow(), chosenRow) <= 0)) {
+          (HStoreKey.compareTwoRowKeys(store.getHRegionInfo(),
+            keys[i].getRow(), chosenRow) <= 0)) {
         resultSets[i].clear();
         if (!scanners[i].next(keys[i], resultSets[i])) {
           closeScanner(i);

@@ -61,12 +61,10 @@ class Memcache {
   // so no additional synchronization is required.

   // The currently active sorted map of edits.
-  private volatile SortedMap<HStoreKey, byte[]> memcache =
-    createSynchronizedSortedMap();
+  private volatile SortedMap<HStoreKey, byte[]> memcache;

   // Snapshot of memcache. Made for flusher.
-  private volatile SortedMap<HStoreKey, byte[]> snapshot =
-    createSynchronizedSortedMap();
+  private volatile SortedMap<HStoreKey, byte[]> snapshot;

   private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

@@ -75,7 +73,10 @@ class Memcache {
   */
  public Memcache() {
    this.ttl = HConstants.FOREVER;
-    this.regionInfo = null;
+    // Set default to be the first meta region.
+    this.regionInfo = HRegionInfo.FIRST_META_REGIONINFO;
+    this.memcache = createSynchronizedSortedMap();
+    this.snapshot = createSynchronizedSortedMap();
  }

  /**
@@ -86,14 +87,18 @@ class Memcache {
  public Memcache(final long ttl, HRegionInfo regionInfo) {
    this.ttl = ttl;
    this.regionInfo = regionInfo;
+    this.memcache = createSynchronizedSortedMap();
+    this.snapshot = createSynchronizedSortedMap();
  }

  /*
-   * Utility method.
+   * Utility method using HStoreKeyWritableComparator
   * @return synchronized sorted map of HStoreKey to byte arrays.
   */
-  private static SortedMap<HStoreKey, byte[]> createSynchronizedSortedMap() {
-    return Collections.synchronizedSortedMap(new TreeMap<HStoreKey, byte []>());
+  private SortedMap<HStoreKey, byte[]> createSynchronizedSortedMap() {
+    return Collections.synchronizedSortedMap(
+      new TreeMap<HStoreKey, byte []>(
+        new HStoreKey.HStoreKeyWritableComparator(this.regionInfo)));
  }

  /**
@@ -229,7 +234,7 @@ class Memcache {
    if (b == null) {
      return a;
    }
-    return Bytes.compareTo(a, b) <= 0? a: b;
+    return HStoreKey.compareTwoRowKeys(regionInfo, a, b) <= 0? a: b;
  }

  /**
@@ -259,14 +264,13 @@ class Memcache {
    synchronized (map) {
      // Make an HSK with maximum timestamp so we get past most of the current
      // rows cell entries.
-      HStoreKey hsk = new HStoreKey(row, HConstants.LATEST_TIMESTAMP);
+      HStoreKey hsk = new HStoreKey(row, HConstants.LATEST_TIMESTAMP, this.regionInfo);
      SortedMap<HStoreKey, byte []> tailMap = map.tailMap(hsk);
      // Iterate until we fall into the next row; i.e. move off current row
      for (Map.Entry<HStoreKey, byte []> es: tailMap.entrySet()) {
        HStoreKey itKey = es.getKey();
-        if (Bytes.compareTo(itKey.getRow(), row) <= 0) {
+        if (HStoreKey.compareTwoRowKeys(regionInfo, itKey.getRow(), row) <= 0)
          continue;
-        }
        // Note: Not suppressing deletes or expired cells.
        result = itKey.getRow();
        break;
@@ -330,13 +334,15 @@ class Memcache {
          }
        }
      }
-      } else if (Bytes.compareTo(key.getRow(), itKey.getRow()) < 0) {
+      } else if (HStoreKey.compareTwoRowKeys(regionInfo, key.getRow(),
+          itKey.getRow()) < 0) {
        break;
      }
    }
    // Remove expired victims from the map.
-    for (HStoreKey v: victims)
+    for (HStoreKey v: victims) {
      map.remove(v);
+    }
  }
@@ -377,8 +383,8 @@ class Memcache {
      final Set<HStoreKey> deletes) {
    // We want the earliest possible to start searching from. Start before
    // the candidate key in case it turns out a delete came in later.
-    HStoreKey search_key = candidateKeys.isEmpty()? new HStoreKey(row):
-      new HStoreKey(candidateKeys.firstKey().getRow());
+    HStoreKey search_key = candidateKeys.isEmpty()? new HStoreKey(row, this.regionInfo):
+      new HStoreKey(candidateKeys.firstKey().getRow(), this.regionInfo);
    List<HStoreKey> victims = new ArrayList<HStoreKey>();
    long now = System.currentTimeMillis();

@@ -409,7 +415,8 @@ class Memcache {
        deletedOrExpiredRow = found_key;
      }
    } else {
-      if (HStore.notExpiredAndNotInDeletes(this.ttl, found_key, now, deletes)) {
+      if (HStore.notExpiredAndNotInDeletes(this.ttl,
+          found_key, now, deletes)) {
        candidateKeys.put(stripTimestamp(found_key),
          new Long(found_key.getTimestamp()));
      } else {
@@ -469,7 +476,8 @@ class Memcache {
      // not a delete record.
      boolean deleted = HLogEdit.isDeleted(headMap.get(found_key));
      if (lastRowFound != null &&
-          !Bytes.equals(lastRowFound, found_key.getRow()) && !deleted) {
+          !HStoreKey.equalsTwoRowKeys(regionInfo, lastRowFound,
+            found_key.getRow()) && !deleted) {
        break;
      }
      // If this isn't a delete, record it as a candidate key. Also
@@ -496,7 +504,7 @@ class Memcache {
      // smaller acceptable candidate keys would have caused us to start
      // our search earlier in the list, and we wouldn't be searching here.
      SortedMap<HStoreKey, byte[]> thisRowTailMap =
-        headMap.tailMap(new HStoreKey(headMap.lastKey().getRow()));
+        headMap.tailMap(new HStoreKey(headMap.lastKey().getRow(), this.regionInfo));
      Iterator<HStoreKey> key_iterator = thisRowTailMap.keySet().iterator();
      do {
        HStoreKey found_key = key_iterator.next();
@@ -521,7 +529,7 @@ class Memcache {
  }

  static HStoreKey stripTimestamp(HStoreKey key) {
-    return new HStoreKey(key.getRow(), key.getColumn());
+    return new HStoreKey(key.getRow(), key.getColumn(), key.getHRegionInfo());
  }

  /*
@@ -636,7 +644,8 @@ class Memcache {
      if (origin.getColumn() != null && origin.getColumn().length == 0) {
        // if the current and origin row don't match, then we can jump
        // out of the loop entirely.
-        if (!Bytes.equals(key.getRow(), origin.getRow())) {
+        if (!HStoreKey.equalsTwoRowKeys(regionInfo, key.getRow(),
+            origin.getRow())) {
          break;
        }
        // if the rows match but the timestamp is newer, skip it so we can
@@ -788,7 +797,6 @@ class Memcache {
        results.put(column, c);
      }
      this.currentRow = getNextRow(this.currentRow);
-
    }
    // Set the timestamp to the largest one for the row if we would otherwise
    // return HConstants.LATEST_TIMESTAMP

@@ -101,7 +101,7 @@ implements ChangedReadersObserver {

    // Advance the readers to the first pos.
    for (i = 0; i < readers.length; i++) {
-      keys[i] = new HStoreKey();
+      keys[i] = new HStoreKey(HConstants.EMPTY_BYTE_ARRAY, this.store.getHRegionInfo());
      if (firstRow != null && firstRow.length != 0) {
        if (findFirstRow(i, firstRow)) {
          continue;
@@ -159,7 +159,8 @@ implements ChangedReadersObserver {
    for (int i = 0; i < keys.length; i++) {
      // Fetch the data
      while ((keys[i] != null)
-          && (Bytes.compareTo(keys[i].getRow(), viableRow.getRow()) == 0)) {
+          && (HStoreKey.compareTwoRowKeys(store.getHRegionInfo(),
+            keys[i].getRow(), viableRow.getRow()) == 0)) {

        // If we are doing a wild card match or there are multiple matchers
        // per column, we need to scan all the older versions of this row
@@ -187,7 +188,8 @@ implements ChangedReadersObserver {
      // Advance the current scanner beyond the chosen row, to
      // a valid timestamp, so we're ready next time.
      while ((keys[i] != null)
-          && ((Bytes.compareTo(keys[i].getRow(), viableRow.getRow()) <= 0)
+          && ((HStoreKey.compareTwoRowKeys(store.getHRegionInfo(),
+            keys[i].getRow(), viableRow.getRow()) <= 0)
              || (keys[i].getTimestamp() > this.timestamp)
              || (! columnMatch(i)))) {
        getNext(i);
@@ -246,8 +248,10 @@ implements ChangedReadersObserver {
          // column matches and the timestamp of the row is less than or equal
          // to this.timestamp, so we do not need to test that here
          && ((viableRow == null)
-              || (Bytes.compareTo(keys[i].getRow(), viableRow) < 0)
-              || ((Bytes.compareTo(keys[i].getRow(), viableRow) == 0)
+              || (HStoreKey.compareTwoRowKeys(store.getHRegionInfo(),
+                keys[i].getRow(), viableRow) < 0)
+              || ((HStoreKey.compareTwoRowKeys(store.getHRegionInfo(),
+                keys[i].getRow(), viableRow) == 0)
                  && (keys[i].getTimestamp() > viableTimestamp)))) {
        if (ttl == HConstants.FOREVER || now < keys[i].getTimestamp() + ttl) {
          viableRow = keys[i].getRow();
@@ -273,7 +277,7 @@ implements ChangedReadersObserver {
  private boolean findFirstRow(int i, final byte [] firstRow) throws IOException {
    ImmutableBytesWritable ibw = new ImmutableBytesWritable();
    HStoreKey firstKey
-      = (HStoreKey)readers[i].getClosest(new HStoreKey(firstRow), ibw);
+      = (HStoreKey)readers[i].getClosest(new HStoreKey(firstRow, this.store.getHRegionInfo()), ibw);
    if (firstKey == null) {
      // Didn't find it. Close the scanner and return TRUE
      closeSubScanner(i);

@@ -34,6 +34,7 @@ import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.hbase.HBaseTestCase;

 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HStoreKey;
 /**
  * Test HStoreFile
@@ -126,7 +127,7 @@ public class TestHStoreFile extends HBaseTestCase {
   throws IOException {
     // Make a store file and write data to it.
     HStoreFile hsf = new HStoreFile(this.conf, this.fs, this.dir,
-        JenkinsHash.hash(Bytes.toBytes(getName())),
+        HRegionInfo.FIRST_META_REGIONINFO,
         Bytes.toBytes("colfamily"), 1234567890L, null);
     MapFile.Writer writer =
       hsf.getWriter(this.fs, SequenceFile.CompressionType.NONE, false, 0);
@@ -145,7 +146,7 @@ public class TestHStoreFile extends HBaseTestCase {
       midkey, HStoreFile.Range.top);
     HStoreFile refHsf = new HStoreFile(this.conf, this.fs,
       new Path(DIR, getName()),
-      JenkinsHash.hash(Bytes.toBytes(getName() + "_reference")),
+      HRegionInfo.FIRST_META_REGIONINFO,
       hsf.getColFamily(), 456, reference);
     // Assert that reference files are written and that we can write and
     // read the info reference file at least.