HBASE-946 Row with 55k deletes timesout scanner lease

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@707159 13f79535-47bb-0310-9956-ffa450edef68

commit 4357226cd4
parent 0eb54f38b7
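In brief: scanning a row carrying ~55k deleted cells timed out its scanner lease because HStoreScanner tracked deletes in a List probed with linear contains() calls. The patch switches that tracker to a HashSet, gives HStoreKey a cheap equals() fast path to make hashing practical, parameterizes the WritableComparable implementations so compareTo() is typed, and picks up small cleanups in HTableDescriptor (cached root/meta flags, a stray semicolon) and the tests along the way.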
CHANGES.txt
@@ -37,6 +37,7 @@ Release 0.19.0 - Unreleased
               PerformanceEvaluation works as before.
    HBASE-939 NPE in HStoreKey
    HBASE-945 Be consistent in use of qualified/unqualified mapfile paths
+   HBASE-946 Row with 55k deletes timesout scanner lease
 
   IMPROVEMENTS
    HBASE-901 Add a limit to key length, check key and value length on client side
HStoreKey.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.io.WritableComparator;
 /**
  * A Key for a stored row.
  */
-public class HStoreKey implements WritableComparable {
+public class HStoreKey implements WritableComparable<HStoreKey> {
   /**
    * Colon character in UTF-8
    */
@@ -332,7 +332,14 @@ public class HStoreKey implements WritableComparable {
 
   @Override
   public boolean equals(Object obj) {
-    return compareTo(obj) == 0;
+    HStoreKey other = (HStoreKey)obj;
+    // Do a quick check.
+    if (this.row.length != other.row.length ||
+        this.column.length != other.column.length ||
+        this.timestamp != other.timestamp) {
+      return false;
+    }
+    return compareTo(other) == 0;
   }
 
   @Override
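This equals() rewrite is what makes a hash-based delete set practical: most non-matches are now rejected by three scalar comparisons (row length, column length, timestamp) before the full comparator runs. Hash membership also relies on hashCode() agreeing with equals(). Below is a minimal, self-contained sketch of the same quick-check pattern on a hypothetical Key class; the real HStoreKey defers the final check to compareTo() and carries an HRegionInfo, both omitted here.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Hypothetical stand-in for HStoreKey.
class Key {
  final byte[] row;
  final byte[] column;
  final long timestamp;

  Key(byte[] row, byte[] column, long timestamp) {
    this.row = row;
    this.column = column;
    this.timestamp = timestamp;
  }

  @Override
  public boolean equals(Object obj) {
    if (!(obj instanceof Key)) {
      return false;
    }
    Key other = (Key) obj;
    // Quick check: three scalar comparisons reject most non-matches
    // before any byte-by-byte work.
    if (this.row.length != other.row.length
        || this.column.length != other.column.length
        || this.timestamp != other.timestamp) {
      return false;
    }
    return Arrays.equals(this.row, other.row)
        && Arrays.equals(this.column, other.column);
  }

  @Override
  public int hashCode() {
    // Must agree with equals(), or HashSet lookups silently miss.
    int h = Arrays.hashCode(this.row);
    h = 31 * h + Arrays.hashCode(this.column);
    return 31 * h + (int) (this.timestamp ^ (this.timestamp >>> 32));
  }
}

public class QuickCheckDemo {
  public static void main(String[] args) {
    Set<Key> deletes = new HashSet<Key>();
    deletes.add(new Key(new byte[] {1}, new byte[] {2}, 10L));
    // Equal contents hash to the same bucket and compare equal:
    System.out.println(
        deletes.contains(new Key(new byte[] {1}, new byte[] {2}, 10L))); // true
  }
}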
@@ -345,7 +352,7 @@ public class HStoreKey implements WritableComparable {
 
   // Comparable
 
-  public int compareTo(Object o) {
+  public int compareTo(final HStoreKey o) {
     return compareTo(this.regionInfo, this, (HStoreKey)o);
   }
 
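Parameterizing WritableComparable moves type errors from run time to compile time: the untyped compareTo(Object) needed a cast that could throw ClassCastException, while compareTo(final HStoreKey o) rejects wrong-typed arguments when the caller is compiled. A small sketch with hypothetical stand-in classes (plain Comparable substitutes for Hadoop's WritableComparable to keep it self-contained):

import java.util.Arrays;

class RawKey implements Comparable<Object> {        // pre-change shape
  final int v;
  RawKey(int v) { this.v = v; }
  public int compareTo(Object o) {
    return Integer.compare(this.v, ((RawKey) o).v); // cast can fail at run time
  }
}

class TypedKey implements Comparable<TypedKey> {    // post-change shape
  final int v;
  TypedKey(int v) { this.v = v; }
  public int compareTo(TypedKey o) {
    return Integer.compare(this.v, o.v);            // no cast; checked at compile time
  }
}

public class GenericsDemo {
  public static void main(String[] args) {
    TypedKey[] keys = { new TypedKey(2), new TypedKey(1) };
    Arrays.sort(keys);                               // uses the typed compareTo
    System.out.println(keys[0].v + " " + keys[1].v); // prints: 1 2
  }
}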
@@ -514,8 +521,7 @@ public class HStoreKey implements WritableComparable {
    */
   public static int compareTwoRowKeys(HRegionInfo regionInfo,
       byte[] rowA, byte[] rowB) {
-    if(regionInfo != null && (regionInfo.isMetaRegion() ||
-        regionInfo.isRootRegion())) {
+    if (regionInfo != null && regionInfo.isMetaRegion()) {
       byte[][] keysA = stripStartKeyMeta(rowA);
       byte[][] KeysB = stripStartKeyMeta(rowB);
       int rowCompare = Bytes.compareTo(keysA[0], KeysB[0]);
HTableDescriptor.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.io.WritableComparable;
  * HTableDescriptor contains the name of an HTable, and its
  * column families.
  */
-public class HTableDescriptor implements WritableComparable {
+public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
 
   // Changes prior to version 3 were not recorded here.
   // Version 3 adds metadata as a map where keys and values are byte[].
@@ -52,7 +52,7 @@ public class HTableDescriptor implements WritableComparable {
 
   public static final String FAMILIES = "FAMILIES";
   public static final ImmutableBytesWritable FAMILIES_KEY =
-    new ImmutableBytesWritable(Bytes.toBytes(FAMILIES));;
+    new ImmutableBytesWritable(Bytes.toBytes(FAMILIES));
   public static final String MAX_FILESIZE = "MAX_FILESIZE";
   public static final ImmutableBytesWritable MAX_FILESIZE_KEY =
     new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));
@@ -83,7 +83,8 @@ public class HTableDescriptor implements WritableComparable {
 
   public static final int DEFAULT_MEMCACHE_FLUSH_SIZE = 1024*1024*64;
 
-  private transient Boolean meta = null;
+  private volatile Boolean meta = null;
+  private volatile Boolean root = null;
 
   // Key is hash of the family name.
   private final Map<Integer, HColumnDescriptor> families =
@@ -190,7 +191,10 @@ public class HTableDescriptor implements WritableComparable {
 
   /** @return true if this is the root region */
   public boolean isRootRegion() {
-    return isSomething(IS_ROOT_KEY, false);
+    if (this.root == null) {
+      this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
+    }
+    return this.root.booleanValue();
   }
 
   /** @param isRoot true if this is the root region */
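The caching pattern here is worth spelling out: null doubles as the "not computed yet" state, and the field is volatile so a value computed by one thread is visible to all. Two threads may race and both compute the flag, but they compute the same immutable Boolean, so the race is benign. A minimal sketch, with computeIsRoot() as a hypothetical stand-in for the real isSomething(IS_ROOT_KEY, false) metadata lookup:

public class CachedFlagDemo {
  private volatile Boolean root = null;

  public boolean isRootRegion() {
    if (this.root == null) {
      // Benign race: concurrent callers compute the same value.
      this.root = computeIsRoot() ? Boolean.TRUE : Boolean.FALSE;
    }
    return this.root.booleanValue();
  }

  private boolean computeIsRoot() {
    // Hypothetical stand-in for the descriptor's metadata-map lookup.
    return false;
  }

  public static void main(String[] args) {
    CachedFlagDemo d = new CachedFlagDemo();
    System.out.println(d.isRootRegion()); // computed once...
    System.out.println(d.isRootRegion()); // ...then served from the cache
  }
}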
@@ -338,7 +342,7 @@ public class HTableDescriptor implements WritableComparable {
   public boolean isInMemory() {
     String value = getValue(HConstants.IN_MEMORY);
     if (value != null)
-      return Boolean.valueOf(value);
+      return Boolean.valueOf(value).booleanValue();
     return DEFAULT_IN_MEMORY;
   }
 
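Small cleanup: Boolean.valueOf(String) yields a Boolean object while the method returns a primitive boolean, so the added .booleanValue() spells out the object-to-primitive conversion instead of leaning on auto-unboxing. A standalone illustration (the class name and default are hypothetical stand-ins):

public class UnboxDemo {
  static final boolean DEFAULT_IN_MEMORY = false; // stand-in default

  static boolean isInMemory(String value) {
    if (value != null)
      // Explicit unboxing: Boolean object to primitive boolean.
      return Boolean.valueOf(value).booleanValue();
    return DEFAULT_IN_MEMORY;
  }

  public static void main(String[] args) {
    System.out.println(isInMemory("true")); // true
    System.out.println(isInMemory(null));   // false
  }
}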
@@ -472,7 +476,7 @@ public class HTableDescriptor implements WritableComparable {
 
   @Override
   public boolean equals(Object obj) {
-    return compareTo(obj) == 0;
+    return compareTo((HTableDescriptor)obj) == 0;
   }
 
   @Override
@@ -538,8 +542,7 @@ public class HTableDescriptor implements WritableComparable {
 
   // Comparable
 
-  public int compareTo(Object o) {
-    HTableDescriptor other = (HTableDescriptor) o;
+  public int compareTo(final HTableDescriptor other) {
     int result = Bytes.compareTo(this.name, other.name);
     if (result == 0) {
       result = families.size() - other.families.size();
BeforeThisStoreKey.java
@@ -48,7 +48,7 @@ public class BeforeThisStoreKey extends HStoreKey {
   }
 
   @Override
-  public int compareTo(Object o) {
+  public int compareTo(final HStoreKey o) {
     int result = this.beforeThisKey.compareTo(o);
     return result == 0? -1: result;
   }
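BeforeThisStoreKey keeps its defining trick through the retyping: it delegates to the wrapped key's comparison but converts equality into "less than", so the sentinel always sorts immediately before the key it wraps, which is useful for positioning a reader just ahead of a target entry. A sketch of the pattern on hypothetical stand-in classes:

// Entry is an ordinary key type; BeforeEntry wraps one and compares as
// strictly less than it, deliberately bending the Comparable contract.
class Entry implements Comparable<Entry> {
  final String row;
  Entry(String row) { this.row = row; }

  public int compareTo(Entry o) {
    return this.row.compareTo(o.row);
  }
}

class BeforeEntry extends Entry {
  private final Entry beforeThisEntry;

  BeforeEntry(Entry e) {
    super(e.row);
    this.beforeThisEntry = e;
  }

  @Override
  public int compareTo(Entry o) {
    int result = this.beforeThisEntry.compareTo(o);
    return result == 0 ? -1 : result; // equal becomes "sorts just before"
  }
}

public class SentinelDemo {
  public static void main(String[] args) {
    Entry target = new Entry("row-100");
    Entry sentinel = new BeforeEntry(target);
    System.out.println(sentinel.compareTo(target));             // -1
    System.out.println(sentinel.compareTo(new Entry("row-0"))); // positive
  }
}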
HStoreScanner.java
@@ -22,8 +22,10 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
@@ -146,7 +148,7 @@ class HStoreScanner implements InternalScanner {
       // are only keeping rows and columns that match those set on the
       // scanner and which have delete values. If memory usage becomes a
       // problem, could redo as bloom filter.
-      List<HStoreKey> deletes = new ArrayList<HStoreKey>();
+      Set<HStoreKey> deletes = new HashSet<HStoreKey>();
       for (int i = 0; i < scanners.length && !filtered; i++) {
         while ((scanners[i] != null
             && !filtered
@@ -166,16 +168,14 @@ class HStoreScanner implements InternalScanner {
           // but this had the effect of overwriting newer
           // values with older ones. So now we only insert
           // a result if the map does not contain the key.
-          HStoreKey hsk = new HStoreKey(key.getRow(), HConstants.EMPTY_BYTE_ARRAY,
+          HStoreKey hsk = new HStoreKey(key.getRow(),
+            HConstants.EMPTY_BYTE_ARRAY,
             key.getTimestamp(), this.store.getHRegionInfo());
           for (Map.Entry<byte [], Cell> e : resultSets[i].entrySet()) {
             hsk.setColumn(e.getKey());
             if (HLogEdit.isDeleted(e.getValue().getValue())) {
-              if (!deletes.contains(hsk)) {
-                // Key changes as we cycle the for loop so add a copy to
-                // the set of deletes.
-                deletes.add(new HStoreKey(hsk));
-              }
+              // Only first key encountered is added; deletes is a Set.
+              deletes.add(new HStoreKey(hsk));
             } else if (!deletes.contains(hsk) &&
                 !filtered &&
                 moreToFollow &&
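This hunk is the heart of the fix. With deletes held in an ArrayList, each deletes.contains(hsk) was a linear scan, so a row carrying ~55k deleted cells cost on the order of n squared comparisons inside a single scanner step, long enough for the scanner lease to expire. A HashSet makes the membership probe expected O(1), and because Set.add() already ignores duplicates, the contains-then-add dance could be dropped as well. A self-contained sketch of the cost gap (timings are illustrative; 55000 mirrors the row in the bug report):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DeleteTrackingCost {
  public static void main(String[] args) {
    final int n = 55000; // roughly the delete count from HBASE-946

    // Old shape: contains() on an ArrayList scans the whole list, so the
    // loop below does about n*n/2 comparisons in total.
    List<String> list = new ArrayList<String>();
    long t0 = System.nanoTime();
    for (int i = 0; i < n; i++) {
      String key = "row/col" + i;
      if (!list.contains(key)) {
        list.add(key);
      }
    }
    long listMs = (System.nanoTime() - t0) / 1000000L;

    // New shape: HashSet membership is expected O(1), and add() already
    // ignores duplicates, so no explicit contains() is needed.
    Set<String> set = new HashSet<String>();
    long t1 = System.nanoTime();
    for (int i = 0; i < n; i++) {
      set.add("row/col" + i);
    }
    long setMs = (System.nanoTime() - t1) / 1000000L;

    System.out.println("ArrayList: " + listMs + " ms; HashSet: " + setMs + " ms");
  }
}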
TestHStoreFile.java
@@ -25,17 +25,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.JenkinsHash;
-import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.hbase.HBaseTestCase;
-
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.MapFile;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.WritableComparable;
 /**
  * Test HStoreFile
  */
@@ -234,7 +232,7 @@ public class TestHStoreFile extends HBaseTestCase {
         first = false;
         LOG.info("First in bottom: " + previous);
       }
-      assertTrue(key.compareTo(midkey) < 0);
+      assertTrue(key.compareTo((HStoreKey)midkey) < 0);
     }
     if (previous != null) {
       LOG.info("Last in bottom: " + previous.toString());
@@ -244,7 +242,7 @@ public class TestHStoreFile extends HBaseTestCase {
       HStoreFile.Range.top, midkey, null);
     first = true;
     while (top.next(key, value)) {
-      assertTrue(key.compareTo(midkey) >= 0);
+      assertTrue(key.compareTo((HStoreKey)midkey) >= 0);
       if (first) {
         first = false;
         assertTrue(Bytes.equals(((HStoreKey)midkey).getRow(),
@@ -255,7 +253,7 @@ public class TestHStoreFile extends HBaseTestCase {
     LOG.info("Last in top: " + key.toString());
     top.getClosest(midkey, value);
     // Assert value is same as key.
-    assertTrue(Bytes.equals(value.get(), ((HStoreKey) midkey).getRow()));
+    assertTrue(Bytes.equals(value.get(), ((HStoreKey)midkey).getRow()));
 
     // Next test using a midkey that does not exist in the file.
     // First, do a key that is < than first key. Ensure splits behave
@@ -270,7 +268,7 @@ public class TestHStoreFile extends HBaseTestCase {
       HStoreFile.Range.top, badkey, null);
     first = true;
     while (top.next(key, value)) {
-      assertTrue(key.compareTo(badkey) >= 0);
+      assertTrue(key.compareTo((HStoreKey)badkey) >= 0);
       if (first) {
         first = false;
         LOG.info("First top when key < bottom: " + key.toString());