diff --git a/CHANGES.txt b/CHANGES.txt
index 74141ddd5a6..55974fdde43 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -37,6 +37,7 @@ Release 0.19.0 - Unreleased
               PerformanceEvaluation works as before.
    HBASE-939  NPE in HStoreKey
    HBASE-945  Be consistent in use of qualified/unqualified mapfile paths
+   HBASE-946  Row with 55k deletes timesout scanner lease
 
   IMPROVEMENTS
    HBASE-901  Add a limit to key length, check key and value length on client side
diff --git a/src/java/org/apache/hadoop/hbase/HStoreKey.java b/src/java/org/apache/hadoop/hbase/HStoreKey.java
index 91dfde7a4fa..e1b47a5ab18 100644
--- a/src/java/org/apache/hadoop/hbase/HStoreKey.java
+++ b/src/java/org/apache/hadoop/hbase/HStoreKey.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.io.WritableComparator;
 /**
  * A Key for a stored row.
  */
-public class HStoreKey implements WritableComparable {
+public class HStoreKey implements WritableComparable<HStoreKey> {
   /**
    * Colon character in UTF-8
    */
@@ -332,7 +332,14 @@ public class HStoreKey implements WritableComparable {
 
   @Override
   public boolean equals(Object obj) {
-    return compareTo(obj) == 0;
+    HStoreKey other = (HStoreKey)obj;
+    // Do a quick check.
+    if (this.row.length != other.row.length ||
+        this.column.length != other.column.length ||
+        this.timestamp != other.timestamp) {
+      return false;
+    }
+    return compareTo(other) == 0;
   }
 
   @Override
@@ -345,7 +352,7 @@ public class HStoreKey implements WritableComparable {
 
   // Comparable
 
-  public int compareTo(Object o) {
+  public int compareTo(final HStoreKey o) {
     return compareTo(this.regionInfo, this, (HStoreKey)o);
   }
 
@@ -514,8 +521,7 @@ public class HStoreKey implements WritableComparable {
    */
   public static int compareTwoRowKeys(HRegionInfo regionInfo,
       byte[] rowA, byte[] rowB) {
-    if(regionInfo != null && (regionInfo.isMetaRegion() ||
-        regionInfo.isRootRegion())) {
+    if (regionInfo != null && regionInfo.isMetaRegion()) {
       byte[][] keysA = stripStartKeyMeta(rowA);
       byte[][] KeysB = stripStartKeyMeta(rowB);
       int rowCompare = Bytes.compareTo(keysA[0], KeysB[0]);
diff --git a/src/java/org/apache/hadoop/hbase/HTableDescriptor.java b/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
index f34f4e074db..0db629c7161 100644
--- a/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.io.WritableComparable;
  * HTableDescriptor contains the name of an HTable, and its
  * column families.
  */
-public class HTableDescriptor implements WritableComparable {
+public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
 
   // Changes prior to version 3 were not recorded here.
   // Version 3 adds metadata as a map where keys and values are byte[].
@@ -52,7 +52,7 @@ public class HTableDescriptor implements WritableComparable {
 
   public static final String FAMILIES = "FAMILIES";
   public static final ImmutableBytesWritable FAMILIES_KEY =
-    new ImmutableBytesWritable(Bytes.toBytes(FAMILIES));;
+    new ImmutableBytesWritable(Bytes.toBytes(FAMILIES));
   public static final String MAX_FILESIZE = "MAX_FILESIZE";
   public static final ImmutableBytesWritable MAX_FILESIZE_KEY =
     new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));
@@ -83,7 +83,8 @@ public class HTableDescriptor implements WritableComparable {
 
   public static final int DEFAULT_MEMCACHE_FLUSH_SIZE = 1024*1024*64;
 
-  private transient Boolean meta = null;
+  private volatile Boolean meta = null;
+  private volatile Boolean root = null;
 
   // Key is hash of the family name.
   private final Map<Integer, HColumnDescriptor> families =
@@ -190,7 +191,10 @@ public class HTableDescriptor implements WritableComparable {
 
   /** @return true if this is the root region */
   public boolean isRootRegion() {
-    return isSomething(IS_ROOT_KEY, false);
+    if (this.root == null) {
+      this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
+    }
+    return this.root.booleanValue();
   }
 
   /** @param isRoot true if this is the root region */
@@ -338,7 +342,7 @@ public class HTableDescriptor implements WritableComparable {
   public boolean isInMemory() {
     String value = getValue(HConstants.IN_MEMORY);
     if (value != null)
-      return Boolean.valueOf(value);
+      return Boolean.valueOf(value).booleanValue();
     return DEFAULT_IN_MEMORY;
   }
 
@@ -472,7 +476,7 @@ public class HTableDescriptor implements WritableComparable {
 
   @Override
   public boolean equals(Object obj) {
-    return compareTo(obj) == 0;
+    return compareTo((HTableDescriptor)obj) == 0;
   }
 
   @Override
@@ -538,8 +542,7 @@
 
   // Comparable
 
-  public int compareTo(Object o) {
-    HTableDescriptor other = (HTableDescriptor) o;
+  public int compareTo(final HTableDescriptor other) {
     int result = Bytes.compareTo(this.name, other.name);
     if (result == 0) {
       result = families.size() - other.families.size();
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/BeforeThisStoreKey.java b/src/java/org/apache/hadoop/hbase/regionserver/BeforeThisStoreKey.java
index f863cbe27c6..0151afc56d9 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/BeforeThisStoreKey.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/BeforeThisStoreKey.java
@@ -48,7 +48,7 @@ public class BeforeThisStoreKey extends HStoreKey {
   }
 
   @Override
-  public int compareTo(Object o) {
+  public int compareTo(final HStoreKey o) {
     int result = this.beforeThisKey.compareTo(o);
     return result == 0? -1: result;
   }
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java b/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java
index dc1931ea125..ce9f9094c9f 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java
@@ -22,8 +22,10 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
@@ -146,7 +148,7 @@ class HStoreScanner implements InternalScanner {
     // are only keeping rows and columns that match those set on the
    // scanner and which have delete values. If memory usage becomes a
     // problem, could redo as bloom filter.
-    List<HStoreKey> deletes = new ArrayList<HStoreKey>();
+    Set<HStoreKey> deletes = new HashSet<HStoreKey>();
     for (int i = 0; i < scanners.length && !filtered; i++) {
       while ((scanners[i] != null
           && !filtered
@@ -166,16 +168,14 @@
           // but this had the effect of overwriting newer
           // values with older ones. So now we only insert
           // a result if the map does not contain the key.
-          HStoreKey hsk = new HStoreKey(key.getRow(), HConstants.EMPTY_BYTE_ARRAY,
+          HStoreKey hsk = new HStoreKey(key.getRow(),
+            HConstants.EMPTY_BYTE_ARRAY,
             key.getTimestamp(), this.store.getHRegionInfo());
           for (Map.Entry<byte [], Cell> e : resultSets[i].entrySet()) {
             hsk.setColumn(e.getKey());
             if (HLogEdit.isDeleted(e.getValue().getValue())) {
-              if (!deletes.contains(hsk)) {
-                // Key changes as we cycle the for loop so add a copy to
-                // the set of deletes.
-                deletes.add(new HStoreKey(hsk));
-              }
+              // Only first key encountered is added; deletes is a Set.
+              deletes.add(new HStoreKey(hsk));
             } else if (!deletes.contains(hsk) &&
                 !filtered &&
                 moreToFollow &&
diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 9722f36b395..cc47f077b4c 100644
--- a/src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ b/src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -25,17 +25,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.JenkinsHash;
-import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.hbase.HBaseTestCase;
-
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.MapFile;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.WritableComparable;
 /**
  * Test HStoreFile
  */
@@ -234,7 +232,7 @@ public class TestHStoreFile extends HBaseTestCase {
         first = false;
         LOG.info("First in bottom: " + previous);
       }
-      assertTrue(key.compareTo(midkey) < 0);
+      assertTrue(key.compareTo((HStoreKey)midkey) < 0);
     }
     if (previous != null) {
       LOG.info("Last in bottom: " + previous.toString());
@@ -244,7 +242,7 @@
         HStoreFile.Range.top, midkey, null);
     first = true;
     while (top.next(key, value)) {
-      assertTrue(key.compareTo(midkey) >= 0);
+      assertTrue(key.compareTo((HStoreKey)midkey) >= 0);
       if (first) {
         first = false;
         assertTrue(Bytes.equals(((HStoreKey)midkey).getRow(),
@@ -255,7 +253,7 @@
     LOG.info("Last in top: " + key.toString());
     top.getClosest(midkey, value);
     // Assert value is same as key.
-    assertTrue(Bytes.equals(value.get(), ((HStoreKey) midkey).getRow()));
+    assertTrue(Bytes.equals(value.get(), ((HStoreKey)midkey).getRow()));
 
     // Next test using a midkey that does not exist in the file.
     // First, do a key that is < than first key. Ensure splits behave
@@ -270,7 +268,7 @@
         HStoreFile.Range.top, badkey, null);
     first = true;
     while (top.next(key, value)) {
-      assertTrue(key.compareTo(badkey) >= 0);
+      assertTrue(key.compareTo((HStoreKey)badkey) >= 0);
       if (first) {
         first = false;
         LOG.info("First top when key < bottom: " + key.toString());
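
Why the HStoreScanner change fixes HBASE-946: collecting a row's delete cells in an ArrayList makes every deletes.contains(hsk) call a linear scan, so a row carrying roughly 55,000 deletes turns row assembly into quadratic work and the scanner lease can expire before next() returns. A HashSet makes add() and contains() effectively constant time; the reworked HStoreKey.equals() (explicit cast plus a short-circuit on row length, column length and timestamp) keeps those membership checks cheap, and hash-based membership relies on equals() and hashCode() agreeing. The sketch below only illustrates that cost difference under these assumptions; DeleteKey and DeleteTrackingDemo are hypothetical names invented for the example, not HBase classes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;

// Hypothetical stand-in for a store key (row, column, timestamp) with
// equals/hashCode defined so it can be used in a HashSet. Not HBase code.
final class DeleteKey {
  private final byte[] row;
  private final byte[] column;
  private final long timestamp;

  DeleteKey(byte[] row, byte[] column, long timestamp) {
    this.row = row;
    this.column = column;
    this.timestamp = timestamp;
  }

  @Override
  public boolean equals(Object obj) {
    if (!(obj instanceof DeleteKey)) {
      return false;
    }
    DeleteKey other = (DeleteKey) obj;
    // Cheap field first, then the byte arrays -- same idea as the quick
    // length/timestamp check the patch adds to HStoreKey.equals().
    return this.timestamp == other.timestamp
        && Arrays.equals(this.row, other.row)
        && Arrays.equals(this.column, other.column);
  }

  @Override
  public int hashCode() {
    int h = Arrays.hashCode(this.row);
    h = 31 * h + Arrays.hashCode(this.column);
    h = 31 * h + (int) (this.timestamp ^ (this.timestamp >>> 32));
    return h;
  }
}

public class DeleteTrackingDemo {
  // Simulates assembling one row that carries n delete cells, testing each
  // candidate against the deletes collected so far, as the scanner does.
  static long assembleRow(Collection<DeleteKey> deletes, int n) {
    long start = System.nanoTime();
    byte[] row = "row".getBytes();
    for (int i = 0; i < n; i++) {
      DeleteKey key = new DeleteKey(row, ("column" + i).getBytes(), 1L);
      if (!deletes.contains(key)) {  // linear scan for a List, hashed for a Set
        deletes.add(key);
      }
    }
    return (System.nanoTime() - start) / 1_000_000L;
  }

  public static void main(String[] args) {
    int n = 55_000;  // roughly the delete count reported in HBASE-946
    System.out.println("ArrayList: " + assembleRow(new ArrayList<DeleteKey>(), n) + " ms");
    System.out.println("HashSet:   " + assembleRow(new HashSet<DeleteKey>(), n) + " ms");
  }
}

With 55,000 keys the ArrayList variant can take tens of seconds while the HashSet variant finishes in milliseconds, which is the difference between keeping and losing a scanner lease.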