HBASE-1321 hbase-1234 broke TestCompaction; fix and reenable
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@764563 13f79535-47bb-0310-9956-ffa450edef68
parent 44bbcbded7
commit 8aeb808e00

@@ -76,6 +76,7 @@ Release 0.20.0 - Unreleased
    HBASE-1310 Off by one error in Bytes.vintToBytes
    HBASE-1202 getRow does not always work when specifying number of versions
    HBASE-1324 hbase-1234 broke testget2 unit test (and broke the build)
+   HBASE-1321 hbase-1234 broke TestCompaction; fix and reenable
 
  IMPROVEMENTS
    HBASE-1089 Add count of regions on filesystem to master UI; add percentage

@@ -882,7 +882,7 @@ public class Store implements HConstants {
       if (timesSeen <= maxVersions && !(expired = isExpired(kv, ttl, now))) {
         // If this value key is same as a deleted key, skip
         if (lastDelete != null &&
-            this.comparator.compare(kv, lastDelete) == 0) {
+            this.comparatorIgnoringType.compare(kv, lastDelete) == 0) {
           deleted = true;
         } else if (kv.isDeleteType()) {
           // If a deleted value, skip
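
A rough, self-contained sketch (not HBase code) of the idea behind switching from comparator to comparatorIgnoringType in the Store hunk above: a delete marker and the put it masks share row, column and timestamp but carry a different type byte, so a comparison that includes the type never matches them and the deleted value would survive compaction. Every name below (ToyKey, TYPE_PUT, TYPE_DELETE, IGNORING_TYPE) is hypothetical, not HBase's API.

import java.util.Comparator;

// Toy model of a KeyValue-style key: row, column, timestamp, plus a type byte
// that distinguishes ordinary puts from delete markers.
public class DeleteMatchSketch {
  static final byte TYPE_PUT = 4;      // hypothetical type codes
  static final byte TYPE_DELETE = 8;

  record ToyKey(String row, String column, long timestamp, byte type) {}

  // Orders by row, column and timestamp only; the type byte is deliberately
  // ignored, so a delete marker compares equal to the put it masks.
  static final Comparator<ToyKey> IGNORING_TYPE =
      Comparator.comparing(ToyKey::row)
          .thenComparing(ToyKey::column)
          .thenComparingLong(ToyKey::timestamp);

  public static void main(String[] args) {
    ToyKey put = new ToyKey("row1", "fam:col", 42L, TYPE_PUT);
    ToyKey del = new ToyKey("row1", "fam:col", 42L, TYPE_DELETE);
    // A comparison that also inspected the type byte would see two different
    // keys, so the compaction loop would keep the put even though a delete
    // covers it. Ignoring the type byte makes the two keys match.
    System.out.println(IGNORING_TYPE.compare(put, del) == 0); // prints true
  }
}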
@ -25,7 +25,6 @@ import org.apache.commons.logging.Log;
|
|||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.HBaseTestCase;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HStoreKey;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.io.Cell;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
|
||||
|

@@ -35,20 +34,18 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 /**
  * Test compactions
  */
-public class DisableTestCompaction extends HBaseTestCase {
-  static final Log LOG = LogFactory.getLog(DisableTestCompaction.class.getName());
+public class TestCompaction extends HBaseTestCase {
+  static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
   private HRegion r = null;
   private static final byte [] COLUMN_FAMILY = COLFAMILY_NAME1;
   private final byte [] STARTROW = Bytes.toBytes(START_KEY);
   private static final byte [] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
-  private static final byte [] COLUMN_FAMILY_TEXT_MINUS_COLON =
-    Bytes.toBytes(Bytes.toString(COLUMN_FAMILY).substring(0, COLUMN_FAMILY.length - 1));
   private static final int COMPACTION_THRESHOLD = MAXVERSIONS;
 
   private MiniDFSCluster cluster;
 
   /** constructor */
-  public DisableTestCompaction() {
+  public TestCompaction() {
     super();
 
     // Set cache flush size to 1MB

@@ -124,8 +121,8 @@ public class DisableTestCompaction extends HBaseTestCase {
     createSmallerStoreFile(this.r);
     r.flushcache();
     // Assert that the second row is still deleted.
-    // FIX
-    cellValues = Cell.createSingleCellArray(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
+    cellValues = Cell.createSingleCellArray(r.get(secondRowBytes,
+      COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
     assertNull(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
     // Force major compaction.
     r.compactStores(true);

@@ -136,19 +133,19 @@ public class DisableTestCompaction extends HBaseTestCase {
     // they were deleted.
     int count = 0;
     boolean containsStartRow = false;
-    for (StoreFile f: this.r.stores.
-        get(Bytes.mapKey(COLUMN_FAMILY_TEXT_MINUS_COLON)).getStorefiles().values()) {
+    for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles().
+        values()) {
       HFileScanner scanner = f.getReader().getScanner();
       scanner.seekTo();
       do {
-        HStoreKey key = HStoreKey.create(scanner.getKey());
-        if (Bytes.equals(key.getRow(), STARTROW)) {
+        byte [] row = scanner.getKeyValue().getRow();
+        if (Bytes.equals(row, STARTROW)) {
           containsStartRow = true;
           count++;
         } else {
           // After major compaction, should be none of these rows in compacted
           // file.
-          assertFalse(Bytes.equals(key.getRow(), secondRowBytes));
+          assertFalse(Bytes.equals(row, secondRowBytes));
         }
       } while(scanner.next());
     }

@@ -168,7 +165,7 @@ public class DisableTestCompaction extends HBaseTestCase {
   private int count() throws IOException {
     int count = 0;
     for (StoreFile f: this.r.stores.
-      get(Bytes.mapKey(COLUMN_FAMILY_TEXT_MINUS_COLON)).getStorefiles().values()) {
+      get(COLUMN_FAMILY_TEXT).getStorefiles().values()) {
       HFileScanner scanner = f.getReader().getScanner();
       if (!scanner.seekTo()) {
         continue;
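
A minimal, self-contained toy of the HFileScanner-style iteration the test hunks above rely on: seekTo() positions the cursor on the first entry and reports whether the file has any, getKeyValue()/getRow() read the current entry, and next() advances until it returns false, hence the do/while. The ToyScanner interface and the data below are invented for illustration; they are not HBase's API.

import java.util.List;

public class ScannerPatternSketch {
  // Stand-in for an HFileScanner-style cursor.
  interface ToyScanner {
    boolean seekTo();   // position at the first entry; false if the file is empty
    boolean next();     // advance; false once past the last entry
    String getRow();    // row of the current entry
  }

  static ToyScanner fromList(List<String> rows) {
    return new ToyScanner() {
      private int i = -1;
      public boolean seekTo() { i = 0; return !rows.isEmpty(); }
      public boolean next() { return ++i < rows.size(); }
      public String getRow() { return rows.get(i); }
    };
  }

  // Count entries for a given row, mirroring the do/while shape of the test's
  // verification loop and the seekTo() guard in its count() helper.
  static int countRow(ToyScanner scanner, String row) {
    int count = 0;
    if (!scanner.seekTo()) {
      return 0;         // empty store file: nothing to count
    }
    do {
      if (scanner.getRow().equals(row)) {
        count++;
      }
    } while (scanner.next());
    return count;
  }

  public static void main(String[] args) {
    ToyScanner s = fromList(List.of("row1", "row1", "row2"));
    System.out.println(countRow(s, "row1")); // prints 2
  }
}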