HBASE-1321 hbase-1234 broke TestCompaction; fix and reenable

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@764563 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2009-04-13 19:01:28 +00:00
parent 44bbcbded7
commit 8aeb808e00
3 changed files with 13 additions and 15 deletions

CHANGES.txt

@@ -76,6 +76,7 @@ Release 0.20.0 - Unreleased
    HBASE-1310 Off by one error in Bytes.vintToBytes
    HBASE-1202 getRow does not always work when specifying number of versions
    HBASE-1324 hbase-1234 broke testget2 unit test (and broke the build)
+   HBASE-1321 hbase-1234 broke TestCompaction; fix and reenable
   IMPROVEMENTS
    HBASE-1089 Add count of regions on filesystem to master UI; add percentage

Store.java

@@ -882,7 +882,7 @@ public class Store implements HConstants {
       if (timesSeen <= maxVersions && !(expired = isExpired(kv, ttl, now))) {
         // If this value key is same as a deleted key, skip
         if (lastDelete != null &&
-            this.comparator.compare(kv, lastDelete) == 0) {
+            this.comparatorIgnoringType.compare(kv, lastDelete) == 0) {
           deleted = true;
         } else if (kv.isDeleteType()) {
           // If a deleted value, skip
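For context (not part of the commit): the check above asks whether the current KeyValue has the same coordinates as the last delete marker seen. A comparator that also compares the KeyValue type byte never reports a Put equal to its Delete, so deleted cells survived compaction; comparing on row/column/timestamp only, as comparatorIgnoringType does, lets the delete mask the cell. A minimal, self-contained sketch of that distinction, using hypothetical names (KV, WITH_TYPE, IGNORING_TYPE) rather than HBase classes:

// Hypothetical illustration only; KV and these comparators are not HBase APIs.
import java.util.Comparator;

public class DeleteMaskSketch {
  enum Type { PUT, DELETE }

  // Stand-in for a KeyValue: identical coordinates, differing only in type.
  record KV(String row, String column, long timestamp, Type type) {}

  // Includes the type in the comparison: a Put never compares equal to its Delete.
  static final Comparator<KV> WITH_TYPE = Comparator.comparing(KV::row)
      .thenComparing(KV::column)
      .thenComparingLong(KV::timestamp)
      .thenComparing(KV::type);

  // Ignores the type: the Delete marker matches the Put it is meant to mask.
  static final Comparator<KV> IGNORING_TYPE = Comparator.comparing(KV::row)
      .thenComparing(KV::column)
      .thenComparingLong(KV::timestamp);

  public static void main(String[] args) {
    KV put = new KV("row2", "fam:col", 100L, Type.PUT);
    KV del = new KV("row2", "fam:col", 100L, Type.DELETE);
    System.out.println(WITH_TYPE.compare(put, del) == 0);      // false -> cell kept
    System.out.println(IGNORING_TYPE.compare(put, del) == 0);  // true  -> cell skipped as deleted
  }
}

The real HBase comparator covers more fields and orders timestamps newest-first; the sketch only shows why the type byte has to be excluded from this particular equality check.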

TestCompaction.java (class renamed from DisableTestCompaction)

@@ -25,7 +25,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
@@ -35,20 +34,18 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 /**
  * Test compactions
  */
-public class DisableTestCompaction extends HBaseTestCase {
-  static final Log LOG = LogFactory.getLog(DisableTestCompaction.class.getName());
+public class TestCompaction extends HBaseTestCase {
+  static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
   private HRegion r = null;
   private static final byte [] COLUMN_FAMILY = COLFAMILY_NAME1;
   private final byte [] STARTROW = Bytes.toBytes(START_KEY);
   private static final byte [] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
-  private static final byte [] COLUMN_FAMILY_TEXT_MINUS_COLON =
-    Bytes.toBytes(Bytes.toString(COLUMN_FAMILY).substring(0, COLUMN_FAMILY.length - 1));
   private static final int COMPACTION_THRESHOLD = MAXVERSIONS;
   private MiniDFSCluster cluster;
   /** constructor */
-  public DisableTestCompaction() {
+  public TestCompaction() {
     super();
     // Set cache flush size to 1MB
@@ -124,8 +121,8 @@ public class DisableTestCompaction extends HBaseTestCase {
     createSmallerStoreFile(this.r);
     r.flushcache();
     // Assert that the second row is still deleted.
-    // FIX
-    cellValues = Cell.createSingleCellArray(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
+    cellValues = Cell.createSingleCellArray(r.get(secondRowBytes,
+      COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
     assertNull(r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
     // Force major compaction.
     r.compactStores(true);
@@ -136,19 +133,19 @@ public class DisableTestCompaction extends HBaseTestCase {
     // they were deleted.
     int count = 0;
     boolean containsStartRow = false;
-    for (StoreFile f: this.r.stores.
-        get(Bytes.mapKey(COLUMN_FAMILY_TEXT_MINUS_COLON)).getStorefiles().values()) {
+    for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles().
+        values()) {
       HFileScanner scanner = f.getReader().getScanner();
       scanner.seekTo();
       do {
-        HStoreKey key = HStoreKey.create(scanner.getKey());
-        if (Bytes.equals(key.getRow(), STARTROW)) {
+        byte [] row = scanner.getKeyValue().getRow();
+        if (Bytes.equals(row, STARTROW)) {
          containsStartRow = true;
          count++;
        } else {
          // After major compaction, should be none of these rows in compacted
          // file.
-          assertFalse(Bytes.equals(key.getRow(), secondRowBytes));
+          assertFalse(Bytes.equals(row, secondRowBytes));
        }
      } while(scanner.next());
    }
@@ -168,7 +165,7 @@ public class DisableTestCompaction extends HBaseTestCase {
   private int count() throws IOException {
     int count = 0;
     for (StoreFile f: this.r.stores.
-        get(Bytes.mapKey(COLUMN_FAMILY_TEXT_MINUS_COLON)).getStorefiles().values()) {
+        get(COLUMN_FAMILY_TEXT).getStorefiles().values()) {
       HFileScanner scanner = f.getReader().getScanner();
       if (!scanner.seekTo()) {
         continue;
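For context (not part of the commit): the old test looked stores up with Bytes.mapKey(COLUMN_FAMILY_TEXT_MINUS_COLON), which turns the family bytes into a key that hashes and compares by content, because a raw byte[] key only works in a map whose comparator or hashing inspects the array contents. Passing COLUMN_FAMILY_TEXT directly, as the reenabled test does above, presumes the stores map compares byte arrays by value. A small sketch of that general Java point, with hypothetical names (nothing below is HBase code):

// Hypothetical illustration only: why byte[] keys need a content-aware comparator.
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class ByteArrayKeySketch {
  public static void main(String[] args) {
    byte[] family = "family:".getBytes();

    // HashMap keys byte[] by identity hashCode/equals, so an equal-but-distinct
    // array does not find the entry.
    Map<byte[], String> hashed = new HashMap<>();
    hashed.put(family, "store");
    System.out.println(hashed.get("family:".getBytes())); // null

    // A TreeMap with a lexicographic byte[] comparator compares contents,
    // so the same lookup succeeds.
    Map<byte[], String> sorted = new TreeMap<>(ByteArrayKeySketch::compareBytes);
    sorted.put(family, "store");
    System.out.println(sorted.get("family:".getBytes())); // store
  }

  // Unsigned lexicographic comparison of two byte arrays.
  static int compareBytes(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) return d;
    }
    return a.length - b.length;
  }
}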