HADOOP-1784 delete

The previous commit against this issue applied v3 rather than v4.
This commit applies the difference, which should fix the currently
broken build.


git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@574376 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2007-09-10 21:16:20 +00:00
parent 5df7ca8c43
commit 223a3e46d0
1 changed file with 5 additions and 2 deletions

@@ -108,7 +108,9 @@ public class TestCompaction extends HBaseTestCase {
     t2.join();
     // Now assert that there are 4 versions of a record only: thats the
     // 3 versions that should be in the compacted store and then the one more
-    // we added when we compacted.
+    // we added when we flushed. But could be 3 only if the flush happened
+    // before the compaction started though we tried to have the threads run
+    // concurrently (On hudson this happens).
     byte [] secondRowBytes = new byte[START_KEY_BYTES.length];
     System.arraycopy(START_KEY_BYTES, 0, secondRowBytes, 0,
       START_KEY_BYTES.length);
@@ -116,7 +118,8 @@ public class TestCompaction extends HBaseTestCase {
     secondRowBytes[START_KEY_BYTES.length - 1]++;
     Text secondRow = new Text(secondRowBytes);
     bytes = this.r.get(secondRow, COLUMN_FAMILY_TEXT, 100/*Too many*/);
-    assertTrue(bytes.length == 4);
+    LOG.info("Count of " + secondRow + ": " + bytes.length);
+    assertTrue(bytes.length == 3 || bytes.length == 4);
     // Now add deletes to memcache and then flush it. That will put us over
     // the compaction threshold of 3 store files. Compacting these store files
     // should result in a compacted store file that has no references to the
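
For readers unfamiliar with the pattern above: the patch relaxes the assertion so the test tolerates either outcome of the race between the flush and the compaction. Below is a minimal, self-contained sketch of that race-tolerant assertion style in JUnit 3 form; the class name and the fetchVersionCount() helper are hypothetical stand-ins for this.r.get(...) in TestCompaction and are not part of the patch.

import junit.framework.TestCase;

public class RaceTolerantAssertionSketch extends TestCase {

  // Hypothetical stand-in for the region get() used in TestCompaction;
  // in the real test the observed count depends on whether the flush
  // finished before or after the compaction started.
  private int fetchVersionCount() {
    return 4;
  }

  public void testVersionCountToleratesFlushCompactionRace() {
    int versions = fetchVersionCount();
    // Log the observed count, then accept both legal outcomes of the race
    // instead of pinning the test to a single value.
    System.out.println("Count: " + versions);
    assertTrue("unexpected version count: " + versions,
      versions == 3 || versions == 4);
  }
}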