HADOOP-1872 TestCompaction assertions fail on some hosts

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@585276 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2007-10-16 22:27:30 +00:00
parent a5701cbbb0
commit 04cac52745
1 changed file with 22 additions and 5 deletions


@@ -119,11 +119,12 @@ public class TestCompaction extends HBaseTestCase {
     Text secondRow = new Text(secondRowBytes);
     bytes = this.r.get(secondRow, COLUMN_FAMILY_TEXT, 100/*Too many*/);
     LOG.info("Count of " + secondRow + ": " + bytes.length);
-    // Commented out because fails on an hp+ubuntu though passes on all local
+    // Commented out because fails on an hp+ubuntu single-processor w/ 1G and
+    // "Intel(R) Pentium(R) 4 CPU 3.20GHz" though passes on all local
     // machines and even on hudson. On said machine, its reporting in the
     // LOG line above that there are 3 items in row so it should pass the
     // below test.
-    // assertTrue(bytes.length == 3 || bytes.length == 4);
+    assertTrue(bytes.length == 3 || bytes.length == 4);
 
     // Now add deletes to memcache and then flush it. That will put us over
     // the compaction threshold of 3 store files. Compacting these store files
@@ -136,9 +137,13 @@ public class TestCompaction extends HBaseTestCase {
     assertNull(this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
     this.r.flushcache(false);
     assertNull(this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
-    // Commenting out to fix build. Failing on hp+ubunutu combination
-    // "Intel(R) Pentium(R) 4 CPU 3.20GHz".
-    // assertTrue(this.r.needsCompaction());
+    // Add a bit of data and flush it so we for sure have the compaction limit
+    // for store files. Usually by this time we will have but if compaction
+    // included the flush that ran 'concurrently', there may be just the
+    // compacted store and the flush above when we added deletes. Add more
+    // content to be certain.
+    createBunchOfSmallStoreFiles(this.r);
+    assertTrue(this.r.needsCompaction());
     this.r.compactStores();
     // Assert that the first row is still deleted.
     bytes = this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
@@ -162,4 +167,16 @@ public class TestCompaction extends HBaseTestCase {
     }
     region.flushcache(false);
   }
+
+  private void createBunchOfSmallStoreFiles(final HRegion region)
+  throws IOException {
+    final String xyz = new String("xyz");
+    byte [] bytes = xyz.getBytes();
+    for (int i = 0; i < 1; i++) {
+      long lid = region.startUpdate(new Text(xyz));
+      region.put(lid, COLUMN_FAMILY_TEXT, bytes);
+      region.commit(lid);
+      region.flushcache(false);
+    }
+  }
 }
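
For context on why the new helper stabilizes the test: each put/commit followed by flushcache(false) writes one more store file for the column family, so after enough cycles needsCompaction() must return true no matter how earlier flushes and compactions interleaved. Below is a minimal sketch of that loop, using only the HRegion calls that appear in the patch (startUpdate, put, commit, flushcache, needsCompaction); the method name flushUntilCompactionNeeded and the MAX_FLUSHES cap are illustrative assumptions, not part of the commit.

  // Sketch only: drive the region past the store-file compaction threshold.
  // Assumes the same HRegion API used in the patch above.
  private void flushUntilCompactionNeeded(final HRegion region)
  throws IOException {
    final int MAX_FLUSHES = 10; // hypothetical safety cap, not from the commit
    for (int i = 0; i < MAX_FLUSHES && !region.needsCompaction(); i++) {
      long lid = region.startUpdate(new Text("row-" + i));
      region.put(lid, COLUMN_FAMILY_TEXT, ("filler-" + i).getBytes());
      region.commit(lid);
      // Each memcache flush adds another store file on disk.
      region.flushcache(false);
    }
  }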