HBASE-4081 Issues with HRegion.compactStores methods

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1147350 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2011-07-16 00:28:26 +00:00
parent c8ce6dbd7d
commit 6f476bfd0e
4 changed files with 22 additions and 25 deletions

CHANGES.txt

@@ -314,6 +314,7 @@ Release 0.91.0 - Unreleased
HBASE-4054 Usability improvement to HTablePool (Daniel Iancu)
HBASE-4079 HTableUtil - helper class for loading data (Doug Meil via Ted Yu)
HBASE-3871 Speedup LoadIncrementalHFiles by parallelizing HFile splitting
HBASE-4081 Issues with HRegion.compactStores methods (Ming Ma)
TASKS
HBASE-3559 Move report of split to master OFF the heartbeat channel

HRegion.java

@@ -815,33 +815,27 @@ public class HRegion implements HeapSize { // , Writable{
}
/**
* Called by compaction thread and after region is opened to compact the
* HStores if necessary.
*
* <p>This operation could block for a long time, so don't call it from a
* time-sensitive thread.
*
* Note that no locking is necessary at this level because compaction only
* conflicts with a region split, and that cannot happen because the region
* server does them sequentially and not in parallel.
* This is a helper function that compacts all the stores synchronously.
* It is used by utilities and testing
*
* @param majorCompaction True to force a major compaction regardless of thresholds
* @return split row if split is needed
* @throws IOException e
*/
byte [] compactStores(final boolean majorCompaction)
void compactStores(final boolean majorCompaction)
throws IOException {
if (majorCompaction) {
this.triggerMajorCompaction();
}
return compactStores();
compactStores();
}
/**
* Compact all the stores and return the split key of the first store that needs
* to be split.
* This is a helper function that compacts all the stores synchronously.
* It is used by utilities and testing
*
* @throws IOException e
*/
public byte[] compactStores() throws IOException {
public void compactStores() throws IOException {
for(Store s : getStores().values()) {
CompactionRequest cr = s.requestCompaction();
if(cr != null) {
@@ -851,12 +845,7 @@ public class HRegion implements HeapSize { // , Writable{
s.finishRequest(cr);
}
}
byte[] splitRow = s.checkSplit();
if (splitRow != null) {
return splitRow;
}
}
return null;
}
/*
@@ -3829,7 +3818,7 @@ public class HRegion implements HeapSize { // , Writable{
// nothing
}
byte[] checkSplit() {
public byte[] checkSplit() {
if (this.splitPoint != null) {
return this.splitPoint;
}
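In short, compactStores() no longer reports a split row; callers compact first and then ask the region for a split point via the now-public checkSplit(), as the updated tests below do. A minimal sketch of that calling pattern, assuming an already-opened HRegion (the helper class and method names here are illustrative, not part of this commit):

import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.HRegion;

// Hypothetical helper that mimics what the old byte[] compactStores() did in one call.
final class CompactAndCheckSplit {
  static byte[] compactAndGetSplitRow(final HRegion region) throws IOException {
    region.compactStores();      // now void: synchronously compacts every store
    return region.checkSplit();  // null when no store needs to be split
  }
}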

TestCoprocessorInterface.java

@@ -146,7 +146,11 @@ public class TestCoprocessorInterface extends HBaseTestCase {
addContent(region, fam3);
region.flushcache();
}
byte [] splitRow = region.compactStores();
region.compactStores();
byte [] splitRow = region.checkSplit();
assertNotNull(splitRow);
HRegion [] regions = split(region, splitRow);
for (int i = 0; i < regions.length; i++) {

TestHRegion.java

@@ -1290,7 +1290,8 @@ public class TestHRegion extends HBaseTestCase {
try {
LOG.info("" + addContent(region, fam3));
region.flushcache();
byte [] splitRow = region.compactStores();
region.compactStores();
byte [] splitRow = region.checkSplit();
assertNotNull(splitRow);
LOG.info("SplitRow: " + Bytes.toString(splitRow));
HRegion [] subregions = splitRegion(region, splitRow);
@@ -2295,7 +2296,8 @@ public class TestHRegion extends HBaseTestCase {
try {
LOG.info("" + addContent(region, fam3));
region.flushcache();
byte [] splitRow = region.compactStores();
region.compactStores();
byte [] splitRow = region.checkSplit();
assertNotNull(splitRow);
LOG.info("SplitRow: " + Bytes.toString(splitRow));
HRegion [] regions = splitRegion(region, splitRow);
@@ -2329,7 +2331,8 @@ public class TestHRegion extends HBaseTestCase {
byte [][] midkeys = new byte [regions.length][];
// To make regions splittable, force compaction.
for (int i = 0; i < regions.length; i++) {
midkeys[i] = regions[i].compactStores();
regions[i].compactStores();
midkeys[i] = regions[i].checkSplit();
}
TreeMap<String, HRegion> sortedMap = new TreeMap<String, HRegion>();