HBASE-618 We always compact if 2 files, regardless of the compaction threshold setting

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@654169 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2008-05-07 16:58:10 +00:00
parent 2eef957d36
commit 20ee89aa4c
3 changed files with 25 additions and 16 deletions

View File

@ -31,6 +31,7 @@ Hbase Change Log
HBASE-607 MultiRegionTable.makeMultiRegionTable is not deterministic enough
for regression tests
HBASE-405 TIF and TOF use log4j directly rather than apache commons-logging
HBASE-618 We always compact if 2 files, regardless of the compaction threshold setting
IMPROVEMENTS
HBASE-559 MR example job to count table rows

View File

@ -860,7 +860,7 @@ public class HRegion implements HConstants {
return midKey;
}
}
LOG.info("starting compaction on region " + getRegionName());
LOG.info("checking compaction on region " + getRegionName());
long startTime = System.currentTimeMillis();
doRegionCompactionPrep();
for (HStore store: stores.values()) {
@ -870,7 +870,7 @@ public class HRegion implements HConstants {
}
}
doRegionCompactionCleanup();
LOG.info("compaction completed on region " + getRegionName() + " in " +
LOG.info("compaction checking completed on region " + getRegionName() + " in " +
StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
} finally {
synchronized (writestate) {

View File

@ -26,6 +26,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
@ -737,6 +738,21 @@ public class HStore implements HConstants {
//////////////////////////////////////////////////////////////////////////////
// Compaction
//////////////////////////////////////////////////////////////////////////////
/**
 * Checks whether any of the given store files is a Reference file.
 * NOTE(review): presumably Reference files are the half-files created when a
 * region splits — confirm against HStoreFile.isReference().
 *
 * @param files store files to inspect; may be <code>null</code> or empty
 * @return True if any of the files in <code>files</code> are References.
 */
private boolean hasReferences(Collection<HStoreFile> files) {
  // Guard against null/empty so callers can pass the raw storefiles list.
  if (files != null && !files.isEmpty()) {
    for (HStoreFile hsf: files) {
      if (hsf.isReference()) {
        return true;
      }
    }
  }
  return false;
}
/**
* Compact the back-HStores. This method may take some time, so the calling
@ -763,30 +779,22 @@ public class HStore implements HConstants {
List<HStoreFile> filesToCompact = null;
synchronized (storefiles) {
filesToCompact = new ArrayList<HStoreFile>(this.storefiles.values());
if (filesToCompact.size() < 1) {
return checkSplit();
} else if (filesToCompact.size() == 1) {
if (!filesToCompact.get(0).isReference()) {
return checkSplit();
}
} else if (filesToCompact.size() < compactionThreshold) {
if (!hasReferences(filesToCompact) &&
filesToCompact.size() < compactionThreshold) {
return checkSplit();
}
if (!fs.exists(compactionDir) && !fs.mkdirs(compactionDir)) {
LOG.warn("Mkdir on " + compactionDir.toString() + " failed");
return checkSplit();
}
// Storefiles are keyed by sequence id. The oldest file comes first.
// We need to return out of here a List that has the newest file first.
Collections.reverse(filesToCompact);
// The max-sequenceID in any of the to-be-compacted TreeMaps is the
// last key of storefiles.
maxId = this.storefiles.lastKey().longValue();
}
// Storefiles are keyed by sequence id. The oldest file comes first.
// We need to return out of here a List that has the newest file first.
Collections.reverse(filesToCompact);
// Step through them, writing to the brand-new MapFile
HStoreFile compactedOutputFile = new HStoreFile(conf, fs,
@ -1801,4 +1809,4 @@ public class HStore implements HConstants {
return copy;
}
}
}
}