HBASE-618 We always compact if 2 files, regardless of the compaction threshold setting
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@654169 13f79535-47bb-0310-9956-ffa450edef68
parent 2eef957d36
commit 20ee89aa4c
@@ -31,6 +31,7 @@ Hbase Change Log
    HBASE-607 MultiRegionTable.makeMultiRegionTable is not deterministic enough
              for regression tests
    HBASE-405 TIF and TOF use log4j directly rather than apache commons-logging
+   HBASE-618 We always compact if 2 files, regardless of the compaction threshold setting
 
 IMPROVEMENTS
    HBASE-559 MR example job to count table rows
@@ -860,7 +860,7 @@ public class HRegion implements HConstants {
           return midKey;
         }
       }
-      LOG.info("starting compaction on region " + getRegionName());
+      LOG.info("checking compaction on region " + getRegionName());
       long startTime = System.currentTimeMillis();
       doRegionCompactionPrep();
       for (HStore store: stores.values()) {
@@ -870,7 +870,7 @@ public class HRegion implements HConstants {
         }
       }
       doRegionCompactionCleanup();
-      LOG.info("compaction completed on region " + getRegionName() + " in " +
+      LOG.info("compaction checking completed on region " + getRegionName() + " in " +
         StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
     } finally {
       synchronized (writestate) {
@@ -26,6 +26,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Collection;
 import java.util.Map;
 import java.util.Set;
 import java.util.SortedMap;
@@ -738,6 +739,21 @@ public class HStore implements HConstants {
   // Compaction
   //////////////////////////////////////////////////////////////////////////////
 
+  /*
+   * @param files
+   * @return True if any of the files in <code>files</code> are References.
+   */
+  private boolean hasReferences(Collection<HStoreFile> files) {
+    if (files != null && files.size() > 0) {
+      for (HStoreFile hsf: files) {
+        if (hsf.isReference()) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
   /**
    * Compact the back-HStores. This method may take some time, so the calling
    * thread must be able to block for long periods.
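
Illustration (not part of the commit): a minimal, self-contained model of the hasReferences() helper added above. The StoreFile class is a hypothetical stand-in for HStoreFile exposing only the Reference flag; a Reference is the half-file pointer a region split leaves behind, and its presence is what forces a compaction in the next hunk.

import java.util.Arrays;
import java.util.Collection;

public class HasReferencesSketch {
  // Hypothetical stand-in for HStoreFile; only isReference() matters here.
  static class StoreFile {
    private final boolean reference;
    StoreFile(boolean reference) { this.reference = reference; }
    boolean isReference() { return reference; }
  }

  // Same shape as the helper in the hunk above: true if any file in the
  // collection is a Reference.
  static boolean hasReferences(Collection<StoreFile> files) {
    if (files != null && files.size() > 0) {
      for (StoreFile hsf : files) {
        if (hsf.isReference()) {
          return true;
        }
      }
    }
    return false;
  }

  public static void main(String[] args) {
    // One plain file plus one Reference: prints true.
    System.out.println(hasReferences(
        Arrays.asList(new StoreFile(false), new StoreFile(true))));
    // Two plain files: prints false.
    System.out.println(hasReferences(
        Arrays.asList(new StoreFile(false), new StoreFile(false))));
  }
}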
@@ -763,31 +779,23 @@ public class HStore implements HConstants {
     List<HStoreFile> filesToCompact = null;
     synchronized (storefiles) {
       filesToCompact = new ArrayList<HStoreFile>(this.storefiles.values());
-      if (filesToCompact.size() < 1) {
-        return checkSplit();
-      } else if (filesToCompact.size() == 1) {
-        if (!filesToCompact.get(0).isReference()) {
+      if (!hasReferences(filesToCompact) &&
+          filesToCompact.size() < compactionThreshold) {
         return checkSplit();
       }
-      } else if (filesToCompact.size() < compactionThreshold) {
-        return checkSplit();
-      }
 
       if (!fs.exists(compactionDir) && !fs.mkdirs(compactionDir)) {
         LOG.warn("Mkdir on " + compactionDir.toString() + " failed");
         return checkSplit();
       }
 
+      // The max-sequenceID in any of the to-be-compacted TreeMaps is the
+      // last key of storefiles.
+      maxId = this.storefiles.lastKey().longValue();
+    }
     // Storefiles are keyed by sequence id. The oldest file comes first.
     // We need to return out of here a List that has the newest file first.
     Collections.reverse(filesToCompact);
-
-      // The max-sequenceID in any of the to-be-compacted TreeMaps is the
-      // last key of storefiles.
-
-      maxId = this.storefiles.lastKey().longValue();
-    }
-
     // Step through them, writing to the brand-new MapFile
     HStoreFile compactedOutputFile = new HStoreFile(conf, fs,
       this.compactionDir, info.getEncodedName(), family.getFamilyName(),