HBASE-620 testmergetool failing in branch and trunk since hbase-618 went in
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@654326 13f79535-47bb-0310-9956-ffa450edef68
parent 5a8da86833
commit a390d36ef5
HRegion.java

@@ -165,12 +165,12 @@ public class HRegion implements HConstants {
     // Compact each region so we only have one store file per family
 
-    a.compactStores();
+    a.compactStores(true);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for region: " + a.getRegionName());
       listPaths(fs, a.getRegionDir());
     }
-    b.compactStores();
+    b.compactStores(true);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for region: " + b.getRegionName());
       listPaths(fs, b.getRegionDir());
     }
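The comment in the hunk above carries the rationale: merge only works cleanly once each parent region is down to a single store file per family, so both regions are now compacted with force = true even when they sit below the compaction threshold. A small standalone toy of that precondition (hypothetical names, not HBase API):

    import java.util.ArrayList;
    import java.util.List;

    // Toy model only: once each parent is compacted down to one file, the merged
    // "region" can simply start from one file per parent, no further rewriting.
    class MergePreconditionSketch {
      static String compactToSingleFile(List<String> files) {
        return String.join("+", files);   // stand-in for rewriting many store files as one
      }

      public static void main(String[] args) {
        String aFile = compactToSingleFile(List.of("a-1", "a-2", "a-3"));
        String bFile = compactToSingleFile(List.of("b-1", "b-2"));
        List<String> mergedRegionFiles = new ArrayList<>(List.of(aFile, bFile));
        System.out.println(mergedRegionFiles);   // [a-1+a-2+a-3, b-1+b-2]
      }
    }

Since the merge code shown here lives in HRegion itself, it can reach the private compactStores(boolean) overload added in the next hunk; only the no-argument form stays public.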
@@ -845,6 +845,26 @@ public class HRegion implements HConstants {
    * @throws IOException
    */
+  public Text compactStores() throws IOException {
+    return compactStores(false);
+  }
+
+  /**
+   * Called by compaction thread and after region is opened to compact the
+   * HStores if necessary.
+   *
+   * <p>This operation could block for a long time, so don't call it from a
+   * time-sensitive thread.
+   *
+   * Note that no locking is necessary at this level because compaction only
+   * conflicts with a region split, and that cannot happen because the region
+   * server does them sequentially and not in parallel.
+   *
+   * @param force True to force a compaction regardless of thresholds (Needed
+   * by merge).
+   * @return mid key if split is needed
+   * @throws IOException
+   */
   private Text compactStores(final boolean force) throws IOException {
     Text midKey = null;
     if (this.closed.get()) {
       return midKey;
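The shape of the addition above is a plain delegation: the existing no-argument entry point keeps its old contract by forwarding force = false, so only internal callers that genuinely need it (the merge path) ever pass true. A minimal sketch of that shape, using an assumed stand-in class rather than the real HRegion (Text is Hadoop's writable string type that the patched code already uses):

    import java.io.IOException;

    import org.apache.hadoop.io.Text;

    // Sketch of the overload/delegation shape added in the hunk above; a stand-in
    // abstract class, not the real HRegion.
    abstract class CompactStoresShape {
      /** Normal callers: compact only if thresholds say so. */
      public Text compactStores() throws IOException {
        return compactStores(false);
      }

      /** force = true compacts every store regardless of thresholds (needed by merge). */
      protected abstract Text compactStores(boolean force) throws IOException;
    }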
@@ -864,7 +884,7 @@ public class HRegion implements HConstants {
     long startTime = System.currentTimeMillis();
     doRegionCompactionPrep();
     for (HStore store: stores.values()) {
-      Text key = store.compact();
+      Text key = store.compact(force);
       if (key != null && midKey == null) {
         midKey = key;
       }
@@ -1062,7 +1082,6 @@ public class HRegion implements HConstants {
     Cell[] results = get(row, column, Long.MAX_VALUE, 1);
     return (results == null || results.length == 0)? null: results[0];
   }
 
   /**
    * Fetch multiple versions of a single data item
    *

HStore.java
@@ -770,16 +770,18 @@ public class HStore implements HConstants {
    * We don't want to hold the structureLock for the whole time, as a compact()
    * can be lengthy and we want to allow cache-flushes during this period.
    *
+   * @param force True to force a compaction regardless of thresholds (Needed
+   * by merge).
    * @return mid key if a split is needed, null otherwise
    * @throws IOException
    */
-  Text compact() throws IOException {
+  Text compact(final boolean force) throws IOException {
     synchronized (compactLock) {
       long maxId = -1;
       List<HStoreFile> filesToCompact = null;
       synchronized (storefiles) {
         filesToCompact = new ArrayList<HStoreFile>(this.storefiles.values());
-        if (!hasReferences(filesToCompact) &&
+        if (!force && !hasReferences(filesToCompact) &&
            filesToCompact.size() < compactionThreshold) {
           return checkSplit();
         }
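The guard rewritten in the last few lines is the crux of the fix: without the !force clause, a store holding fewer files than compactionThreshold skips compaction even when the merge tool needs it, which is presumably what left the merge-tool test failing after HBASE-618. A runnable toy of the corrected guard (names and the threshold value are illustrative, not HStore itself):

    import java.util.List;

    // Toy model of the corrected guard: force bypasses both the reference check and
    // the file-count threshold. Names and the threshold value are illustrative only.
    class CompactionGuardSketch {
      static final int COMPACTION_THRESHOLD = 3;

      static boolean shouldCompact(boolean force, boolean hasReferences, List<String> files) {
        // Mirrors: if (!force && !hasReferences(filesToCompact) &&
        //              filesToCompact.size() < compactionThreshold) { return checkSplit(); }
        if (!force && !hasReferences && files.size() < COMPACTION_THRESHOLD) {
          return false;   // too few files and nothing forcing us: skip the compaction
        }
        return true;
      }

      public static void main(String[] args) {
        List<String> twoFiles = List.of("f1", "f2");
        System.out.println(shouldCompact(false, false, twoFiles));  // false: below threshold
        System.out.println(shouldCompact(true, false, twoFiles));   // true: merge forces it
      }
    }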