HBASE-620 TestMergeTool failing in branch and trunk since HBASE-618 went in

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@654326 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2008-05-07 23:33:18 +00:00
parent 5a8da86833
commit a390d36ef5
2 changed files with 27 additions and 6 deletions

HRegion.java

@@ -165,12 +165,12 @@ public class HRegion implements HConstants {
     // Compact each region so we only have one store file per family
-    a.compactStores();
+    a.compactStores(true);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for region: " + a.getRegionName());
       listPaths(fs, a.getRegionDir());
     }
-    b.compactStores();
+    b.compactStores(true);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for region: " + b.getRegionName());
       listPaths(fs, b.getRegionDir());
@@ -845,6 +845,26 @@ public class HRegion implements HConstants {
    * @throws IOException
    */
   public Text compactStores() throws IOException {
+    return compactStores(false);
+  }
+
+  /**
+   * Called by compaction thread and after region is opened to compact the
+   * HStores if necessary.
+   *
+   * <p>This operation could block for a long time, so don't call it from a
+   * time-sensitive thread.
+   *
+   * Note that no locking is necessary at this level because compaction only
+   * conflicts with a region split, and that cannot happen because the region
+   * server does them sequentially and not in parallel.
+   *
+   * @param force True to force a compaction regardless of thresholds (Needed
+   * by merge).
+   * @return mid key if split is needed
+   * @throws IOException
+   */
+  private Text compactStores(final boolean force) throws IOException {
     Text midKey = null;
     if (this.closed.get()) {
       return midKey;
@@ -864,7 +884,7 @@ public class HRegion implements HConstants {
     long startTime = System.currentTimeMillis();
     doRegionCompactionPrep();
     for (HStore store: stores.values()) {
-      Text key = store.compact();
+      Text key = store.compact(force);
       if (key != null && midKey == null) {
         midKey = key;
       }
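For reference, a minimal, self-contained sketch of the pattern the two hunks above introduce (the class name, method bodies, and mergePrep helper below are assumed for illustration, not taken from HBase): the public no-argument compactStores() keeps its signature for the compaction thread and delegates to a private overload that threads the force flag down to each store; merge is the caller that needs force == true, since it must end up with a single store file per family regardless of thresholds.

// RegionSketch is a hypothetical stand-in for HRegion; only the overload
// structure mirrors the patch, the bodies are placeholders.
class RegionSketch {

  /** Routine compaction entry point; signature unchanged for existing callers. */
  public String compactStores() {
    return compactStores(false);
  }

  /**
   * @param force true to compact regardless of thresholds (needed by merge)
   * @return mid key if a split is needed, else null
   */
  private String compactStores(final boolean force) {
    String midKey = null;
    // The real code iterates the stores, calls store.compact(force) on each,
    // and keeps the first non-null key as midKey.
    return midKey;
  }

  /** How the merge path uses it: both source regions are compacted unconditionally. */
  static void mergePrep(RegionSketch a, RegionSketch b) {
    a.compactStores(true);
    b.compactStores(true);
  }
}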
@@ -1062,7 +1082,6 @@ public class HRegion implements HConstants {
     Cell[] results = get(row, column, Long.MAX_VALUE, 1);
     return (results == null || results.length == 0)? null: results[0];
   }
-
   /**
    * Fetch multiple versions of a single data item
    *

HStore.java

@@ -770,16 +770,18 @@ public class HStore implements HConstants {
    * We don't want to hold the structureLock for the whole time, as a compact()
    * can be lengthy and we want to allow cache-flushes during this period.
    *
+   * @param force True to force a compaction regardless of thresholds (Needed
+   * by merge).
    * @return mid key if a split is needed, null otherwise
    * @throws IOException
    */
-  Text compact() throws IOException {
+  Text compact(final boolean force) throws IOException {
     synchronized (compactLock) {
       long maxId = -1;
       List<HStoreFile> filesToCompact = null;
       synchronized (storefiles) {
         filesToCompact = new ArrayList<HStoreFile>(this.storefiles.values());
-        if (!hasReferences(filesToCompact) &&
+        if (!force && !hasReferences(filesToCompact) &&
             filesToCompact.size() < compactionThreshold) {
           return checkSplit();
         }
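To make the guard change concrete, here is a minimal, self-contained sketch (field names, types, and the threshold value are assumed, and the actual compaction work is elided): a routine compaction may still short-circuit while the store holds fewer files than compactionThreshold, but force == true must bypass that early exit. Without the bypass, a merge-triggered compaction on a small store returns early and leaves the store with more than one file, which is what was making the merge test fail after HBASE-618.

import java.util.ArrayList;
import java.util.List;

// StoreSketch is a hypothetical stand-in for HStore; field names and the
// threshold value are assumed, and the real compaction work is elided.
class StoreSketch {
  private final Object compactLock = new Object();
  private final List<String> storefiles = new ArrayList<String>();
  private final int compactionThreshold = 3; // assumed default

  String compact(final boolean force) {
    synchronized (compactLock) {
      final List<String> filesToCompact;
      synchronized (storefiles) {
        filesToCompact = new ArrayList<String>(storefiles); // snapshot under lock
        // The fix: a routine compaction may still short-circuit below the
        // threshold, but a forced (merge-driven) compaction must not, or a
        // small store keeps more than one file and the merge breaks.
        if (!force && filesToCompact.size() < compactionThreshold) {
          return null; // nothing to do for a routine compaction
        }
      }
      // ... rewrite filesToCompact into a single new store file here ...
      return null; // mid key if a split is needed, else null
    }
  }
}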