HADOOP-2619 Compaction errors after a region splits
git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@612995 13f79535-47bb-0310-9956-ffa450edef68
commit 40b4c04da8
parent 74df13a369
CHANGES.txt
@@ -137,6 +137,7 @@ Trunk (unreleased changes)
    HADOOP-2493 hbase will split on row when the start and end row is the
                same cause data loss (Bryan Duxbury via Stack)
    HADOOP-2629 Shell digests garbage without complaint
+   HADOOP-2619 Compaction errors after a region splits
 
   IMPROVEMENTS
    HADOOP-2401 Add convenience put method that takes writable

src/java/org/apache/hadoop/hbase/HRegion.java
@@ -204,6 +204,7 @@ public class HRegion implements HConstants {
   final HBaseConfiguration conf;
   final HRegionInfo regionInfo;
   final Path regiondir;
+  private final Path regionCompactionDir;
 
   static class WriteState {
     // Set while a memcache flush is happening.
@@ -267,6 +268,9 @@ public class HRegion implements HConstants {
     this.regiondir = new Path(basedir, this.regionInfo.getEncodedName());
     Path oldLogFile = new Path(regiondir, HREGION_OLDLOGFILE_NAME);
+
+    this.regionCompactionDir =
+      new Path(getCompactionDir(basedir), this.regionInfo.getEncodedName());
 
     // Move prefab HStore files into place (if any). This picks up split files
     // and any merges from splits and merges dirs.
     if(initialFiles != null && fs.exists(initialFiles)) {
@@ -706,6 +710,33 @@ public class HRegion implements HConstants {
     return compactStores();
   }
 
+  /*
+   * @param dir
+   * @return compaction directory for the passed in <code>dir</code>
+   */
+  static Path getCompactionDir(final Path dir) {
+    return new Path(dir, "compaction.dir");
+  }
+
+  /*
+   * Do preparation for pending compaction.
+   * Clean out any vestiges of previous failed compactions.
+   * @throws IOException
+   */
+  private void doRegionCompactionPrep() throws IOException {
+    doRegionCompactionCleanup();
+  }
+
+  /*
+   * Removes the compaction directory for this Store.
+   * @throws IOException
+   */
+  private void doRegionCompactionCleanup() throws IOException {
+    if (this.fs.exists(this.regionCompactionDir)) {
+      this.fs.delete(this.regionCompactionDir);
+    }
+  }
+
   /**
    * Compact all the stores. This should be called periodically to make sure
    * the stores are kept manageable.
@@ -743,11 +774,13 @@ public class HRegion implements HConstants {
     LOG.info("starting compaction on region " +
       this.regionInfo.getRegionName().toString());
     boolean status = true;
+    doRegionCompactionPrep();
     for (HStore store : stores.values()) {
       if(!store.compact()) {
         status = false;
       }
     }
+    doRegionCompactionCleanup();
     LOG.info("compaction completed on region " +
       this.regionInfo.getRegionName().toString() + ". Took " +
       StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));

src/java/org/apache/hadoop/hbase/HStore.java
@@ -326,7 +326,7 @@ public class HStore implements HConstants {
       for (Map.Entry<HStoreKey, byte []> es: tailMap.entrySet()) {
         HStoreKey itKey = es.getKey();
         if (itKey.matchesRowCol(key)) {
-          if (!HLogEdit.isDeleted(es.getValue())) {
+          if (!HLogEdit.isDeleted(es.getValue())) {
             result.add(tailMap.get(itKey));
           }
         }
@@ -657,7 +657,7 @@ public class HStore implements HConstants {
     this.fs = fs;
     this.conf = conf;
 
-    this.compactionDir = new Path(basedir, "compaction.dir");
+    this.compactionDir = HRegion.getCompactionDir(basedir);
     this.storeName =
       this.info.getEncodedName() + "/" + this.family.getFamilyName();
 
@@ -1192,15 +1192,7 @@ public class HStore implements HConstants {
         " files using " + compactionDir.toString() + " for " +
         this.storeName);
     }
-    if (this.fs.exists(compactionDir)) {
-      // Clean out its content in prep. for this new compaction. Has either
-      // aborted previous compaction or it has content of a previous compaction.
-      Path [] toRemove = this.fs.listPaths(new Path [] {compactionDir});
-      for (int i = 0; i < toRemove.length; i++) {
-        this.fs.delete(toRemove[i]);
-      }
-    }
 
     // Storefiles are keyed by sequence id. The oldest file comes first.
     // We need to return out of here a List that has the newest file first.
     List<HStoreFile> filesToCompact =
@@ -1215,15 +1207,14 @@ public class HStore implements HConstants {
     }
 
     if (!fs.exists(compactionDir) && !fs.mkdirs(compactionDir)) {
-      LOG.warn("Mkdir on " + compactionDir.toString() + " for " +
-        this.storeName + " failed");
+      LOG.warn("Mkdir on " + compactionDir.toString() + " failed");
       return false;
     }
 
     // Step through them, writing to the brand-new MapFile
     HStoreFile compactedOutputFile = new HStoreFile(conf, fs,
-      this.compactionDir, info.getEncodedName(), family.getFamilyName(),
-      -1L, null);
+      this.compactionDir, info.getEncodedName(), family.getFamilyName(),
+      -1L, null);
     MapFile.Writer compactedOut = compactedOutputFile.getWriter(this.fs,
       this.compression, this.bloomFilter);
     try {
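
For readers skimming the patch: the fix gives each region a single well-known compaction directory (HRegion.getCompactionDir(basedir) plus the region's encoded name), has HStore reuse that directory instead of computing its own, and wipes it both before and after compacting the stores, so a compaction interrupted, for example by a region split, cannot leave stale files behind to break the next compaction. The sketch below is illustrative only, not code from this commit: the class name, the compactAll harness, and the use of the two-argument FileSystem.delete (the patch itself calls the one-argument form current at the time) are assumptions for the example.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Hypothetical harness showing the prep/cleanup pattern the patch adds. */
public class CompactionDirSketch {
  private final FileSystem fs;
  private final Path regionCompactionDir;

  public CompactionDirSketch(FileSystem fs, Path basedir, String encodedRegionName) {
    this.fs = fs;
    // Mirrors the patch: a shared "compaction.dir" under the table
    // directory, then a per-region subdirectory keyed by encoded name.
    this.regionCompactionDir =
      new Path(new Path(basedir, "compaction.dir"), encodedRegionName);
  }

  // Counterpart of doRegionCompactionCleanup(): remove leftovers from any
  // compaction that died mid-flight.
  public void cleanup() throws IOException {
    if (this.fs.exists(this.regionCompactionDir)) {
      // Recursive delete; the patch uses the older one-argument delete.
      this.fs.delete(this.regionCompactionDir, true);
    }
  }

  // Counterpart of the compactStores() change: clean up on both sides of
  // the per-store compactions.
  public void compactAll(Iterable<Runnable> storeCompactions) throws IOException {
    cleanup(); // doRegionCompactionPrep() delegates to the same cleanup
    for (Runnable compaction : storeCompactions) {
      compaction.run(); // each HStore writes its work under the shared dir
    }
    cleanup(); // doRegionCompactionCleanup()
  }
}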