HBASE-1480 compaction file not cleaned up after a crash/OOME server

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@785011 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2009-06-15 22:29:30 +00:00
parent aed9c07cd8
commit 20544ecc25
3 changed files with 19 additions and 8 deletions
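
In outline: before this change, every store of a table wrote compaction output into a single shared working directory, so files left behind by a region server that died mid-compaction (crash or OutOfMemoryError) were never removed. The fix keys the working directory on the encoded region name, giving each region a subdirectory it alone owns and can safely clean up. A minimal sketch of the path change, reusing the basedir and info names from the Store constructor in the diff below (the literal compaction.dir segment is an assumption about what HRegion.getCompactionDir returns):

  // Before: one working directory shared by all regions of the table,
  //   <basedir>/compaction.dir
  Path compactionDir = HRegion.getCompactionDir(basedir);

  // After: a per-region subdirectory keyed on the encoded region name,
  //   <basedir>/compaction.dir/<encodedRegionName>
  // so stale output can be deleted without touching other regions' work.
  Path regionCompactionDir = new Path(HRegion.getCompactionDir(basedir),
      Integer.toString(info.getEncodedName()));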

CHANGES.txt

@@ -188,6 +188,8 @@ Release 0.20.0 - Unreleased
    HBASE-1526 mapreduce fixup
    HBASE-1503 hbase-1304 dropped updating list of store files on flush
               (jgray via stack)
+   HBASE-1480 compaction file not cleaned up after a crash/OOME server
+              (Evgeny Ryabitskiy via Stack)
 
   IMPROVEMENTS
    HBASE-1089  Add count of regions on filesystem to master UI; add percentage

src/java/org/apache/hadoop/hbase/regionserver/Store.java

@@ -126,7 +126,7 @@ public class Store implements HConstants {
   // reflected in the TreeMaps).
   private volatile long maxSeqId = -1;
-  private final Path compactionDir;
+  private final Path regionCompactionDir;
   private final Object compactLock = new Object();
   private final int compactionThreshold;
   private final int blocksize;
@@ -180,7 +180,8 @@ public class Store implements HConstants {
       this.ttl *= 1000;
     }
     this.memcache = new Memcache(this.ttl, this.comparator);
-    this.compactionDir = HRegion.getCompactionDir(basedir);
+    this.regionCompactionDir = new Path(HRegion.getCompactionDir(basedir),
+      Integer.toString(info.getEncodedName()));
     this.storeName = this.family.getName();
     this.storeNameStr = Bytes.toString(this.storeName);
@@ -653,8 +654,8 @@ public class Store implements HConstants {
         (forceSplit || (filesToCompact.size() < compactionThreshold))) {
       return checkSplit(forceSplit);
     }
-    if (!fs.exists(this.compactionDir) && !fs.mkdirs(this.compactionDir)) {
-      LOG.warn("Mkdir on " + this.compactionDir.toString() + " failed");
+    if (!fs.exists(this.regionCompactionDir) && !fs.mkdirs(this.regionCompactionDir)) {
+      LOG.warn("Mkdir on " + this.regionCompactionDir.toString() + " failed");
       return checkSplit(forceSplit);
     }
@@ -707,7 +708,7 @@ public class Store implements HConstants {
       }
       // Step through them, writing to the brand-new file
-      HFile.Writer writer = getWriter(this.compactionDir);
+      HFile.Writer writer = getWriter(this.regionCompactionDir);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Started compaction of " + filesToCompact.size() + " file(s)" +
           (references? ", hasReferences=true,": " ") + " into " +

src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java

@@ -23,14 +23,13 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -41,6 +40,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 public class TestCompaction extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
   private HRegion r = null;
+  private Path compactionDir = null;
+  private Path regionCompactionDir = null;
   private static final byte [] COLUMN_FAMILY = fam1;
   private final byte [] STARTROW = Bytes.toBytes(START_KEY);
   private static final byte [] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
@@ -67,6 +68,9 @@ public class TestCompaction extends HBaseTestCase {
     super.setUp();
     HTableDescriptor htd = createTableDescriptor(getName());
     this.r = createNewHRegion(htd, null, null);
+    this.compactionDir = HRegion.getCompactionDir(this.r.getBaseDir());
+    this.regionCompactionDir = new Path(this.compactionDir,
+      Integer.toString(this.r.getRegionInfo().getEncodedName()));
   }
 
   @Override
@@ -102,10 +106,14 @@
     Result result = r.get(new Get(STARTROW).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
     // Assert that I can get 3 versions since it is the max I should get
-    assertEquals(3, result.size());
+    assertEquals(COMPACTION_THRESHOLD, result.size());
     // assertEquals(cellValues.length, 3);
     r.flushcache();
     r.compactStores();
+    // Check that the shared compaction dir exists.
+    assertTrue(this.cluster.getFileSystem().exists(this.compactionDir));
+    // Check that the compaction dir for this region has been cleaned up.
+    assertTrue(!this.cluster.getFileSystem().exists(this.regionCompactionDir));
     // Always 3 versions if that is what max versions is.
     byte [] secondRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
     // Increment the least significant character so we get to next row.
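
The assertion pair pins down the contract: after compactStores() the shared parent directory may remain, since other regions of the table use it, while this region's subdirectory must be gone. Restated as a self-contained fragment under the same assumptions (cluster is the test's MiniDFSCluster field; FileSystem is org.apache.hadoop.fs.FileSystem; assertFalse is the JUnit equivalent of the assertTrue(!...) above):

  FileSystem fs = this.cluster.getFileSystem();
  // The shared <basedir>/compaction.dir parent survives the compaction...
  assertTrue(fs.exists(this.compactionDir));
  // ...but the per-region working directory has been cleaned up.
  assertFalse(fs.exists(this.regionCompactionDir));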