HADOOP-2283 AlreadyBeingCreatedException (Was: Stuck replay of failed regionserver edits)


git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@603077 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2007-12-10 22:36:03 +00:00
parent 6a65a514a8
commit f9fc02ba8b
5 changed files with 76 additions and 25 deletions

View File

@ -68,6 +68,8 @@ Trunk (unreleased changes)
(Bryan Duxbury via Stack)
HADOOP-2350 Scanner api returns null row names, or skips row names if
different column families do not have entries for some rows
HADOOP-2283 AlreadyBeingCreatedException (Was: Stuck replay of failed
regionserver edits)
IMPROVEMENTS
HADOOP-2401 Add convenience put method that takes writable

View File

@ -160,11 +160,10 @@ public class HLog implements HConstants {
" because zero length");
continue;
}
SequenceFile.Reader in =
new SequenceFile.Reader(fs, logfiles[i], conf);
HLogKey key = new HLogKey();
HLogEdit val = new HLogEdit();
SequenceFile.Reader in = new SequenceFile.Reader(fs, logfiles[i], conf);
try {
HLogKey key = new HLogKey();
HLogEdit val = new HLogEdit();
int count = 0;
for (; in.next(key, val); count++) {
Text regionName = key.getRegionName();
@ -174,13 +173,16 @@ public class HLog implements HConstants {
HRegionInfo.encodeRegionName(regionName)),
HREGION_OLDLOGFILE_NAME);
if (LOG.isDebugEnabled()) {
LOG.debug("Creating new log file writer for path " + logfile);
LOG.debug("Creating new log file writer for path " + logfile +
"; map content " + logWriters.toString());
}
w = SequenceFile.createWriter(fs, conf, logfile, HLogKey.class,
HLogEdit.class);
logWriters.put(regionName, w);
// Use copy of regionName; regionName object is reused inside in
// HStoreKey.getRegionName so its content changes as we iterate.
logWriters.put(new Text(regionName), w);
}
if (count % 100 == 0 && count > 0 && LOG.isDebugEnabled()) {
if (count % 10000 == 0 && count > 0 && LOG.isDebugEnabled()) {
LOG.debug("Applied " + count + " edits");
}
w.append(key, val);

View File

@ -823,9 +823,11 @@ class HStore implements HConstants {
}
FSDataOutputStream out =
fs.create(new Path(filterDir, BLOOMFILTER_FILE_NAME));
bloomFilter.write(out);
out.close();
try {
bloomFilter.write(out);
} finally {
out.close();
}
if (LOG.isDebugEnabled()) {
LOG.debug("flushed bloom filter for " + this.storeName);
}

View File

@ -492,11 +492,14 @@ public class HStoreFile implements HConstants, WritableComparable {
throw new IOException("File already exists " + p.toString());
}
FSDataOutputStream out = fs.create(p);
out.writeUTF(getReference().getEncodedRegionName());
getReference().getMidkey().write(out);
out.writeLong(getReference().getFileId());
out.writeBoolean(isTopFileRegion(getReference().getFileRegion()));
out.close();
try {
out.writeUTF(getReference().getEncodedRegionName());
getReference().getMidkey().write(out);
out.writeLong(getReference().getFileId());
out.writeBoolean(isTopFileRegion(getReference().getFileRegion()));
} finally {
out.close();
}
}
/*
@ -559,7 +562,6 @@ public class HStoreFile implements HConstants, WritableComparable {
in.close();
}
}
} finally {
out.close();
}
@ -867,7 +869,6 @@ public class HStoreFile implements HConstants, WritableComparable {
static class Writer extends MapFile.Writer {
private final Filter bloomFilter;
/**
* Constructor
*

View File

@ -30,16 +30,63 @@ import org.apache.hadoop.io.SequenceFile.Reader;
/** JUnit test case for HLog */
public class TestHLog extends HBaseTestCase implements HConstants {
private Path dir;
private FileSystem fs;
@Override
protected void setUp() throws Exception {
  super.setUp();
  // Give every test its own working directory and start from a clean slate:
  // remove any residue left behind by an earlier, aborted run.
  this.dir = getUnitTestdir(getName());
  this.fs = FileSystem.get(this.conf);
  if (this.fs.exists(this.dir)) {
    this.fs.delete(this.dir);
  }
}
@Override
protected void tearDown() throws Exception {
  // Remove the per-test directory (if the test got far enough to create it)
  // before handing control back to the superclass teardown.
  if (fs.exists(dir)) {
    fs.delete(dir);
  }
  super.tearDown();
}
/**
 * Write edits for several regions across multiple log files, then run the
 * log split. Before the fix for HADOOP-2283 this scenario failed.
 * @throws IOException
 */
public void testSplit() throws IOException {
  final Text tableName = new Text(getName());
  final Text rowName = tableName;
  HLog log = new HLog(this.fs, this.dir, this.conf, null);
  try {
    // Produce three log files, each carrying edits for three regions with
    // three columns apiece, rolling the writer after each batch.
    for (int fileCount = 0; fileCount < 3; fileCount++) {
      for (int region = 0; region < 3; region++) {
        for (int col = 0; col < 3; col++) {
          Text column = new Text(Integer.toString(col));
          TreeMap<HStoreKey, byte[]> edit = new TreeMap<HStoreKey, byte[]>();
          edit.put(
            new HStoreKey(rowName, column, System.currentTimeMillis()),
            column.getBytes());
          log.append(new Text(Integer.toString(region)), tableName, edit);
        }
      }
      log.rollWriter();
    }
    HLog.splitLog(this.testDir, this.dir, this.fs, this.conf);
  } finally {
    if (log != null) {
      log.closeAndDelete();
    }
  }
}
/**
* @throws IOException
*/
public void testAppend() throws IOException {
Path dir = getUnitTestdir(getName());
FileSystem fs = FileSystem.get(this.conf);
if (fs.exists(dir)) {
fs.delete(dir);
}
final int COL_COUNT = 10;
final Text regionName = new Text("regionname");
final Text tableName = new Text("tablename");
@ -89,9 +136,6 @@ public class TestHLog extends HBaseTestCase implements HConstants {
if (reader != null) {
reader.close();
}
if (fs.exists(dir)) {
fs.delete(dir);
}
}
}