HADOOP-2283 AlreadyBeingCreatedException (Was: Stuck replay of failed regionserver edits)

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@603077 13f79535-47bb-0310-9956-ffa450edef68

commit f9fc02ba8b
parent 6a65a514a8

@@ -68,6 +68,8 @@ Trunk (unreleased changes)
               (Bryan Duxbury via Stack)
   HADOOP-2350 Scanner api returns null row names, or skips row names if
               different column families do not have entries for some rows
+  HADOOP-2283 AlreadyBeingCreatedException (Was: Stuck replay of failed
+              regionserver edits)
 
 IMPROVEMENTS
   HADOOP-2401 Add convenience put method that takes writable

@@ -160,11 +160,10 @@ public class HLog implements HConstants {
              " because zero length");
          continue;
        }
-        SequenceFile.Reader in =
-          new SequenceFile.Reader(fs, logfiles[i], conf);
+        HLogKey key = new HLogKey();
+        HLogEdit val = new HLogEdit();
+        SequenceFile.Reader in = new SequenceFile.Reader(fs, logfiles[i], conf);
        try {
-          HLogKey key = new HLogKey();
-          HLogEdit val = new HLogEdit();
          int count = 0;
          for (; in.next(key, val); count++) {
            Text regionName = key.getRegionName();

@@ -174,13 +173,16 @@ public class HLog implements HConstants {
                    HRegionInfo.encodeRegionName(regionName)),
                  HREGION_OLDLOGFILE_NAME);
              if (LOG.isDebugEnabled()) {
-                LOG.debug("Creating new log file writer for path " + logfile);
+                LOG.debug("Creating new log file writer for path " + logfile +
+                  "; map content " + logWriters.toString());
              }
              w = SequenceFile.createWriter(fs, conf, logfile, HLogKey.class,
                HLogEdit.class);
-              logWriters.put(regionName, w);
+              // Use copy of regionName; regionName object is reused inside in
+              // HStoreKey.getRegionName so its content changes as we iterate.
+              logWriters.put(new Text(regionName), w);
            }
-            if (count % 100 == 0 && count > 0 && LOG.isDebugEnabled()) {
+            if (count % 10000 == 0 && count > 0 && LOG.isDebugEnabled()) {
              LOG.debug("Applied " + count + " edits");
            }
            w.append(key, val);
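
The two HLog hunks above hinge on the fact that the SequenceFile reader reuses the key and value objects passed to next(), so the Text returned by key.getRegionName() is mutated on every loop iteration. The following is a minimal sketch (not part of the patch; map contents and region names are invented for illustration) of why the defensive copy in logWriters.put(new Text(regionName), w) matters:

import java.util.TreeMap;
import org.apache.hadoop.io.Text;

// Illustrative only: reusing one mutable Text as a map key collapses the map.
public class ReusedKeySketch {
  public static void main(String[] args) {
    TreeMap<Text, String> logWriters = new TreeMap<Text, String>();
    Text regionName = new Text();            // reused, like the key handed to in.next()
    for (String r : new String[] { "region-a", "region-b" }) {
      regionName.set(r);                     // mutates the shared instance
      logWriters.put(regionName, "writer for " + r);     // bug: same key object every time
      // fix: logWriters.put(new Text(regionName), "writer for " + r);
    }
    System.out.println(logWriters.size());   // prints 1; with the copy it would print 2
  }
}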

@@ -823,9 +823,11 @@ class HStore implements HConstants {
      }
      FSDataOutputStream out =
        fs.create(new Path(filterDir, BLOOMFILTER_FILE_NAME));
-      bloomFilter.write(out);
-      out.close();
+      try {
+        bloomFilter.write(out);
+      } finally {
+        out.close();
+      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("flushed bloom filter for " + this.storeName);
      }

@@ -492,11 +492,14 @@ public class HStoreFile implements HConstants, WritableComparable {
      throw new IOException("File already exists " + p.toString());
    }
    FSDataOutputStream out = fs.create(p);
-    out.writeUTF(getReference().getEncodedRegionName());
-    getReference().getMidkey().write(out);
-    out.writeLong(getReference().getFileId());
-    out.writeBoolean(isTopFileRegion(getReference().getFileRegion()));
-    out.close();
+    try {
+      out.writeUTF(getReference().getEncodedRegionName());
+      getReference().getMidkey().write(out);
+      out.writeLong(getReference().getFileId());
+      out.writeBoolean(isTopFileRegion(getReference().getFileRegion()));
+    } finally {
+      out.close();
+    }
  }

  /*
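
The bloom-filter flush in HStore and this reference-file write in HStoreFile adopt the same idiom: open the FSDataOutputStream, do the writes inside try, and close in finally, so a failed write cannot leave the HDFS file open under a live lease, which is one plausible way a later re-create ends in AlreadyBeingCreatedException. A minimal sketch of the idiom under that assumption; the path and payload below are invented for illustration, not taken from the patch:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: always release the output stream, even when a write throws.
public class TryFinallyCloseSketch {
  static void writeMarker(FileSystem fs, Path p) throws IOException {
    FSDataOutputStream out = fs.create(p);
    try {
      out.writeUTF("marker");   // any of the writes may throw
    } finally {
      out.close();              // runs on success and on failure alike
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    writeMarker(fs, new Path("try-finally-sketch.tmp"));
  }
}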

@@ -559,7 +562,6 @@ public class HStoreFile implements HConstants, WritableComparable {
          in.close();
        }
      }
-
    } finally {
      out.close();
    }

@@ -867,7 +869,6 @@ public class HStoreFile implements HConstants, WritableComparable {
  static class Writer extends MapFile.Writer {
    private final Filter bloomFilter;
-

    /**
     * Constructor
     *

@@ -30,16 +30,63 @@ import org.apache.hadoop.io.SequenceFile.Reader;
 
 /** JUnit test case for HLog */
 public class TestHLog extends HBaseTestCase implements HConstants {
+  private Path dir;
+  private FileSystem fs;
+
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    this.dir = getUnitTestdir(getName());
+    this.fs = FileSystem.get(this.conf);
+    if (fs.exists(dir)) {
+      fs.delete(dir);
+    }
+  }
+
+  @Override
+  protected void tearDown() throws Exception {
+    if (this.fs.exists(this.dir)) {
+      this.fs.delete(this.dir);
+    }
+    super.tearDown();
+  }
+
+  /**
+   * Just write multiple logs then split. Before fix for HADOOP-2283, this
+   * would fail.
+   * @throws IOException
+   */
+  public void testSplit() throws IOException {
+    final Text tableName = new Text(getName());
+    final Text rowName = tableName;
+    HLog log = new HLog(this.fs, this.dir, this.conf, null);
+    // Add edits for three regions.
+    try {
+      for (int ii = 0; ii < 3; ii++) {
+        for (int i = 0; i < 3; i++) {
+          for (int j = 0; j < 3; j++) {
+            TreeMap<HStoreKey, byte[]> edit = new TreeMap<HStoreKey, byte[]>();
+            Text column = new Text(Integer.toString(j));
+            edit.put(
+              new HStoreKey(rowName, column, System.currentTimeMillis()),
+              column.getBytes());
+            log.append(new Text(Integer.toString(i)), tableName, edit);
+          }
+        }
+        log.rollWriter();
+      }
+      HLog.splitLog(this.testDir, this.dir, this.fs, this.conf);
+    } finally {
+      if (log != null) {
+        log.closeAndDelete();
+      }
+    }
+  }
 
   /**
    * @throws IOException
    */
   public void testAppend() throws IOException {
-    Path dir = getUnitTestdir(getName());
-    FileSystem fs = FileSystem.get(this.conf);
-    if (fs.exists(dir)) {
-      fs.delete(dir);
-    }
     final int COL_COUNT = 10;
     final Text regionName = new Text("regionname");
     final Text tableName = new Text("tablename");
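
The new testSplit exercises exactly the HLog.splitLog path patched above. To run just this case with the JUnit 3 text runner the test class already builds on, something like the following works (an illustrative sketch, assuming TestHLog lives in package org.apache.hadoop.hbase alongside the other HBase tests and is on the classpath):

// Illustrative only: run the single test class with the JUnit 3.x text runner.
public class RunTestHLog {
  public static void main(String[] args) {
    junit.textui.TestRunner.run(org.apache.hadoop.hbase.TestHLog.class);
  }
}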

@@ -89,9 +136,6 @@ public class TestHLog extends HBaseTestCase implements HConstants {
      if (reader != null) {
        reader.close();
      }
-      if (fs.exists(dir)) {
-        fs.delete(dir);
-      }
    }
  }
}