HBASE-2646 Compaction requests should be prioritized to prevent blocking; removal of HBASE-3038 change added by mistake
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1002020 13f79535-47bb-0310-9956-ffa450edef68
parent f4ac670a42
commit 8032206236
SequenceFileLogReader.java

@@ -20,7 +20,7 @@
 
 package org.apache.hadoop.hbase.regionserver.wal;
 
-import java.io.FilterInputStream;
+import java.io.EOFException;
 import java.io.IOException;
 import java.lang.Class;
 import java.lang.reflect.Constructor;
@@ -78,43 +78,18 @@ public class SequenceFileLogReader implements HLog.Reader {
       this.length = l;
     }
 
-    // This section can be confusing. It is specific to how HDFS works.
-    // Let me try to break it down. This is the problem:
-    //
-    // 1. HDFS DataNodes update the NameNode about a filename's length
-    //    on block boundaries or when a file is closed. Therefore,
-    //    if an RS dies, then the NN's fs.getLength() can be out of date
-    // 2. this.in.available() would work, but it returns int &
-    //    therefore breaks for files > 2GB (happens on big clusters)
-    // 3. DFSInputStream.getFileLength() gets the actual length from the DNs
-    // 4. DFSInputStream is wrapped 2 levels deep : this.in.in
-    //
-    // So, here we adjust getPos() using getFileLength() so the
-    // SequenceFile.Reader constructor (aka: first invocation) comes out
-    // with the correct end of the file:
-    //         this.end = in.getPos() + length;
     @Override
     public long getPos() throws IOException {
       if (this.firstGetPosInvocation) {
         this.firstGetPosInvocation = false;
-        long adjust = 0;
-
-        try {
-          Field fIn = FilterInputStream.class.getDeclaredField("in");
-          fIn.setAccessible(true);
-          Object realIn = fIn.get(this.in);
-          long realLength = ((Long)realIn.getClass().
-            getMethod("getFileLength", new Class<?> []{}).
-            invoke(realIn, new Object []{})).longValue();
-          assert(realLength >= this.length);
-          adjust = realLength - this.length;
-        } catch(Exception e) {
-          SequenceFileLogReader.LOG.warn(
-            "Error while trying to get accurate file length. " +
-            "Truncation / data loss may occur if RegionServers die.", e);
-        }
-
-        return adjust + super.getPos();
+        // Tell a lie. We're doing this just so that this line up in
+        // SequenceFile.Reader constructor comes out with the correct length
+        // on the file:
+        //         this.end = in.getPos() + length;
+        long available = this.in.available();
+        // Length gets added up in the SF.Reader constructor so subtract the
+        // difference. If available < this.length, then return this.length.
+        return available >= this.length? available - this.length: this.length;
       }
       return super.getPos();
     }
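A note on the block removed above: it obtained the real file length by reflecting on FilterInputStream's protected "in" field to reach the wrapped DFSInputStream and invoke its getFileLength() method, since the NameNode-reported length can be stale and this.in.available() returns only an int (wrong past 2GB). Below is a minimal standalone sketch of that unwrapping trick using only JDK classes; BufferedInputStream and ByteArrayInputStream are stand-ins for the HDFS streams, and on JDK 16+ this kind of deep reflection into java.base needs --add-opens java.base/java.io=ALL-UNNAMED.

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.FilterInputStream;
import java.io.InputStream;
import java.lang.reflect.Field;

public class UnwrapSketch {
  public static void main(String[] args) throws Exception {
    // A FilterInputStream subclass wrapping an inner stream, the way the
    // HDFS client wraps DFSInputStream; these JDK streams are stand-ins only.
    InputStream inner = new ByteArrayInputStream(new byte[] {1, 2, 3});
    FilterInputStream wrapped = new BufferedInputStream(inner);

    // FilterInputStream keeps its delegate in the protected field "in";
    // reflection reaches it even though no public accessor exists.
    Field fIn = FilterInputStream.class.getDeclaredField("in");
    fIn.setAccessible(true);
    Object realIn = fIn.get(wrapped);

    // The removed HBase code then called, on the unwrapped DFSInputStream:
    //   realIn.getClass().getMethod("getFileLength", new Class<?>[]{}).invoke(realIn, new Object[]{})
    System.out.println(realIn.getClass().getName()); // java.io.ByteArrayInputStream
  }
}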
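The restored branch relies on the fact, quoted in the comments above, that the SequenceFile.Reader constructor computes its end-of-file marker as this.end = in.getPos() + length from the first getPos() call. Answering that call with available - length therefore makes end equal the number of bytes actually readable, even when the length handed in (from the NameNode) is stale. A small worked sketch of that arithmetic follows; the method name and numbers are illustrative, not HBase code.

public class GetPosLieSketch {
  // Mirrors the restored line:
  //   return available >= this.length ? available - this.length : this.length;
  static long firstGetPos(long available, long reportedLength) {
    return available >= reportedLength ? available - reportedLength : reportedLength;
  }

  public static void main(String[] args) {
    long reportedLength = 1000L;  // length the NameNode knew about at open time
    long available = 1500L;       // bytes actually readable from the DataNodes

    // SequenceFile.Reader's constructor then does: end = getPos() + length
    long end = firstGetPos(available, reportedLength) + reportedLength;
    System.out.println(end);      // 1500 -- the reader scans to the real end of data
  }
}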