HBASE-1127 OOME running randomRead PE
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@735089 13f79535-47bb-0310-9956-ffa450edef68
parent b64a28ff69
commit 1381d23645

CHANGES.txt
@@ -143,6 +143,7 @@ Release 0.19.0 - Unreleased
    HBASE-1125  IllegalStateException: Cannot set a region to be closed if it was
                not already marked as pending close
    HBASE-1124  Balancer kicks in way too early
+   HBASE-1127  OOME running randomRead PE
 
   IMPROVEMENTS
    HBASE-901   Add a limit to key length, check key and value length on client side

hbase-default.xml
@@ -319,8 +319,7 @@
     <description>The size of each block in the block cache.
     Enable blockcaching on a per column family basis; see the BLOCKCACHE setting
     in HColumnDescriptor. Blocks are kept in a java Soft Reference cache so are
-    let go when high pressure on memory. Block caching is enabled by default
-    as of hbase 0.19.0.
+    let go when high pressure on memory. Block caching is not enabled by default.
     </description>
   </property>
   <property>
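
With block caching now off by default, the description above points clients at the per-column-family BLOCKCACHE setting in HColumnDescriptor. A minimal sketch of that opt-in, not part of this commit: the setter name and the "family:" naming convention are assumptions about the 0.19-era client API rather than something shown in this diff.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;

public class EnableBlockCache {
  public static void main(String[] args) {
    HTableDescriptor table = new HTableDescriptor("testtable");
    HColumnDescriptor family = new HColumnDescriptor("contents:");
    // Assumed setter for the BLOCKCACHE flag; in this era the flag may instead
    // be passed through the long HColumnDescriptor constructor.
    family.setBlockCacheEnabled(true);
    table.addFamily(family);
    // The descriptor would then be handed to HBaseAdmin.createTable(table).
  }
}
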
HColumnDescriptor.java
@@ -103,7 +103,7 @@ public class HColumnDescriptor implements WritableComparable {
   /**
    * Default setting for whether to use a block cache or not.
    */
-  public static final boolean DEFAULT_BLOCKCACHE = true;
+  public static final boolean DEFAULT_BLOCKCACHE = false;
 
   /**
    * Default setting for whether or not to use bloomfilters.

BlockFSInputStream.java
@@ -53,10 +53,9 @@ public class BlockFSInputStream extends FSInputStream {
       public Thread newThread(Runnable r) {
         Thread t = new Thread(r);
         t.setDaemon(true);
-        t.setName("BlockFSInputStream referenceQueue Checker");
+        t.setName("BlockFSInputStreamReferenceQueueChecker");
         return t;
       }
-
     });
 
   /*
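
This hunk only renames the checker thread, but it also shows how the checker runs: the shared EXECUTOR is handed a ThreadFactory that marks its thread as a daemon (so it cannot keep the JVM alive) and gives it a recognizable name for thread dumps. The EXECUTOR field itself sits outside the hunk, so the construction below is an assumption; a plain-JDK sketch of wiring such a factory into a one-thread scheduled pool:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;

public class DaemonSchedulerSketch {
  // Assumed to mirror how BlockFSInputStream builds its shared executor:
  // one scheduled thread, daemonized and named after its job.
  static final ScheduledExecutorService EXECUTOR =
      Executors.newScheduledThreadPool(1, new ThreadFactory() {
        public Thread newThread(Runnable r) {
          Thread t = new Thread(r);
          t.setDaemon(true); // never blocks JVM shutdown
          t.setName("BlockFSInputStreamReferenceQueueChecker");
          return t;
        }
      });
}
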
@@ -92,7 +91,7 @@ public class BlockFSInputStream extends FSInputStream {
     }
     this.fileLength = fileLength;
     this.blockSize = blockSize;
-    // a memory-sensitive map that has soft references to values
+    // A memory-sensitive map that has soft references to values
     this.blocks = new SoftValueMap<Long, byte []>() {
       private long hits, misses;
       public byte [] get(Object key) {
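
The "memory-sensitive map" built here is HBase's SoftValueMap: values sit behind java.lang.ref.SoftReference objects, so under heap pressure the garbage collector can discard cached blocks instead of the reader running out of memory, which is the OOME this issue is about. For readers unfamiliar with the pattern, a stripped-down plain-JDK illustration of such a cache (not the actual SoftValueMap) might look like this:

import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.util.HashMap;
import java.util.Map;

/** Toy soft-valued cache: values may be collected under memory pressure. */
class SoftValueCacheSketch<K, V> {
  /** SoftReference that remembers its key so cleared entries can be removed. */
  private static class Entry<K, V> extends SoftReference<V> {
    final K key;
    Entry(K key, V value, ReferenceQueue<V> queue) {
      super(value, queue);
      this.key = key;
    }
  }

  private final Map<K, Entry<K, V>> map = new HashMap<K, Entry<K, V>>();
  private final ReferenceQueue<V> queue = new ReferenceQueue<V>();

  public synchronized void put(K key, V value) {
    map.put(key, new Entry<K, V>(key, value, queue));
  }

  /** Returns null if absent, or if the GC already cleared the value. */
  public synchronized V get(K key) {
    Entry<K, V> entry = map.get(key);
    return entry == null ? null : entry.get();
  }

  /**
   * Drop map entries whose values the GC has cleared, analogous to the
   * SoftValueMap.checkReferences() calls elsewhere in this diff.
   * @return count of entries removed
   */
  public synchronized int checkReferences() {
    int cleared = 0;
    Object ref;
    while ((ref = queue.poll()) != null) {
      @SuppressWarnings("unchecked")
      Entry<K, V> entry = (Entry<K, V>) ref;
      map.remove(entry.key);
      cleared++;
    }
    return cleared;
  }
}

Clearing a SoftReference only nulls out the value; the map entry itself lingers until something drains the ReferenceQueue, which is why the hunks below run the periodic checkReferences() sweep more aggressively.
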
@@ -111,14 +110,14 @@ public class BlockFSInputStream extends FSInputStream {
     };
     // Register a Runnable that runs checkReferences on a period.
     final int hashcode = hashCode();
-    this.registration = EXECUTOR.scheduleAtFixedRate(new Runnable() {
+    this.registration = EXECUTOR.scheduleWithFixedDelay(new Runnable() {
       public void run() {
         int cleared = checkReferences();
         if (LOG.isDebugEnabled() && cleared > 0) {
-          LOG.debug("Cleared " + cleared + " in " + hashcode);
+          LOG.debug("Checker cleared " + cleared + " in " + hashcode);
         }
       }
-    }, 10, 10, TimeUnit.SECONDS);
+    }, 1, 1, TimeUnit.SECONDS);
   }
 
   @Override
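
Two scheduling changes land in this hunk: the sweep now runs every second instead of every ten, and it uses scheduleWithFixedDelay rather than scheduleAtFixedRate. Under fixed-rate scheduling a run that overruns its period makes the executor fire the missed runs back to back to catch up; under fixed-delay the next run always starts a full delay after the previous one finishes. A small self-contained JDK sketch of the two calls (illustrative only, not HBase code):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class SchedulingSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService executor = Executors.newScheduledThreadPool(2);

    Runnable sweep = new Runnable() {
      public void run() {
        System.out.println("sweep at " + System.currentTimeMillis());
      }
    };

    // Fixed rate: aims for a start every second; a slow run can be followed
    // by a burst of catch-up runs.
    ScheduledFuture<?> atRate =
        executor.scheduleAtFixedRate(sweep, 1, 1, TimeUnit.SECONDS);

    // Fixed delay: the next run starts one second after the previous run
    // completes, so a slow sweep never queues up extra sweeps behind it.
    ScheduledFuture<?> withDelay =
        executor.scheduleWithFixedDelay(sweep, 1, 1, TimeUnit.SECONDS);

    Thread.sleep(5000);
    // cancel(false): let an in-flight run finish rather than interrupting it,
    // the same argument BlockFSInputStream.close() passes in the next hunk.
    atRate.cancel(false);
    withDelay.cancel(false);
    executor.shutdown();
  }
}
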
@@ -214,6 +213,10 @@ public class BlockFSInputStream extends FSInputStream {
     if (!this.registration.cancel(false)) {
       LOG.warn("Failed cancel of " + this.registration);
     }
+    int cleared = checkReferences();
+    if (LOG.isDebugEnabled() && cleared > 0) {
+      LOG.debug("Close cleared " + cleared + " in " + hashCode());
+    }
     if (blockStream != null) {
       blockStream.close();
       blockStream = null;
@@ -246,7 +249,7 @@
    * @return Count of references cleared.
    */
   public synchronized int checkReferences() {
-    if (closed || this.blocks == null) {
+    if (this.closed) {
       return 0;
     }
     return this.blocks.checkReferences();