HBASE-8143 HBase on Hadoop 2 with local short circuit reads (ssr) causes OOM

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1545852 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2013-11-26 21:42:51 +00:00
parent 260fa3a539
commit 871792162e
4 changed files with 58 additions and 19 deletions

hbase-default.xml

@@ -991,6 +991,20 @@ possible configurations would overwhelm and obscure the important.
     datanode, performing block recovery to timeout on a dead datanode; usually
     dfs.socket.timeout. See the end of HBASE-8389 for more.</description>
   </property>
+  <property>
+    <name>hbase.dfs.client.read.shortcircuit.buffer.size</name>
+    <value>131072</value>
+    <description>If the DFSClient configuration
+    dfs.client.read.shortcircuit.buffer.size is unset, we will
+    use what is configured here as the short circuit read default
+    direct byte buffer size. DFSClient native default is 1MB; HBase
+    keeps its HDFS files open so number of file blocks * 1MB soon
+    starts to add up and threaten OOME because of a shortage of
+    direct memory. So, we set it down from the default. Make
+    it > the default hbase block size set in the HColumnDescriptor
+    which is usually 64k.
+    </description>
+  </property>
   <property>
     <name>hbase.regionserver.checksum.verify</name>
     <value>true</value>

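The arithmetic in the description above is worth making concrete. A minimal standalone sketch (not part of the patch; the reader count is a hypothetical illustration):

    // Each open short-circuit block reader allocates a direct ByteBuffer of
    // dfs.client.read.shortcircuit.buffer.size bytes, so direct memory demand
    // scales with open readers * buffer size.
    public class SsrBufferMath {
      public static void main(String[] args) {
        long openBlockReaders = 4096;     // hypothetical busy regionserver
        long hdfsDefault = 1024L * 1024;  // DFSClient native default: 1MB
        long hbaseDefault = 131072L;      // hbase-default.xml value above: 128KB
        // 4096 readers * 1MB = 4096MB of direct memory -- the OOME risk.
        System.out.println("at 1MB:   "
            + (openBlockReaders * hdfsDefault) / (1024 * 1024) + " MB direct");
        // 4096 readers * 128KB = 512MB -- the same workload after this patch.
        System.out.println("at 128KB: "
            + (openBlockReaders * hbaseDefault) / (1024 * 1024) + " MB direct");
      }
    }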
HMaster.java

@@ -410,6 +410,7 @@ MasterServices, Server {
     this.conf = new Configuration(conf);
     // Disable the block cache on the master
     this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
+    FSUtils.setupShortCircuitRead(conf);
     // Server to handle client requests.
     String hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
       conf.get("hbase.master.dns.interface", "default"),

HRegionServer.java

@@ -357,7 +357,6 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterface
   protected final Configuration conf;
-  private boolean useHBaseChecksum; // verify hbase checksums?
   private Path rootDir;
   protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
@@ -532,19 +531,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterface
     checkCodecs(this.conf);
     this.userProvider = UserProvider.instantiate(conf);
-    // do we use checksum verification in the hbase? If hbase checksum verification
-    // is enabled, then we automatically switch off hdfs checksum verification.
-    this.useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
-    // check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.
-    boolean shortCircuitSkipChecksum = conf.getBoolean(
-        "dfs.client.read.shortcircuit.skip.checksum", false);
-    if (shortCircuitSkipChecksum) {
-      LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
-        "be set to true." + (this.useHBaseChecksum ? " HBase checksum doesn't require " +
-        "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
-      assert !shortCircuitSkipChecksum; // this will fail if assertions are on
-    }
+    FSUtils.setupShortCircuitRead(this.conf);
 
     // Config'ed params
     this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
@@ -1240,8 +1227,10 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterface
     // accessors will be going against wrong filesystem (unless all is set
     // to defaults).
     FSUtils.setFsDefault(this.conf, FSUtils.getRootDir(this.conf));
-    // Get fs instance used by this RS
-    this.fs = new HFileSystem(this.conf, this.useHBaseChecksum);
+    // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase
+    // checksum verification enabled, then automatically switch off hdfs checksum verification.
+    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
+    this.fs = new HFileSystem(this.conf, useHBaseChecksum);
     this.rootDir = FSUtils.getRootDir(this.conf);
     this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true);
     this.hlog = setupWALAndReplication();

FSUtils.java

@@ -44,7 +44,6 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -56,12 +55,12 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.ClusterId;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -1876,4 +1875,40 @@ public abstract class FSUtils {
     LOG.info(overheadMsg);
   }
+
+  /**
+   * Do our short circuit read setup.
+   * Checks buffer size to use and whether to do checksumming in hbase or hdfs.
+   * @param conf
+   */
+  public static void setupShortCircuitRead(final Configuration conf) {
+    // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.
+    boolean shortCircuitSkipChecksum =
+      conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
+    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
+    if (shortCircuitSkipChecksum) {
+      LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
+        "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
+        "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
+      assert !shortCircuitSkipChecksum; // this will fail if assertions are on
+    }
+    checkShortCircuitReadBufferSize(conf);
+  }
+
+  /**
+   * Check if short circuit read buffer size is set and if not, set it to hbase value.
+   * @param conf
+   */
+  public static void checkShortCircuitReadBufferSize(final Configuration conf) {
+    final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
+    final int notSet = -1;
+    // DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in h2
+    final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
+    int size = conf.getInt(dfsKey, notSet);
+    // If a size is set, return -- we will use it.
+    if (size != notSet) return;
+    // But short circuit buffer size is normally not set. Put in place the hbase wanted size.
+    int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
+    conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
+  }
 }
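
To make the key precedence in checkShortCircuitReadBufferSize explicit, a small usage sketch follows (assumes hadoop-common and the patched hbase jar on the classpath; the sizes are illustrative, and a plain Configuration here does not load hbase-default.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class SsrBufferSizeCheck {
      public static void main(String[] args) {
        // Case 1: the DFS key is already set -- it is left untouched.
        Configuration c1 = new Configuration();
        c1.setInt("dfs.client.read.shortcircuit.buffer.size", 65536);
        FSUtils.checkShortCircuitReadBufferSize(c1);
        System.out.println(c1.get("dfs.client.read.shortcircuit.buffer.size")); // 65536

        // Case 2: only the hbase-prefixed override is set -- copied to the DFS key.
        Configuration c2 = new Configuration();
        c2.setInt("hbase.dfs.client.read.shortcircuit.buffer.size", 262144);
        FSUtils.checkShortCircuitReadBufferSize(c2);
        System.out.println(c2.get("dfs.client.read.shortcircuit.buffer.size")); // 262144

        // Case 3: neither key is set -- falls back to
        // 2 * HConstants.DEFAULT_BLOCKSIZE, i.e. 131072.
        Configuration c3 = new Configuration();
        FSUtils.checkShortCircuitReadBufferSize(c3);
        System.out.println(c3.get("dfs.client.read.shortcircuit.buffer.size")); // 131072
      }
    }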