[EC2] increase ip_conntrack_max; disable datanode write timeout

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@938982 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Andrew Kyle Purtell 2010-04-28 14:22:21 +00:00
parent 2b88cdcd78
commit 0c50c92879
1 changed file with 25 additions and 15 deletions

View File

@@ -49,6 +49,9 @@ echo "root hard nofile 32768" >> /etc/security/limits.conf
# up epoll limits; ok if this fails, only valid for kernels 2.6.27+
sysctl -w fs.epoll.max_user_instances=32768 > /dev/null 2>&1
# up conntrack_max
sysctl -w net.ipv4.netfilter.ip_conntrack_max=65536 > /dev/null 2>&1
[ ! -f /etc/hosts ] && echo "127.0.0.1 localhost" > /etc/hosts
# Extra packages
@@ -143,7 +146,7 @@ cat > $HADOOP_HOME/conf/hdfs-site.xml <<EOF
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
<value>2</value>
</property>
<property>
<name>dfs.datanode.handler.count</name>
@@ -171,6 +174,10 @@ cat > $HADOOP_HOME/conf/mapred-site.xml <<EOF
<name>io.compression.codec.lzo.class</name>
<value>com.hadoop.compression.lzo.LzoCodec</value>
</property>
<property>
<name>mapred.map.tasks</name>
<value>4</value>
</property>
<property>
<name>mapred.map.tasks.speculative.execution</name>
<value>false</value>
@@ -184,11 +191,10 @@ EOF
# Add JVM options
cat >> $HADOOP_HOME/conf/hadoop-env.sh <<EOF
export HADOOP_OPTS="$HADOOP_OPTS -XX:+UseCompressedOops"
export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -Xmx3000m -XX:+UseCompressedOops"
EOF
# Update classpath to include HBase jars and config
cat >> $HADOOP_HOME/conf/hadoop-env.sh <<EOF
export HADOOP_CLASSPATH="$HBASE_HOME/hbase-${HBASE_VERSION}.jar:$HBASE_HOME/lib/AgileJSON-2009-03-30.jar:$HBASE_HOME/lib/json.jar:$HBASE_HOME/lib/zookeeper-3.3.0.jar:$HBASE_HOME/conf"
export HADOOP_CLASSPATH="$HBASE_HOME/hbase-${HBASE_VERSION}.jar:$HBASE_HOME/lib/zookeeper-3.3.0.jar:$HBASE_HOME/conf"
EOF
# Configure Hadoop for Ganglia
cat > $HADOOP_HOME/conf/hadoop-metrics.properties <<EOF
@@ -240,12 +246,16 @@ cat > $HBASE_HOME/conf/hbase-site.xml <<EOF
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
<value>2</value>
</property>
<property>
<name>dfs.client.block.write.retries</name>
<value>100</value>
</property>
<property>
<name>dfs.datanode.socket.write.timeout</name>
<value>0</value>
</property>
<property>
<name>zookeeper.session.timeout</name>
<value>60000</value>
@@ -264,8 +274,8 @@ export HBASE_MASTER_OPTS="-Xmx1000m -XX:+UseCompressedOops -XX:+UseConcMarkSweep
export HBASE_REGIONSERVER_OPTS="-Xmx2000m -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=88 -XX:NewSize=128m -XX:MaxNewSize=128m -XX:+DoEscapeAnalysis -XX:+AggressiveOpts -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/mnt/hbase/logs/hbase-regionserver-gc.log"
EOF
# Configure log4j
sed -i -e 's/hadoop.hbase=DEBUG/hadoop.hbase=INFO/g' \
$HBASE_HOME/conf/log4j.properties
sed -i -e 's/hadoop.hbase=DEBUG/hadoop.hbase=INFO/g' $HBASE_HOME/conf/log4j.properties
#sed -i -e 's/#log4j.logger.org.apache.hadoop.dfs=DEBUG/log4j.logger.org.apache.hadoop.dfs=DEBUG/g' $HBASE_HOME/conf/log4j.properties
# Configure HBase for Ganglia
cat > $HBASE_HOME/conf/hadoop-metrics.properties <<EOF
dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext