HBASE-2032 Support for installation of user packages
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@892399 13f79535-47bb-0310-9956-ffa450edef68
commit 5c4f3067cc
parent 91fa2ad048
@@ -48,6 +48,12 @@ S3_BUCKET=iridiant-bundles
 # Enable public access web interfaces
 ENABLE_WEB_PORTS=false
 
+# Extra packages
+# Allows you to add a private Yum repo and pull packages from it as your
+# instances boot up. Format is <repo-descriptor-URL> <pkg1> ... <pkgN>
+# The repository descriptor will be fetched into /etc/yum.repos.d.
+EXTRA_PACKAGES=
+
 # Use only c1.xlarge unless you know what you are doing
 MASTER_INSTANCE_TYPE=${MASTER_INSTANCE_TYPE:-c1.xlarge}
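For illustration only, EXTRA_PACKAGES could then be filled in along these lines (the repository URL and package names are invented, not part of this change):

    # first token: URL of a yum repo descriptor; remaining tokens: packages to install
    EXTRA_PACKAGES="http://example.com/yum/custom.repo screen emacs-nox"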
@@ -1,12 +1,11 @@
 #!/usr/bin/env bash
 
 ###############################################################################
 # Script that is run on each EC2 instance on boot. It is passed in the EC2 user
 # data, so should not exceed 16K in size.
 ###############################################################################
 
-MASTER_HOST=%MASTER_HOST%
-ZOOKEEPER_QUORUM=%ZOOKEEPER_QUORUM%
+MASTER_HOST="%MASTER_HOST%"
+ZOOKEEPER_QUORUM="%ZOOKEEPER_QUORUM%"
+EXTRA_PACKAGES="%EXTRA_PACKAGES%"
 SECURITY_GROUPS=`wget -q -O - http://169.254.169.254/latest/meta-data/security-groups`
 IS_MASTER=`echo $SECURITY_GROUPS | awk '{ a = match ($0, "-master$"); if (a) print "true"; else print "false"; }'`
 if [ "$IS_MASTER" = "true" ]; then
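The added quotes matter because %EXTRA_PACKAGES% can expand to several space-separated words. A quick illustration of the difference (values invented):

    EXTRA_PACKAGES=http://example.com/yum/custom.repo screen    # bash would try to run 'screen' as a command
    EXTRA_PACKAGES="http://example.com/yum/custom.repo screen"  # whole list assigned to the variable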
@@ -17,9 +16,7 @@ HADOOP_VERSION=`echo $HADOOP_HOME | cut -d '-' -f 2`
 HBASE_HOME=`ls -d /usr/local/hbase-*`
 HBASE_VERSION=`echo $HBASE_HOME | cut -d '-' -f 2`
-
 ###############################################################################
 # Hadoop configuration
 ###############################################################################
-
 cat > $HADOOP_HOME/conf/core-site.xml <<EOF
 <?xml version="1.0"?>
@@ -76,7 +73,6 @@ HADOOP_CLASSPATH="$HBASE_HOME/hbase-${HBASE_VERSION}.jar:$HBASE_HOME/lib/AgileJS
 EOF
-
 # Configure Hadoop for Ganglia
 # overwrite hadoop-metrics.properties
 cat > $HADOOP_HOME/conf/hadoop-metrics.properties <<EOF
 dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
 dfs.period=10
@@ -89,9 +85,7 @@ mapred.period=10
 mapred.servers=$MASTER_HOST:8649
 EOF
-
 ###############################################################################
 # HBase configuration
 ###############################################################################
-
 cat > $HBASE_HOME/conf/hbase-site.xml <<EOF
 <?xml version="1.0"?>
@@ -147,7 +141,6 @@ export HBASE_REGIONSERVER_OPTS="-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupan
 EOF
-
 # Configure HBase for Ganglia
 # overwrite hadoop-metrics.properties
 cat > $HBASE_HOME/conf/hadoop-metrics.properties <<EOF
 dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
 dfs.period=10
@@ -160,26 +153,28 @@ jvm.period=10
 jvm.servers=$MASTER_HOST:8649
 EOF
 
 ###############################################################################
 # Start services
 ###############################################################################
 
 # up open file descriptor limits
 echo "root soft nofile 32768" >> /etc/security/limits.conf
 echo "root hard nofile 32768" >> /etc/security/limits.conf
 
-# up epoll limits
-# ok if this fails, only valid for kernels 2.6.27+
+# up epoll limits; ok if this fails, only valid for kernels 2.6.27+
 sysctl -w fs.epoll.max_user_instances=32768 > /dev/null 2>&1
 
-mkdir -p /mnt/hadoop/logs
-mkdir -p /mnt/hbase/logs
+mkdir -p /mnt/hadoop/logs /mnt/hbase/logs
 
 [ ! -f /etc/hosts ] && echo "127.0.0.1 localhost" > /etc/hosts
 
 # not set on boot
 export USER="root"
 
+if [ "$EXTRA_PACKAGES" != "" ] ; then
+  # format should be <repo-descriptor-URL> <package1> ... <packageN>
+  # this will only work with bash
+  pkg=( $EXTRA_PACKAGES )
+  wget -nv -O /etc/yum.repos.d/user.repo ${pkg[0]}
+  yum -y update yum
+  yum -y install ${pkg[@]:1}
+fi
+
 if [ "$IS_MASTER" = "true" ]; then
   # MASTER
   # Prep Ganglia
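A minimal sketch of how the new block consumes EXTRA_PACKAGES (bash-only, as the comment notes; the value below is invented): the first token is the repository descriptor fetched to /etc/yum.repos.d/user.repo, the remaining tokens are handed to yum.

    EXTRA_PACKAGES="http://example.com/yum/custom.repo screen emacs-nox"
    pkg=( $EXTRA_PACKAGES )   # word-split into a bash array
    echo "${pkg[0]}"          # -> http://example.com/yum/custom.repo (repo descriptor URL)
    echo "${pkg[@]:1}"        # -> screen emacs-nox (passed to 'yum -y install')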
@@ -199,13 +194,9 @@ if [ "$IS_MASTER" = "true" ]; then
 [ ! -e /mnt/hadoop/dfs ] && "$HADOOP_HOME"/bin/hadoop namenode -format
 "$HADOOP_HOME"/bin/hadoop-daemon.sh start namenode
 "$HADOOP_HOME"/bin/hadoop-daemon.sh start datanode
 "$HADOOP_HOME"/bin/hadoop-daemon.sh start jobtracker
 sleep 10
 "$HBASE_HOME"/bin/hbase-daemon.sh start master
 
 else
@@ -218,11 +209,8 @@ else
   -e "s|\(udp_send_channel {\)|\1\n host=$MASTER_HOST|" \
   /etc/gmond.conf
 service gmond start
 "$HADOOP_HOME"/bin/hadoop-daemon.sh start datanode
 "$HBASE_HOME"/bin/hbase-daemon.sh start regionserver
 "$HADOOP_HOME"/bin/hadoop-daemon.sh start tasktracker
 fi
@@ -73,7 +73,7 @@ rm -f hbase-$HBASE_VERSION.tar.gz
 # Configure HBase
 sed -i \
   -e "s|# export JAVA_HOME=.*|export JAVA_HOME=/usr/local/jdk${JAVA_VERSION}|" \
-  -e 's|# export HBASE_OPTS=.*|export HBASE_OPTS="$HBASE_OPTS -server -XX:+UseConcMarkSweepGC -XX:+DoEscapeAnalysis -XX:+AggressiveOpts"|' \
+  -e 's|# export HBASE_OPTS=.*|export HBASE_OPTS="$HBASE_OPTS -server -XX:+HeapDumpOnOutOfMemoryError -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=90 -XX:NewSize=64m -XX:MaxNewSize=64m -XX:+DoEscapeAnalysis -XX:+AggressiveOpts"|' \
   -e 's|# export HBASE_LOG_DIR=.*|export HBASE_LOG_DIR=/mnt/hbase/logs|' \
   -e 's|# export HBASE_SLAVE_SLEEP=.*|export HBASE_SLAVE_SLEEP=1|' \
   /usr/local/hbase-$HBASE_VERSION/conf/hbase-env.sh
@@ -94,12 +94,12 @@ rm -f /root/.ssh/authorized_keys
 # Ensure logging in to new hosts is seamless.
 echo ' StrictHostKeyChecking no' >> /etc/ssh/ssh_config
 
-# Install LZO for HBase
-echo "Installing LZO codec support for HBase"
-cd /usr/local/hbase-${HBASE_VERSION}
-wget -nv http://iridiant.s3.amazonaws.com/hbase/lzo-linux-${HADOOP_VERSION}.tar.gz
-tar xzf lzo-linux-${HADOOP_VERSION}.tar.gz
-rm lzo-linux-${HADOOP_VERSION}.tar.gz
+# Install LZO
+echo "Installing LZO codec support"
+wget -nv -O /tmp/lzo-linux-${HADOOP_VERSION}.tar.gz http://iridiant.s3.amazonaws.com/hbase/lzo-linux-${HADOOP_VERSION}.tar.gz
+cd /usr/local/hadoop-${HADOOP_VERSION} && tar xzf /tmp/lzo-linux-${HADOOP_VERSION}.tar.gz
+cd /usr/local/hbase-${HBASE_VERSION} && tar xzf /tmp/lzo-linux-${HADOOP_VERSION}.tar.gz
+rm -f /tmp/lzo-linux-${HADOOP_VERSION}.tar.gz
 
 # Bundle and upload image
 cd ~root
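The codec is now unpacked into both the Hadoop and HBase trees. A hedged example of exercising it once a cluster built from this image is running (table and column family names are invented; assumes the 0.20-era shell accepts LZO as a COMPRESSION value):

    echo "create 'testtable', {NAME => 'cf', COMPRESSION => 'LZO'}" | $HBASE_HOME/bin/hbase shell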
@@ -59,6 +59,7 @@ echo "Starting master with AMI $AMI_IMAGE (arch $arch)"
 # Substituting zookeeper quorum
 ZOOKEEPER_QUORUM=`cat $ZOOKEEPER_QUORUM_PATH`
 sed -e "s|%ZOOKEEPER_QUORUM%|$ZOOKEEPER_QUORUM|" \
+    -e "s|%EXTRA_PACKAGES%|$EXTRA_PACKAGES|" \
     "$bin"/$USER_DATA_FILE > "$bin"/$USER_DATA_FILE.master
 INSTANCE=`ec2-run-instances $AMI_IMAGE $TOOL_OPTS -n 1 -g $CLUSTER_MASTER -k root -f "$bin"/$USER_DATA_FILE.master -t $type | grep INSTANCE | awk '{print $2}'`
 echo -n "Waiting for instance $INSTANCE to start"
@@ -50,6 +50,7 @@ ZOOKEEPER_QUORUM=`cat $ZOOKEEPER_QUORUM_PATH`
 # Substituting master hostname and zookeeper quorum
 sed -e "s|%MASTER_HOST%|$MASTER_HOST|" \
     -e "s|%ZOOKEEPER_QUORUM%|$ZOOKEEPER_QUORUM|" \
+    -e "s|%EXTRA_PACKAGES%|$EXTRA_PACKAGES|" \
     "$bin"/$USER_DATA_FILE > "$bin"/$USER_DATA_FILE.slave
 
 # Start slaves
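A minimal sketch of the placeholder substitution these two hunks perform (the EXTRA_PACKAGES value is invented; $USER_DATA_FILE stands for the user-data template edited above):

    EXTRA_PACKAGES="http://example.com/yum/custom.repo screen"
    sed -e "s|%EXTRA_PACKAGES%|$EXTRA_PACKAGES|" "$USER_DATA_FILE" > /tmp/user-data.master
    grep '^EXTRA_PACKAGES=' /tmp/user-data.master
    # -> EXTRA_PACKAGES="http://example.com/yum/custom.repo screen"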