HBASE-1961 HBase EC2 scripts
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@836183 13f79535-47bb-0310-9956-ffa450edef68
parent 947b62178d
commit b83cda4bd8
@@ -192,6 +192,7 @@ Release 0.21.0 - Unreleased
   HBASE-1975  SingleColumnValueFilter: Add ability to match the value of
               previous versions of the specified column
               (Jeremiah Jacquet via Stack)
   HBASE-1961  HBase EC2 scripts

 OPTIMIZATIONS
   HBASE-410   [testing] Speed up the test suite
@@ -0,0 +1,18 @@
HBase EC2

This collection of scripts allows you to run HBase clusters on Amazon.com's
Elastic Compute Cloud (EC2) service described at:

  http://aws.amazon.com/ec2

To get help, type the following in a shell:

  bin/hbase-ec2

You need both the EC2 API and AMI tools

  http://developer.amazonwebservices.com/connect/entry.jspa?externalID=351
  http://developer.amazonwebservices.com/connect/entry.jspa?externalID=368&categoryID=88

installed and on the path. For Ubuntu, "apt-get install ec2-ami-tools ec2-api-tools".
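For orientation, a typical session might look like the following once hbase-ec2-env.sh
has been filled in (the cluster name "testcluster" and the sizes are illustrative):

  bin/hbase-ec2 launch-cluster testcluster 3 3
  bin/hbase-ec2 login testcluster
  bin/hbase-ec2 terminate-cluster testcluster
  bin/hbase-ec2 delete-cluster testcluster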
@@ -0,0 +1,69 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Run commands on the master or a specified node of a running HBase EC2 cluster.

# if no args specified, show usage
if [ $# = 0 ]; then
  echo "Command required!"
  exit 1
fi

# get arguments
COMMAND="$1"
shift
# get group
CLUSTER="$1"
shift

if [ -z $CLUSTER ]; then
  echo "Cluster name or instance id required!"
  exit 1
fi

# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-ec2-env.sh

# The pattern must be unquoted so that an instance id such as i-12345678 matches.
if [[ "$CLUSTER" == i-* ]]; then
  HOST=`ec2-describe-instances $CLUSTER | grep running | awk '{print $4}'`
  [ -z $HOST ] && echo "Instance still pending or no longer running: $CLUSTER" && exit 1
else
  [ ! -f $MASTER_IP_PATH ] && echo "Wrong group name, or cluster not launched! $CLUSTER" && exit 1
  HOST=`cat $MASTER_IP_PATH`
fi

if [ "$COMMAND" = "login" ] ; then
  echo "Logging in to host $HOST."
  ssh $SSH_OPTS "root@$HOST"
elif [ "$COMMAND" = "proxy" ] ; then
  echo "Proxying to host $HOST via local port 6666"
  echo "Ganglia: http://$HOST/ganglia"
  echo "JobTracker: http://$HOST:50030/"
  echo "NameNode: http://$HOST:50070/"
  ssh $SSH_OPTS -D 6666 -N "root@$HOST"
elif [ "$COMMAND" = "push" ] ; then
  echo "Pushing $1 to host $HOST."
  scp $SSH_OPTS -r $1 "root@$HOST:"
elif [ "$COMMAND" = "screen" ] ; then
  echo "Logging in and attaching screen on host $HOST."
  ssh $SSH_OPTS -t "root@$HOST" 'screen -D -R'
else
  echo "Executing command on host $HOST."
  ssh $SSH_OPTS -t "root@$HOST" "$COMMAND"
fi
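For illustration, a few ways this script is reached through the bin/hbase-ec2 front end
(the cluster name and file name are made up):

  bin/hbase-ec2 login testcluster
  bin/hbase-ec2 proxy testcluster
  bin/hbase-ec2 push testcluster my-table-loader.jar
  bin/hbase-ec2 "jps" testcluster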
@@ -0,0 +1,77 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Create an HBase AMI.
# Inspired by Jonathan Siegel's EC2 script (http://blogsiegel.blogspot.com/2006/08/sandboxing-amazon-ec2.html)

# allow override of INSTANCE_TYPE from the command line
[ ! -z $1 ] && INSTANCE_TYPE=$1

# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-ec2-env.sh

echo "INSTANCE_TYPE is $INSTANCE_TYPE."
echo "ARCH is $ARCH."

AMI_IMAGE=`ec2-describe-images -a | grep $S3_BUCKET | grep hbase | grep $HBASE_VERSION | grep $ARCH | grep available | awk '{print $2}'`

[ ! -z $AMI_IMAGE ] && echo "AMI already registered, use: ec2-deregister $AMI_IMAGE" && exit 1

echo "Starting an AMI with ID $BASE_AMI_IMAGE."
OUTPUT=`ec2-run-instances $BASE_AMI_IMAGE -k $KEY_NAME -t $INSTANCE_TYPE`
BOOTING_INSTANCE=`echo $OUTPUT | awk '{print $6}'`

echo "Instance is $BOOTING_INSTANCE."

echo "Polling server status (ec2-describe-instances $BOOTING_INSTANCE)"
while true; do
  printf "."
  HOSTNAME=`ec2-describe-instances $BOOTING_INSTANCE | grep running | awk '{print $4}'`
  if [ ! -z $HOSTNAME ]; then
    break;
  fi
  sleep 1
done
echo "The server is available at $HOSTNAME."
while true; do
  REPLY=`ssh $SSH_OPTS "root@$HOSTNAME" 'echo "hello"'`
  if [ ! -z $REPLY ]; then
    break;
  fi
  sleep 5
done

echo "Copying scripts."

# Copy setup scripts
scp $SSH_OPTS "$bin"/hbase-ec2-env.sh "root@$HOSTNAME:/mnt"
scp $SSH_OPTS "$bin"/image/create-hbase-image-remote "root@$HOSTNAME:/mnt"
scp $SSH_OPTS "$bin"/image/ec2-run-user-data "root@$HOSTNAME:/etc/init.d"

# Copy private key and certificate (for bundling image)
scp $SSH_OPTS $EC2_KEYDIR/pk*.pem "root@$HOSTNAME:/mnt"
scp $SSH_OPTS $EC2_KEYDIR/cert*.pem "root@$HOSTNAME:/mnt"

# Connect to it
ssh $SSH_OPTS "root@$HOSTNAME" "sh -c \"INSTANCE_TYPE=$INSTANCE_TYPE /mnt/create-hbase-image-remote\""

# Register image
ec2-register $S3_BUCKET/hbase-$HBASE_VERSION-$ARCH.manifest.xml

echo "Terminate with: ec2-terminate-instances $BOOTING_INSTANCE"
@@ -0,0 +1,47 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Delete the groups and local files associated with a cluster.

if [ -z $1 ]; then
  echo "Cluster name required!"
  exit 1
fi

CLUSTER=$1

# Finding HBase clusters
CLUSTERS=`ec2-describe-instances | \
  awk '"RESERVATION" == $1 && $4 ~ /-master$/, "INSTANCE" == $1' | tr '\n' '\t' | \
  grep "$CLUSTER" | grep running | cut -f4 | rev | cut -d'-' -f2- | rev`

if [ -n "$CLUSTERS" ]; then
  echo "Cluster $CLUSTER has running instances. Please terminate them first."
  exit 0
fi

# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-ec2-env.sh

"$bin"/revoke-hbase-cluster-secgroups $CLUSTER

rm -f $MASTER_IP_PATH
rm -f $MASTER_PRIVATE_IP_PATH
rm -f $MASTER_ZONE_PATH
rm -f $ZOOKEEPER_QUORUM_PATH
@@ -0,0 +1,63 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

bin=`dirname "$0"`
bin=`cd "$bin"; pwd`

# if no args specified, show usage
if [ $# = 0 ]; then
  echo "Usage: hbase-ec2 COMMAND"
  echo "where COMMAND is one of:"
  echo "  list                                  list all running HBase EC2 clusters"
  echo "  launch-cluster <name> <slaves> <zoos> launch an HBase cluster"
  echo "  launch-zookeeper <name> <zoos>        launch the zookeeper quorum"
  echo "  launch-master <name>                  launch or find a cluster master"
  echo "  launch-slaves <name> <slaves>         launch the cluster slaves"
  echo "  terminate-cluster <name>              terminate all HBase EC2 instances"
  echo "  delete-cluster <name>                 clean up after a terminated cluster"
  echo "  login <name|instance id>              login to the master node"
  echo "  screen <name|instance id>             start or attach 'screen' on the master"
  echo "  proxy <name|instance id>              start a socks proxy on localhost:6666"
  echo "  push <name> <file>                    scp a file to the master node"
  echo "  <shell cmd> <group|instance id>       execute a command on the master"
  echo "  create-image                          create an HBase AMI"
  exit 1
fi

# get arguments
COMMAND="$1"
shift

if [ "$COMMAND" = "create-image" ] ; then
  . "$bin"/create-hbase-image $*
elif [ "$COMMAND" = "launch-cluster" ] ; then
  . "$bin"/launch-hbase-cluster $*
elif [ "$COMMAND" = "launch-zookeeper" ] ; then
  . "$bin"/launch-hbase-zookeeper $*
elif [ "$COMMAND" = "launch-master" ] ; then
  . "$bin"/launch-hbase-master $*
elif [ "$COMMAND" = "launch-slaves" ] ; then
  . "$bin"/launch-hbase-slaves $*
elif [ "$COMMAND" = "delete-cluster" ] ; then
  . "$bin"/delete-hbase-cluster $*
elif [ "$COMMAND" = "terminate-cluster" ] ; then
  . "$bin"/terminate-hbase-cluster $*
elif [ "$COMMAND" = "list" ] ; then
  . "$bin"/list-hbase-clusters
else
  . "$bin"/cmd-hbase-cluster "$COMMAND" $*
fi
@@ -0,0 +1,107 @@
# Set environment variables for running HBase on Amazon EC2 here. All are required.

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Your Amazon Account Number.
AWS_ACCOUNT_ID=

# Your Amazon AWS access key.
AWS_ACCESS_KEY_ID=

# Your Amazon AWS secret access key.
AWS_SECRET_ACCESS_KEY=

# Location of EC2 keys.
# The default setting is probably OK if you set up EC2 following the Amazon Getting Started guide.
EC2_KEYDIR=`dirname "$EC2_PRIVATE_KEY"`

# The EC2 key name used to launch instances.
# The default is the value used in the Amazon Getting Started guide.
KEY_NAME=root

# Where your EC2 private key is stored (created when following the Amazon Getting Started guide).
# You need to change this if you don't store this with your other EC2 keys.
PRIVATE_KEY_PATH=`echo "$EC2_KEYDIR"/"id_rsa_$KEY_NAME"`

# SSH options used when connecting to EC2 instances.
SSH_OPTS=`echo -q -i "$PRIVATE_KEY_PATH" -o StrictHostKeyChecking=no -o ServerAliveInterval=30`

# The version of HBase to use.
HBASE_VERSION=0.20.1

# The version of Hadoop to use.
HADOOP_VERSION=$HBASE_VERSION

# The Amazon S3 bucket where the HBase AMI is stored.
# The default value is for public images, so it can be left as-is if you are running a public image.
# Change this value only if you are creating your own (private) AMI
# so you can store it in a bucket you own.
#S3_BUCKET=hbase-images
S3_BUCKET=iridiant-bundles

# Enable public access web interfaces
# XXX -- Generally, you do not want to do this
ENABLE_WEB_PORTS=false

# The script to run on instance boot.
USER_DATA_FILE=hbase-ec2-init-remote.sh

# Use only c1.xlarge unless you know what you are doing
INSTANCE_TYPE=${INSTANCE_TYPE:-c1.xlarge}

# Use only c1.medium unless you know what you are doing
ZOO_INSTANCE_TYPE=${ZOO_INSTANCE_TYPE:-c1.medium}

# The EC2 master group name. CLUSTER is set by the calling scripts.
CLUSTER_MASTER=$CLUSTER-master

# Cached values for a given cluster
MASTER_PRIVATE_IP_PATH=~/.hbase-private-$CLUSTER_MASTER
MASTER_IP_PATH=~/.hbase-$CLUSTER_MASTER
MASTER_ZONE_PATH=~/.hbase-zone-$CLUSTER_MASTER

# The Zookeeper EC2 group name. CLUSTER is set by the calling scripts.
CLUSTER_ZOOKEEPER=$CLUSTER-zookeeper
ZOOKEEPER_QUORUM_PATH=~/.hbase-quorum-$CLUSTER_ZOOKEEPER

#
# The following variables are only used when creating an AMI.
#

# The version number of the installed JDK.
JAVA_VERSION=1.6.0_16

# SUPPORTED_ARCHITECTURES = ['i386', 'x86_64']
# The download URL for the Sun JDK. Visit http://java.sun.com/javase/downloads/index.jsp and get the URL for the "Linux self-extracting file".
if [ "$INSTANCE_TYPE" = "m1.small" -o "$INSTANCE_TYPE" = "c1.medium" ]; then
  ARCH='i386'
  BASE_AMI_IMAGE="ami-48aa4921"  # ec2-public-images/fedora-8-i386-base-v1.10.manifest.xml
  AMI_IMAGE="ami-c644a7af"
  JAVA_BINARY_URL='http://iridiant.s3.amazonaws.com/jdk/jdk-6u16-linux-i586.bin'
else
  ARCH='x86_64'
  BASE_AMI_IMAGE="ami-f61dfd9f"  # ec2-public-images/fedora-8-x86_64-base-v1.10.manifest.xml
  AMI_IMAGE="ami-f244a79b"
  JAVA_BINARY_URL='http://iridiant.s3.amazonaws.com/jdk/jdk-6u16-linux-x64.bin'
fi

if [ "$ZOO_INSTANCE_TYPE" = "m1.small" -o "$ZOO_INSTANCE_TYPE" = "c1.medium" ]; then
  ZOO_ARCH='i386'
  ZOO_AMI_IMAGE="ami-c644a7af"
else
  ZOO_ARCH='x86_64'
  ZOO_AMI_IMAGE="ami-f244a79b"
fi
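As a quick orientation, the only values a user normally has to fill in before launching
anything are the account credentials and, when bundling a private AMI, the bucket. The
values below are illustrative placeholders, not working credentials:

  AWS_ACCOUNT_ID=1234-5678-9012        # placeholder
  AWS_ACCESS_KEY_ID=AKIAEXAMPLE        # placeholder
  AWS_SECRET_ACCESS_KEY=secretEXAMPLE  # placeholder
  KEY_NAME=root                        # EC2 keypair used to launch instances
  S3_BUCKET=my-own-hbase-images        # only needed when creating a private AMI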
@@ -0,0 +1,180 @@
#!/usr/bin/env bash

###############################################################################
# Script that is run on each EC2 instance on boot. It is passed in the EC2 user
# data, so should not exceed 16K in size.
###############################################################################

MASTER_HOST=%MASTER_HOST%
ZOOKEEPER_QUORUM=%ZOOKEEPER_QUORUM%
SECURITY_GROUPS=`wget -q -O - http://169.254.169.254/latest/meta-data/security-groups`
IS_MASTER=`echo $SECURITY_GROUPS | awk '{ a = match ($0, "-master$"); if (a) print "true"; else print "false"; }'`
if [ "$IS_MASTER" = "true" ]; then
  MASTER_HOST=`wget -q -O - http://169.254.169.254/latest/meta-data/local-hostname`
fi
HADOOP_HOME=`ls -d /usr/local/hadoop-*`
HBASE_HOME=`ls -d /usr/local/hbase-*`

###############################################################################
# Hadoop configuration
###############################################################################

cat > $HADOOP_HOME/conf/hadoop-site.xml <<EOF
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
  <name>hadoop.tmp.dir</name>
  <value>/mnt/hadoop</value>
</property>
<property>
  <name>fs.default.name</name>
  <value>hdfs://$MASTER_HOST:50001</value>
</property>
</configuration>
EOF

# Configure Hadoop for Ganglia
# overwrite hadoop-metrics.properties
cat > $HADOOP_HOME/conf/hadoop-metrics.properties <<EOF
dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
dfs.period=10
dfs.servers=$MASTER_HOST:8649
jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
jvm.period=10
jvm.servers=$MASTER_HOST:8649
mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
mapred.period=10
mapred.servers=$MASTER_HOST:8649
EOF

###############################################################################
# HBase configuration
###############################################################################

cat > $HBASE_HOME/conf/hbase-site.xml <<EOF
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
  <name>fs.default.name</name>
  <value>hdfs://$MASTER_HOST:50001</value>
</property>
<property>
  <name>dfs.replication</name>
  <value>3</value>
</property>
<property>
  <name>dfs.client.block.write.retries</name>
  <value>100</value>
</property>
<property>
  <name>hbase.rootdir</name>
  <value>hdfs://$MASTER_HOST:50001/hbase</value>
</property>
<property>
  <name>hbase.cluster.distributed</name>
  <value>true</value>
</property>
<property>
  <name>hbase.zookeeper.quorum</name>
  <value>$ZOOKEEPER_QUORUM</value>
</property>
<property>
  <name>zookeeper.session.timeout</name>
  <value>60000</value>
</property>
<property>
  <name>hbase.regionserver.handler.count</name>
  <value>100</value>
</property>
<property>
  <name>hbase.hregion.memstore.block.multiplier</name>
  <value>3</value>
</property>
<property>
  <name>hbase.hstore.blockingStoreFiles</name>
  <value>15</value>
</property>
</configuration>
EOF

# Configure HBase for Ganglia
# overwrite hadoop-metrics.properties
cat > $HBASE_HOME/conf/hadoop-metrics.properties <<EOF
dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
dfs.period=10
dfs.servers=$MASTER_HOST:8649
hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
hbase.period=10
hbase.servers=$MASTER_HOST:8649
jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
jvm.period=10
jvm.servers=$MASTER_HOST:8649
EOF

###############################################################################
# Start services
###############################################################################

# up open file descriptor limits
echo "root soft nofile 32768" >> /etc/security/limits.conf
echo "root hard nofile 32768" >> /etc/security/limits.conf

# up epoll limits
# ok if this fails, only valid for kernels 2.6.27+
sysctl -w fs.epoll.max_user_instances=32768

mkdir -p /mnt/hadoop/logs
mkdir -p /mnt/hbase/logs

[ ! -f /etc/hosts ] && echo "127.0.0.1 localhost" > /etc/hosts

# not set on boot
export USER="root"

if [ "$IS_MASTER" = "true" ]; then
  # MASTER
  # Prep Ganglia
  sed -i -e "s|\( *mcast_join *=.*\)|#\1|" \
         -e "s|\( *bind *=.*\)|#\1|" \
         -e "s|\( *mute *=.*\)|  mute = yes|" \
         -e "s|\( *location *=.*\)|  location = \"master-node\"|" \
         /etc/gmond.conf
  mkdir -p /mnt/ganglia/rrds
  chown -R ganglia:ganglia /mnt/ganglia/rrds
  rm -rf /var/lib/ganglia; cd /var/lib; ln -s /mnt/ganglia ganglia; cd
  service gmond start
  service gmetad start
  apachectl start

  # only format on first boot
  [ ! -e /mnt/hadoop/dfs ] && "$HADOOP_HOME"/bin/hadoop namenode -format

  "$HADOOP_HOME"/bin/hadoop-daemon.sh start namenode

  "$HADOOP_HOME"/bin/hadoop-daemon.sh start datanode

  sleep 10

  "$HBASE_HOME"/bin/hbase-daemon.sh start master

else

  # SLAVE

  # Prep Ganglia
  sed -i -e "s|\( *mcast_join *=.*\)|#\1|" \
         -e "s|\( *bind *=.*\)|#\1|" \
         -e "s|\(udp_send_channel {\)|\1\n  host=$MASTER_HOST|" \
         /etc/gmond.conf
  service gmond start

  "$HADOOP_HOME"/bin/hadoop-daemon.sh start datanode

  "$HBASE_HOME"/bin/hbase-daemon.sh start regionserver

fi

# Run this script on next boot
rm -f /var/ec2/ec2-run-user-data.*
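The %MASTER_HOST% and %ZOOKEEPER_QUORUM% placeholders above are filled in by the launch
scripts before the file is passed to ec2-run-instances as user data, for example (this
is the substitution used for the slaves):

  sed -e "s|%MASTER_HOST%|$MASTER_HOST|" \
      -e "s|%ZOOKEEPER_QUORUM%|$ZOOKEEPER_QUORUM|" \
      "$bin"/$USER_DATA_FILE > "$bin"/$USER_DATA_FILE.slave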
@@ -0,0 +1,50 @@
#!/usr/bin/env bash

# ZOOKEEPER_QUORUM set in the environment by the caller
HBASE_HOME=`ls -d /usr/local/hbase-*`

###############################################################################
# HBase configuration (Zookeeper)
###############################################################################

cat > $HBASE_HOME/conf/hbase-site.xml <<EOF
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
  <name>hbase.zookeeper.quorum</name>
  <value>$ZOOKEEPER_QUORUM</value>
</property>
<property>
  <name>zookeeper.session.timeout</name>
  <value>60000</value>
</property>
<property>
  <name>hbase.zookeeper.property.dataDir</name>
  <value>/mnt/hbase/zk</value>
</property>
<property>
  <name>hbase.zookeeper.property.maxClientCnxns</name>
  <value>100</value>
</property>
</configuration>
EOF

###############################################################################
# Start services
###############################################################################

# up open file descriptor limits
echo "root soft nofile 32768" >> /etc/security/limits.conf
echo "root hard nofile 32768" >> /etc/security/limits.conf

# up epoll limits
# ok if this fails, only valid for kernels 2.6.27+
sysctl -w fs.epoll.max_user_instances=32768

mkdir -p /mnt/hbase/logs
mkdir -p /mnt/hbase/zk

[ ! -f /etc/hosts ] && echo "127.0.0.1 localhost" > /etc/hosts

"$HBASE_HOME"/bin/hbase-daemon.sh start zookeeper
@@ -0,0 +1,101 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Create an HBase AMI. Runs on the EC2 instance.

# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-ec2-env.sh

echo "Remote: INSTANCE_TYPE is $INSTANCE_TYPE."
echo "Remote: ARCH is $ARCH."

# Remove environment script since it contains sensitive information
rm -f "$bin"/hbase-ec2-env.sh

# Install Java
echo "Downloading and installing java binary."
cd /usr/local
wget -nv -O java.bin $JAVA_BINARY_URL
sh java.bin
rm -f java.bin

# Install tools
echo "Installing rpms."
yum -y update
yum -y install rsync lynx screen ganglia-gmetad ganglia-gmond ganglia-web httpd php
yum -y clean all

# Install Hadoop
echo "Installing Hadoop $HADOOP_VERSION."
cd /usr/local
wget -nv http://archive.apache.org/dist/hadoop/core/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz
[ ! -f hadoop-$HADOOP_VERSION.tar.gz ] && wget -nv http://www.apache.org/dist/hadoop/core/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz
tar xzf hadoop-$HADOOP_VERSION.tar.gz
rm -f hadoop-$HADOOP_VERSION.tar.gz

# Configure Hadoop
sed -i \
  -e "s|# export JAVA_HOME=.*|export JAVA_HOME=/usr/local/jdk${JAVA_VERSION}|" \
  -e 's|# export HADOOP_LOG_DIR=.*|export HADOOP_LOG_DIR=/mnt/hadoop/logs|' \
  -e 's|# export HADOOP_SLAVE_SLEEP=.*|export HADOOP_SLAVE_SLEEP=1|' \
  -e 's|# export HADOOP_OPTS=.*|export HADOOP_OPTS=-server|' \
  /usr/local/hadoop-$HADOOP_VERSION/conf/hadoop-env.sh

# Install HBase
echo "Installing HBase $HBASE_VERSION."
cd /usr/local
wget -nv http://iridiant.s3.amazonaws.com/hbase/hbase-$HBASE_VERSION.tar.gz
tar xzf hbase-$HBASE_VERSION.tar.gz
rm -f hbase-$HBASE_VERSION.tar.gz

# Configure HBase
sed -i \
  -e "s|# export JAVA_HOME=.*|export JAVA_HOME=/usr/local/jdk${JAVA_VERSION}|" \
  -e 's|# export HBASE_OPTS=.*|export HBASE_OPTS="$HBASE_OPTS -server -XX:+UseConcMarkSweepGC -XX:+DoEscapeAnalysis -XX:+AggressiveOpts"|' \
  -e 's|# export HBASE_LOG_DIR=.*|export HBASE_LOG_DIR=/mnt/hbase/logs|' \
  -e 's|# export HBASE_SLAVE_SLEEP=.*|export HBASE_SLAVE_SLEEP=1|' \
  /usr/local/hbase-$HBASE_VERSION/conf/hbase-env.sh

# Run user data as script on instance startup
chmod +x /etc/init.d/ec2-run-user-data
echo "/etc/init.d/ec2-run-user-data" >> /etc/rc.d/rc.local

# Setup root user bash environment
echo "export JAVA_HOME=/usr/local/jdk${JAVA_VERSION}" >> /root/.bash_profile
echo "export HADOOP_HOME=/usr/local/hadoop-${HADOOP_VERSION}" >> /root/.bash_profile
echo "export HBASE_HOME=/usr/local/hbase-${HBASE_VERSION}" >> /root/.bash_profile
echo 'export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HBASE_HOME/bin:$PATH' >> /root/.bash_profile

# Configure networking.
# Delete SSH authorized_keys since it includes the key it was launched with. (Note that it is re-populated when an instance starts.)
rm -f /root/.ssh/authorized_keys
# Ensure logging in to new hosts is seamless.
echo '    StrictHostKeyChecking no' >> /etc/ssh/ssh_config

# Bundle and upload image
cd ~root
# Don't need to delete .bash_history since it isn't written until exit.
df -h
ec2-bundle-vol -d /mnt -k /mnt/pk*.pem -c /mnt/cert*.pem -u $AWS_ACCOUNT_ID -s 3072 -p hbase-$HBASE_VERSION-$ARCH -r $ARCH

ec2-upload-bundle -b $S3_BUCKET -m /mnt/hbase-$HBASE_VERSION-$ARCH.manifest.xml -a $AWS_ACCESS_KEY_ID -s $AWS_SECRET_ACCESS_KEY

# End
echo Done
@@ -0,0 +1,51 @@
#!/bin/bash
#
# ec2-run-user-data - Run instance user-data if it looks like a script.
#
# Only retrieves and runs the user-data script once per instance. If
# you want the user-data script to run again (e.g., on the next boot)
# then add this command in the user-data script:
#   rm -f /var/ec2/ec2-run-user-data.*
#
# History:
#   2008-05-16 Eric Hammond <ehammond@thinksome.com>
#   - Initial version including code from Kim Scheibel, Jorge Oliveira
#   2008-08-06 Tom White
#   - Updated to use mktemp on fedora
#

prog=$(basename $0)
logger="logger -t $prog"
curl="curl --retry 3 --silent --show-error --fail"
instance_data_url=http://169.254.169.254/2008-02-01

# Wait until networking is up on the EC2 instance.
perl -MIO::Socket::INET -e '
  until(new IO::Socket::INET("169.254.169.254:80")){print"Waiting for network...\n";sleep 1}
' | $logger

# Exit if we have already run on this instance (e.g., previous boot).
ami_id=$($curl $instance_data_url/meta-data/ami-id)
been_run_file=/var/ec2/$prog.$ami_id
mkdir -p $(dirname $been_run_file)
if [ -f $been_run_file ]; then
  $logger < $been_run_file
  exit
fi

# Retrieve the instance user-data and run it if it looks like a script
user_data_file=`mktemp -t ec2-user-data.XXXXXXXXXX`
chmod 700 $user_data_file
$logger "Retrieving user-data"
$curl -o $user_data_file $instance_data_url/user-data 2>&1 | $logger
if [ ! -s $user_data_file ]; then
  $logger "No user-data available"
elif head -1 $user_data_file | egrep -v '^#!'; then
  $logger "Skipping user-data as it does not begin with #!"
else
  $logger "Running user-data"
  echo "user-data has already been run on this instance" > $been_run_file
  $user_data_file 2>&1 | logger -t "user-data"
  $logger "user-data exit code: $?"
fi
rm -f $user_data_file
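As the header notes, a user-data script normally runs only once per instance; an
illustrative user-data script that opts back in to running on every boot could end like
this (the setup step is a placeholder):

  #!/bin/sh
  # ... per-boot setup goes here ...
  # allow ec2-run-user-data to execute the user-data again on the next boot
  rm -f /var/ec2/ec2-run-user-data.*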
@@ -0,0 +1,87 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Set up security groups for the EC2 HBase cluster

if [ -z $1 ]; then
  echo "Cluster name required!"
  exit 1
fi

CLUSTER=$1

# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-ec2-env.sh

echo "Creating/checking security groups"

ec2-describe-group | egrep "[[:space:]]$CLUSTER_MASTER[[:space:]]" > /dev/null
if [ ! $? -eq 0 ]; then
  echo "Creating group $CLUSTER_MASTER"
  ec2-add-group $CLUSTER_MASTER -d "Group for HBase Master."
  ec2-authorize $CLUSTER_MASTER -o $CLUSTER_MASTER -u $AWS_ACCOUNT_ID
  ec2-authorize $CLUSTER_MASTER -p 22    # ssh

  if [ $ENABLE_WEB_PORTS = "true" ]; then
    ec2-authorize $CLUSTER_MASTER -p 50070 # NameNode web interface
    ec2-authorize $CLUSTER_MASTER -p 50075 # DataNode web interface
    ec2-authorize $CLUSTER_MASTER -p 60010 # HBase master web interface
    ec2-authorize $CLUSTER_MASTER -p 60030 # HBase region server web interface
  fi
else
  echo "Security group $CLUSTER_MASTER exists, ok"
fi

ec2-describe-group | egrep "[[:space:]]$CLUSTER[[:space:]]" > /dev/null
if [ ! $? -eq 0 ]; then
  echo "Creating group $CLUSTER"
  ec2-add-group $CLUSTER -d "Group for HBase Slaves."
  ec2-authorize $CLUSTER -o $CLUSTER -u $AWS_ACCOUNT_ID
  ec2-authorize $CLUSTER -p 22    # ssh

  if [ $ENABLE_WEB_PORTS = "true" ]; then
    ec2-authorize $CLUSTER -p 50070 # NameNode web interface
    ec2-authorize $CLUSTER -p 50075 # DataNode web interface
    ec2-authorize $CLUSTER -p 60010 # HBase master web interface
    ec2-authorize $CLUSTER -p 60030 # HBase region server web interface
  fi

  ec2-authorize $CLUSTER_MASTER -o $CLUSTER -u $AWS_ACCOUNT_ID
  ec2-authorize $CLUSTER -o $CLUSTER_MASTER -u $AWS_ACCOUNT_ID
else
  echo "Security group $CLUSTER exists, ok"
fi

# Set up zookeeper group

ec2-describe-group | egrep "[[:space:]]$CLUSTER_ZOOKEEPER[[:space:]]" > /dev/null
if [ ! $? -eq 0 ]; then
  echo "Creating group $CLUSTER_ZOOKEEPER"
  ec2-add-group $CLUSTER_ZOOKEEPER -d "Group for HBase Zookeeper quorum."
  ec2-authorize $CLUSTER_ZOOKEEPER -o $CLUSTER_ZOOKEEPER -u $AWS_ACCOUNT_ID
  ec2-authorize $CLUSTER_ZOOKEEPER -p 22    # ssh

  ec2-authorize $CLUSTER_MASTER -o $CLUSTER_ZOOKEEPER -u $AWS_ACCOUNT_ID
  ec2-authorize $CLUSTER_ZOOKEEPER -o $CLUSTER_MASTER -u $AWS_ACCOUNT_ID
  ec2-authorize $CLUSTER -o $CLUSTER_ZOOKEEPER -u $AWS_ACCOUNT_ID
  ec2-authorize $CLUSTER_ZOOKEEPER -o $CLUSTER -u $AWS_ACCOUNT_ID
else
  echo "Security group $CLUSTER_ZOOKEEPER exists, ok"
fi
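A quick way to sanity-check the result afterwards is to list the three groups the script
creates (cluster name "testcluster" is illustrative):

  ec2-describe-group | egrep "testcluster|testcluster-master|testcluster-zookeeper"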
@@ -0,0 +1,63 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Launch an EC2 cluster of HBase instances.

bin=`dirname "$0"`
bin=`cd "$bin"; pwd`

if [ -z $1 ]; then
  echo "Cluster name required!"
  exit 1
fi
CLUSTER=$1

if [ -z $2 ]; then
  echo "Must specify the number of slaves to start."
  exit 1
fi
SLAVES=$2

if [ -z $3 ]; then
  echo "Must specify the number of zookeepers to start."
  exit 1
fi
ZOOS=$3

# Set up security groups

if ! "$bin"/init-hbase-cluster-secgroups $CLUSTER ; then
  exit $?
fi

# Launch the ZK quorum peers

if ! "$bin"/launch-hbase-zookeeper $CLUSTER $ZOOS ; then
  exit $?
fi

# Launch the HBase master

if ! "$bin"/launch-hbase-master $CLUSTER ; then
  exit $?
fi

# Launch the HBase slaves

if ! "$bin"/launch-hbase-slaves $CLUSTER $SLAVES ; then
  exit $?
fi
@@ -0,0 +1,87 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Launch an EC2 HBase master.

if [ -z $1 ]; then
  echo "Cluster name required!"
  exit 1
fi

CLUSTER=$1

# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-ec2-env.sh

if [ -z $AWS_ACCOUNT_ID ]; then
  echo "Please set AWS_ACCOUNT_ID in $bin/hbase-ec2-env.sh."
  exit 1
fi

echo "Testing for existing master in group: $CLUSTER"
MASTER_EC2_HOST=`ec2-describe-instances | awk '"RESERVATION" == $1 && "'$CLUSTER_MASTER'" == $4, "RESERVATION" == $1 && "'$CLUSTER_MASTER'" != $4'`
MASTER_EC2_HOST=`echo "$MASTER_EC2_HOST" | awk '"INSTANCE" == $1 && "running" == $6 {print $4}'`

if [ ! -z "$MASTER_EC2_HOST" ]; then
  echo "Master already running on: $MASTER_EC2_HOST"
  MASTER_HOST=`ec2-describe-instances $INSTANCE | grep INSTANCE | grep running | grep $MASTER_EC2_HOST | awk '{print $5}'`
  echo $MASTER_HOST > $MASTER_PRIVATE_IP_PATH
  echo $MASTER_EC2_HOST > $MASTER_IP_PATH
  exit 0
fi

# Finding HBase image
[ -z "$AMI_IMAGE" ] && AMI_IMAGE=`ec2-describe-images -a | grep $S3_BUCKET | grep $HBASE_VERSION | grep $ARCH | grep available | awk '{print $2}'`

# Start a master
echo "Starting master with AMI $AMI_IMAGE (arch $ARCH)"
# Substituting zookeeper quorum
ZOOKEEPER_QUORUM=`cat $ZOOKEEPER_QUORUM_PATH`
sed -e "s|%ZOOKEEPER_QUORUM%|$ZOOKEEPER_QUORUM|" \
    "$bin"/$USER_DATA_FILE > "$bin"/$USER_DATA_FILE.master
INSTANCE=`ec2-run-instances $AMI_IMAGE -n 1 -g $CLUSTER_MASTER -k $KEY_NAME -f "$bin"/$USER_DATA_FILE.master -t $INSTANCE_TYPE | grep INSTANCE | awk '{print $2}'`
echo -n "Waiting for instance $INSTANCE to start"
while true; do
  printf "."
  # get private dns
  MASTER_HOST=`ec2-describe-instances $INSTANCE | grep running | awk '{print $5}'`
  if [ ! -z $MASTER_HOST ]; then
    echo " Started as $MASTER_HOST"
    break;
  fi
  sleep 1
done
rm -f "$bin"/$USER_DATA_FILE.master

MASTER_EC2_HOST=`ec2-describe-instances $INSTANCE | grep INSTANCE | grep running | grep $MASTER_HOST | awk '{print $4}'`
echo $MASTER_HOST > $MASTER_PRIVATE_IP_PATH
echo $MASTER_EC2_HOST > $MASTER_IP_PATH
MASTER_EC2_ZONE=`ec2-describe-instances $INSTANCE | grep INSTANCE | grep running | grep $MASTER_HOST | awk '{print $11}'`
echo $MASTER_EC2_ZONE > $MASTER_ZONE_PATH

while true; do
  REPLY=`ssh $SSH_OPTS "$KEY_NAME@$MASTER_EC2_HOST" 'echo "hello"'`
  if [ ! -z $REPLY ]; then
    break;
  fi
  sleep 5
done

MASTER_IP=`dig +short $MASTER_EC2_HOST`
echo "Master is $MASTER_EC2_HOST, ip is $MASTER_IP, zone is $MASTER_EC2_ZONE."
@@ -0,0 +1,59 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Launch the EC2 HBase slaves.

if [ -z $1 ]; then
  echo "Cluster name required!"
  exit 1
fi

CLUSTER=$1

if [ -z $2 ]; then
  echo "Must specify the number of slaves to start."
  exit 1
fi

NO_INSTANCES=$2

# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-ec2-env.sh

if [ ! -f $MASTER_IP_PATH ]; then
  echo "Must start Cluster Master first!"
  exit 1
fi

[ -z "$AMI_IMAGE" ] && AMI_IMAGE=`ec2-describe-images -a | grep $S3_BUCKET | grep $HBASE_VERSION | grep $ARCH | grep available | awk '{print $2}'`

MASTER_HOST=`cat $MASTER_PRIVATE_IP_PATH`
MASTER_ZONE=`cat $MASTER_ZONE_PATH`
ZOOKEEPER_QUORUM=`cat $ZOOKEEPER_QUORUM_PATH`

# Substituting master hostname and zookeeper quorum
sed -e "s|%MASTER_HOST%|$MASTER_HOST|" \
    -e "s|%ZOOKEEPER_QUORUM%|$ZOOKEEPER_QUORUM|" \
    "$bin"/$USER_DATA_FILE > "$bin"/$USER_DATA_FILE.slave

# Start slaves
echo "Starting $NO_INSTANCES AMI(s) with ID $AMI_IMAGE (arch $ARCH) in group $CLUSTER in zone $MASTER_ZONE"
ec2-run-instances $AMI_IMAGE -n "$NO_INSTANCES" -g "$CLUSTER" -k "$KEY_NAME" -f "$bin"/$USER_DATA_FILE.slave -t "$INSTANCE_TYPE" -z "$MASTER_ZONE" | grep INSTANCE | awk '{print $2}'

rm "$bin"/$USER_DATA_FILE.slave
@@ -0,0 +1,84 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Launch the EC2 HBase Zookeepers.

if [ -z $1 ]; then
  echo "Cluster name required!"
  exit 1
fi

if [ -z $2 ]; then
  echo "Must specify the number of zookeeper quorum peers to start."
  exit 1
fi

CLUSTER=$1
NO_INSTANCES=$2

# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-ec2-env.sh

type=$ZOO_INSTANCE_TYPE
[ -z "$type" ] && type=$INSTANCE_TYPE
arch=$ZOO_ARCH
[ -z "$arch" ] && arch=$ARCH

# Finding HBase image
[ -z "$ZOO_AMI_IMAGE" ] && ZOO_AMI_IMAGE=`ec2-describe-images -a | grep $S3_BUCKET | grep hbase | grep $HBASE_VERSION | grep $arch | grep available | awk '{print $2}'`

# Start Zookeeper instances

echo "Starting ZooKeeper quorum ensemble."

peers=""
public_names=""
for inst in `seq 1 $NO_INSTANCES` ; do
  echo "Starting an AMI with ID $ZOO_AMI_IMAGE (arch $arch) in group $CLUSTER_ZOOKEEPER"
  INSTANCE=`ec2-run-instances $ZOO_AMI_IMAGE -n 1 -g $CLUSTER_ZOOKEEPER -k ${KEY_NAME} -t $type | grep INSTANCE | awk '{print $2}'`
  echo -n "Waiting for instance $INSTANCE to start: "
  while true; do
    printf "."
    # get private dns
    priv=`ec2-describe-instances $INSTANCE | grep running | awk '{print $5}'`
    if [ ! -z $priv ]; then
      echo " Started ZooKeeper instance $INSTANCE as ${priv}"
      break
    fi
    sleep 1
  done
  peers="$peers $priv"
  public=`ec2-describe-instances $INSTANCE | grep running | awk '{print $4}'`
  echo "    Public DNS name is $public."
  public_names="$public_names $public"
done

ZOOKEEPER_QUORUM=`echo $peers | sed -e 's/ /,/g'`
echo $ZOOKEEPER_QUORUM > $ZOOKEEPER_QUORUM_PATH
echo "ZooKeeper quorum is $ZOOKEEPER_QUORUM."

# Start Zookeeper quorum

echo "Initializing the ZooKeeper quorum ensemble."

for host in $public_names ; do
  echo "    $host"
  scp $SSH_OPTS "$bin"/hbase-ec2-init-zookeeper-remote.sh "${KEY_NAME}@${host}:/var/tmp"
  ssh $SSH_OPTS "${KEY_NAME}@${host}" "sh -c \"ZOOKEEPER_QUORUM=\"$ZOOKEEPER_QUORUM\" sh /var/tmp/hbase-ec2-init-zookeeper-remote.sh\""
done
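For reference, the quorum string written to $ZOOKEEPER_QUORUM_PATH is simply the
comma-joined list of the peers' private DNS names; an illustrative example (the cluster
name and hostnames are made up):

  $ cat ~/.hbase-quorum-testcluster-zookeeper
  ip-10-250-7-1.ec2.internal,ip-10-250-7-2.ec2.internal,ip-10-250-7-3.ec2.internal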
@@ -0,0 +1,31 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# List running clusters.

# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-ec2-env.sh

# Finding HBase clusters
CLUSTERS=`ec2-describe-instances | awk '"RESERVATION" == $1 && $4 ~ /-master$/, "INSTANCE" == $1' | tr '\n' '\t' | grep running | cut -f4 | rev | cut -d'-' -f2- | rev`

[ -z "$CLUSTERS" ] && echo "No running clusters." && exit 0

echo "Running HBase clusters:"
echo "$CLUSTERS"
@@ -0,0 +1,68 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Clean up security groups for the EC2 HBase cluster

if [ -z $1 ]; then
  echo "Cluster name required!"
  exit 1
fi

CLUSTER=$1

# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-ec2-env.sh

echo "Revoking security groups"

ec2-describe-group | egrep "[[:space:]]$CLUSTER_MASTER[[:space:]]" > /dev/null
if [ $? -eq 0 ]; then
  ec2-revoke $CLUSTER_MASTER -o $CLUSTER_MASTER -u $AWS_ACCOUNT_ID
fi

ec2-describe-group | egrep "[[:space:]]$CLUSTER[[:space:]]" > /dev/null
if [ $? -eq 0 ]; then
  ec2-revoke $CLUSTER -o $CLUSTER -u $AWS_ACCOUNT_ID
  ec2-revoke $CLUSTER_MASTER -o $CLUSTER -u $AWS_ACCOUNT_ID
  ec2-revoke $CLUSTER -o $CLUSTER_MASTER -u $AWS_ACCOUNT_ID
fi

ec2-describe-group | egrep "[[:space:]]$CLUSTER_ZOOKEEPER[[:space:]]" > /dev/null
if [ $? -eq 0 ]; then
  ec2-revoke $CLUSTER_ZOOKEEPER -o $CLUSTER_ZOOKEEPER -u $AWS_ACCOUNT_ID
  ec2-revoke $CLUSTER_MASTER -o $CLUSTER_ZOOKEEPER -u $AWS_ACCOUNT_ID
  ec2-revoke $CLUSTER_ZOOKEEPER -o $CLUSTER_MASTER -u $AWS_ACCOUNT_ID
  ec2-revoke $CLUSTER -o $CLUSTER_ZOOKEEPER -u $AWS_ACCOUNT_ID
  ec2-revoke $CLUSTER_ZOOKEEPER -o $CLUSTER -u $AWS_ACCOUNT_ID
fi

ec2-describe-group | egrep "[[:space:]]$CLUSTER_MASTER[[:space:]]" > /dev/null
if [ $? -eq 0 ]; then
  ec2-delete-group $CLUSTER_MASTER
fi

ec2-describe-group | egrep "[[:space:]]$CLUSTER_ZOOKEEPER[[:space:]]" > /dev/null
if [ $? -eq 0 ]; then
  ec2-delete-group $CLUSTER_ZOOKEEPER
fi

ec2-describe-group | egrep "[[:space:]]$CLUSTER[[:space:]]" > /dev/null
if [ $? -eq 0 ]; then
  ec2-delete-group $CLUSTER
fi
@@ -0,0 +1,46 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Terminate a cluster.

if [ -z $1 ]; then
  echo "Cluster name required!"
  exit 1
fi

CLUSTER=$1

# Import variables
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-ec2-env.sh

# Find the cluster instances
HBASE_INSTANCES=`ec2-describe-instances | awk '"RESERVATION" == $1 && ("'$CLUSTER'" == $4 || "'$CLUSTER_MASTER'" == $4 || "'$CLUSTER_ZOOKEEPER'" == $4), "RESERVATION" == $1 && ("'$CLUSTER'" != $4 && "'$CLUSTER_MASTER'" != $4 && "'$CLUSTER_ZOOKEEPER'" != $4)'`
HBASE_INSTANCES=`echo "$HBASE_INSTANCES" | grep INSTANCE | grep running`

[ -z "$HBASE_INSTANCES" ] && echo "No running instances in cluster $CLUSTER." && exit 0

echo "Running HBase instances:"
echo "$HBASE_INSTANCES"
read -p "Terminate all instances? [yes or no]: " answer

if [ "$answer" != "yes" ]; then
  exit 1
fi

ec2-terminate-instances `echo "$HBASE_INSTANCES" | awk '{print $2}'`
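Note that delete-cluster refuses to run while instances are still up, so a full teardown
is the two-step sequence below (cluster name illustrative):

  bin/hbase-ec2 terminate-cluster testcluster
  bin/hbase-ec2 delete-cluster testcluster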