HBASE-1961 HBase EC2 scripts; updated readme; hardcode keypair to 'root'; copy ssh private key to master

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@882242 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Andrew Kyle Purtell 2009-11-19 18:45:06 +00:00
parent cb026c7c83
commit 969df0a469
6 changed files with 57 additions and 24 deletions

View File

@@ -14,23 +14,57 @@ You need both the EC2 API and AMI tools
http://developer.amazonwebservices.com/connect/entry.jspa?externalID=368&categoryID=88
installed and on the path. For Ubuntu, "apt-get install ec2-ami-tools ec2-api-tools".
installed and on the path.
The hbase-ec2-env.sh script requires some configuration:
When setting up keypairs on EC2, be sure to name your keypair 'root'.
- Fill in AWS_ACCOUNT_ID with your EC2 account number
Quick Start:
- Fill in AWS_ACCESS_KEY_ID with your EC2 access key
1) Download and unzip the EC2 AMI and API tools zipfiles.
- Fill in AWS_SECRET_ACCESS_KEY with your EC2 secret access key
For Ubuntu, "apt-get install ec2-ami-tools ec2-api-tools".
- Fill in KEY_NAME with the SSH keypair you will use
2) Put the tools on the path and set EC2_HOME in the environment to point to
the top level directory of the API tools.
- Fill in EC2_PRIVATE_KEY with the location of your AWS private key file --
must begin with 'pk' and end with '.pem'
3) Configure src/contrib/ec2/bin/hbase-ec2-env.sh
- Fill in EC2_CERT with the location of your AWS certificate -- must begin
with 'cert' and end with '.pem'
Fill in AWS_ACCOUNT_ID with your EC2 account number.
- Make sure the private part of your AWS SSH keypair exists in the same
directory as EC2_PRIVATE_KEY with the name id_rsa_${KEYNAME}
Fill in AWS_ACCESS_KEY_ID with your EC2 access key.
Fill in AWS_SECRET_ACCESS_KEY with your EC2 secret access key.
Fill in EC2_PRIVATE_KEY with the location of your AWS private key file --
must begin with 'pk' and end with '.pem'.
Fill in EC2_CERT with the location of your AWS certificate -- must begin
with 'cert' and end with '.pem'.
Make sure the private part of your AWS SSH keypair exists in the same
directory as EC2_PRIVATE_KEY with the name id_rsa_root.
4) ./bin/hbase-ec2 launch-cluster <name> <nr-zoos> <nr-slaves>, e.g.
./bin/hbase-ec2 launch-cluster testcluster 3 3
5) Once the above command has finished without error, ./bin/hbase-ec2 login
<name>, e.g.
./bin/hbase-ec2 login testcluster
6) Check that the cluster is up and functional:
hbase shell
> status 'simple'
You should see something like:
3 live servers
domU-12-31-39-09-75-11.compute-1.internal:60020 1258653694915
requests=0, regions=1, usedHeap=29, maxHeap=987
domU-12-31-39-01-AC-31.compute-1.internal:60020 1258653709041
requests=0, regions=1, usedHeap=29, maxHeap=987
domU-12-31-39-01-B0-91.compute-1.internal:60020 1258653706411
requests=0, regions=0, usedHeap=27, maxHeap=987
0 dead servers

View File

@@ -34,7 +34,7 @@ AMI_IMAGE=`ec2-describe-images $TOOL_OPTS -a | grep $S3_BUCKET | grep hbase | gr
[ ! -z $AMI_IMAGE ] && echo "AMI already registered, use: ec2-deregister $AMI_IMAGE" && exit 1
echo "Starting a AMI with ID $BASE_AMI_IMAGE."
OUTPUT=`ec2-run-instances $BASE_AMI_IMAGE $TOOL_OPTS -k $KEY_NAME -t $INSTANCE_TYPE`
OUTPUT=`ec2-run-instances $BASE_AMI_IMAGE $TOOL_OPTS -k root -t $INSTANCE_TYPE`
BOOTING_INSTANCE=`echo $OUTPUT | awk '{print $6}'`
echo "Instance is $BOOTING_INSTANCE."

View File

@@ -34,13 +34,9 @@ EC2_CERT=
# The default setting is probably OK if you set up EC2 following the Amazon Getting Started guide.
EC2_KEYDIR=`dirname "$EC2_PRIVATE_KEY"`
# The EC2 key name used to launch instances.
# The default is the value used in the Amazon Getting Started guide.
KEY_NAME=root
# Where your EC2 private key is stored (created when following the Amazon Getting Started guide).
# You need to change this if you don't store this with your other EC2 keys.
PRIVATE_KEY_PATH=`echo "$EC2_KEYDIR"/"id_rsa_$KEY_NAME"`
PRIVATE_KEY_PATH=`echo "$EC2_KEYDIR"/"id_rsa_root"`
# SSH options used when connecting to EC2 instances.
SSH_OPTS=`echo -q -i "$PRIVATE_KEY_PATH" -o StrictHostKeyChecking=no -o ServerAliveInterval=30`

View File

@@ -55,7 +55,7 @@ echo "Starting master with AMI $AMI_IMAGE (arch $ARCH)"
ZOOKEEPER_QUORUM=`cat $ZOOKEEPER_QUORUM_PATH`
sed -e "s|%ZOOKEEPER_QUORUM%|$ZOOKEEPER_QUORUM|" \
"$bin"/$USER_DATA_FILE > "$bin"/$USER_DATA_FILE.master
INSTANCE=`ec2-run-instances $AMI_IMAGE $TOOL_OPTS -n 1 -g $CLUSTER_MASTER -k $KEY_NAME -f "$bin"/$USER_DATA_FILE.master -t $INSTANCE_TYPE | grep INSTANCE | awk '{print $2}'`
INSTANCE=`ec2-run-instances $AMI_IMAGE $TOOL_OPTS -n 1 -g $CLUSTER_MASTER -k root -f "$bin"/$USER_DATA_FILE.master -t $INSTANCE_TYPE | grep INSTANCE | awk '{print $2}'`
echo -n "Waiting for instance $INSTANCE to start"
while true; do
printf "."
@@ -76,12 +76,15 @@ MASTER_EC2_ZONE=`ec2-describe-instances $TOOL_OPTS $INSTANCE | grep INSTANCE | g
echo $MASTER_EC2_ZONE > $MASTER_ZONE_PATH
while true; do
REPLY=`ssh $SSH_OPTS "$KEY_NAME@$MASTER_EC2_HOST" 'echo "hello"'`
REPLY=`ssh $SSH_OPTS "root@$MASTER_EC2_HOST" 'echo "hello"'`
if [ ! -z $REPLY ]; then
break;
fi
sleep 5
done
scp $SSH_OPTS $PRIVATE_KEY_PATH "root@$MASTER_EC2_HOST:/root/.ssh/id_rsa"
ssh $SSH_OPTS "root@$MASTER_EC2_HOST" "chmod 600 /root/.ssh/id_rsa"
MASTER_IP=`dig +short $MASTER_EC2_HOST`
echo "Master is $MASTER_EC2_HOST, ip is $MASTER_IP, zone is $MASTER_EC2_ZONE."

View File

@@ -54,6 +54,6 @@ sed -e "s|%MASTER_HOST%|$MASTER_HOST|" \
# Start slaves
echo "Starting $NO_INSTANCES AMI(s) with ID $AMI_IMAGE (arch $ARCH) in group $CLUSTER in zone $MASTER_ZONE"
ec2-run-instances $AMI_IMAGE $TOOL_OPTS -n "$NO_INSTANCES" -g "$CLUSTER" -k "$KEY_NAME" -f "$bin"/$USER_DATA_FILE.slave -t "$INSTANCE_TYPE" -z "$MASTER_ZONE" | grep INSTANCE | awk '{print $2}'
ec2-run-instances $AMI_IMAGE $TOOL_OPTS -n "$NO_INSTANCES" -g "$CLUSTER" -k root -f "$bin"/$USER_DATA_FILE.slave -t "$INSTANCE_TYPE" -z "$MASTER_ZONE" | grep INSTANCE | awk '{print $2}'
rm "$bin"/$USER_DATA_FILE.slave

View File

@@ -51,7 +51,7 @@ peers=""
public_names=""
for inst in `seq 1 $NO_INSTANCES` ; do
echo "Starting an AMI with ID $ZOO_AMI_IMAGE (arch $arch) in group $CLUSTER_ZOOKEEPER"
INSTANCE=`ec2-run-instances $ZOO_AMI_IMAGE $TOOL_OPTS -n 1 -g $CLUSTER_ZOOKEEPER -k ${KEY_NAME} -t $type | grep INSTANCE | awk '{print $2}'`
INSTANCE=`ec2-run-instances $ZOO_AMI_IMAGE $TOOL_OPTS -n 1 -g $CLUSTER_ZOOKEEPER -k root -t $type | grep INSTANCE | awk '{print $2}'`
echo -n "Waiting for instance $INSTANCE to start: "
while true; do
printf "."
@@ -79,6 +79,6 @@ echo "Initializing the ZooKeeper quorum ensemble."
for host in $public_names ; do
echo " $host"
scp $SSH_OPTS "$bin"/hbase-ec2-init-zookeeper-remote.sh "${KEY_NAME}@${host}:/var/tmp"
ssh $SSH_OPTS "${KEY_NAME}@${host}" "sh -c \"ZOOKEEPER_QUORUM=\"$ZOOKEEPER_QUORUM\" sh /var/tmp/hbase-ec2-init-zookeeper-remote.sh\""
scp $SSH_OPTS "$bin"/hbase-ec2-init-zookeeper-remote.sh "root@${host}:/var/tmp"
ssh $SSH_OPTS "root@${host}" "sh -c \"ZOOKEEPER_QUORUM=\"$ZOOKEEPER_QUORUM\" sh /var/tmp/hbase-ec2-init-zookeeper-remote.sh\""
done