mirror of https://github.com/apache/lucene.git
SOLR-11492 - clean up /solr/cloud-dev and add a well documented script.
This commit is contained in:
parent 43ee86a3d0
commit d75f027912
@@ -254,6 +254,8 @@ Other Changes

* SOLR-13767: Upgrade jackson to 2.9.9 (janhoy)

* SOLR-11492: Clean up /solr/cloud-dev scripts and provide a single well documented script (Gus Heck, Robert Bunch)

================== 8.2.0 ==================

Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
@@ -1,20 +0,0 @@
#!/bin/bash

numServers=$1

die () {
  echo >&2 "$@"
  exit 1
}

[ "$#" -eq 1 ] || die "1 argument required, $# provided, usage: clean.sh {numServers}"

cd ..

for (( i=1; i <= $numServers; i++ ))
do
  rm -r -f server$i
done

rm -r -f serverzk
rm -r -f server-lastlogs
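For reference, the usage string above implies an invocation like the one below; the node count is illustrative only. The script deletes server1..serverN, plus serverzk and server-lastlogs, one directory level up:

    ./clean.sh 6   # removes server1 .. server6, serverzk and server-lastlogs in the parent directory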
@@ -1,53 +0,0 @@
#!/usr/bin/env bash

# TODO: !OUT OF DATE!

cd ..

rm -r -f server2
rm -r -f server3
rm -r -f server4
rm -r -f server5
rm -r -f server6

rm -r -f dist
rm -r -f build
rm -r -f server/solr/zoo_data
rm -r -f server/solr/data
rm -f server/server.log

ant server dist

cp -r -f server server2
cp -r -f server server3
cp -r -f server server4
cp -r -f server server5
cp -r -f server server6

# first try uploading a conf dir
java -classpath lib/*:dist/*:build/lucene-libs/* org.apache.solr.cloud.ZkCLI -cmd upconfig -zkhost 127.0.0.1:9983 -confdir server/solr/collection1/conf -confname conf1 -solrhome server/solr -runzk 8983

# upload a second conf set so we avoid single conf auto linking
java -classpath lib/*:dist/*:build/lucene-libs/* org.apache.solr.cloud.ZkCLI -cmd upconfig -zkhost 127.0.0.1:9983 -confdir server/solr/collection1/conf -confname conf2 -solrhome server/solr -runzk 8983

# now try linking a collection to a conf set
java -classpath lib/*:dist/*:build/lucene-libs/* org.apache.solr.cloud.ZkCLI -cmd linkconfig -zkhost 127.0.0.1:9983 -collection collection1 -confname conf1 -solrhome server/solr -runzk 8983


cd server
java -DzkRun -DnumShards=2 -DSTOP.PORT=7983 -DSTOP.KEY=key -jar start.jar 1>server.log 2>&1 &

cd ../server2
java -Djetty.port=7574 -DzkHost=localhost:9983 -DnumShards=2 -DSTOP.PORT=6574 -DSTOP.KEY=key -jar start.jar 1>server2.log 2>&1 &

cd ../server3
java -Djetty.port=7575 -DzkHost=localhost:9983 -DnumShards=2 -DSTOP.PORT=6575 -DSTOP.KEY=key -jar start.jar 1>server3.log 2>&1 &

cd ../server4
java -Djetty.port=7576 -DzkHost=localhost:9983 -DnumShards=2 -DSTOP.PORT=6576 -DSTOP.KEY=key -jar start.jar 1>server4.log 2>&1 &

cd ../server5
java -Djetty.port=7577 -DzkHost=localhost:9983 -DnumShards=2 -DSTOP.PORT=6577 -DSTOP.KEY=key -jar start.jar 1>server5.log 2>&1 &

cd ../server6
java -Djetty.port=7578 -DzkHost=localhost:9983 -DnumShards=2 -DSTOP.PORT=6578 -DSTOP.KEY=key -jar start.jar 1>server6.log 2>&1 &
@@ -0,0 +1,383 @@
#!/bin/bash

##################################################################################
#
# The goal of this script is to allow quick setup of a blank local multi node
# cluster for development testing without needing to erase or interfere with
# previous testing. It also enables redeployment of the code for such testing
# clusters without erasing the data previously indexed.
#
# It is for dev testing only, NOT for production use.
#
# This is also NOT meant to be run from this directory within a lucene-solr
# working copy. Typical usage is to copy it out to a separate workspace
# such as (<GIT_CHECKOUT>/../testing), edit it, and then either use the -w option
# or edit the definition of the DEFAULT_VCS_WORKSPACE variable below.
#
# Usage:
#    ./cloud.sh <command> [options] [name]
#
# Options:
#  -c          clean the data & zk collections, erasing all indexed data
#  -r          recompile server with 'ant clean server create-package'
#  -m <mem>    memory per node
#  -a <args>   additional JVM options
#  -n <num>    number of nodes to create/start (errors if this does not match an existing cluster)
#  -w <path>   path to the vcs checkout
#  -z <num>    port to look for zookeeper on (2181 default)
#
# Commands:
#  new         Create a new cluster named by the current date or [name]
#  start       Start an existing cluster specified by [name]
#  stop        Stop the cluster specified by [name]
#  restart     Stop and then start
#
# In all cases, if [name] is unspecified, ls -t will be used to determine the
# most recent cluster working directory, and that will be used. If it is
# specified it will be resolved as a path from the directory where cloud.sh
# has been run.
#
# By default the script sets up a local Solr cloud with 4 nodes, in a local
# directory with the ISO date as the name. A local zookeeper at 2181 or the
# specified port is presumed to be available, and a new zk chroot is used for each
# cluster based on the file system path to the cluster directory. The default
# solr.xml is added to this solr root dir in zookeeper.
#
# Debugging ports are automatically opened for each node starting with port 5001.
#
# Specifying an explicit destination path will cause the script to
# use that path and a zk chroot that matches, so more than one install
# can be created in a day, or issue numbers etc. can be used. Normally the
# directories containing clusters created by this tool are in the same
# directory as this script. Distant paths with slashes or funny characters
# *might* work, but are not well tested, YMMV.
#
# PREREQ: 1. Zookeeper on localhost:2181 (or as specified by the -z option) where
#            it is ok to create a lot of top level directories named for
#            the absolute path of the [name] directory (for example:
#            /solr_home_myuser_projects_solr_testing_2019-01-01). Note
#            that not using the embedded zookeeper is key to being able to
#            switch between testing setups and to test against alternate versions
#            of zookeeper if desired.
#
# SETUP:  1. Place this script in a directory intended to hold all your
#            testing installations of solr.
#         2. Edit DEFAULT_VCS_WORKSPACE if the present value does not suit
#            your purposes.
#         3. chmod +x cloud.sh
#
# EXAMPLES:
#
# Create a brand new 4 node cluster deployed in a directory named for today
#
#    ./cloud.sh new
#
# Create a brand new 4 node cluster deployed in a directory named SOLR-1234567
#
#    ./cloud.sh new SOLR-1234567
#
# Stop the cluster
#
#    ./cloud.sh stop
#
# Compile and push new code to a running cluster (including bouncing the cluster)
#
#    ./cloud.sh restart -r
#
# Dump your hopelessly fubar'd test collections and start fresh with the current tarball
#
#    ./cloud.sh restart -c
#
##################################################################################
DEFAULT_VCS_WORKSPACE='../code/lucene-solr'

############## Normally no need to edit below this line ##############

##############
# Parse Args #
##############

COMMAND=$1
shift

CLEAN=false      # default
MEMORY=1g        # default
JVM_ARGS=''      # default
RECOMPILE=false  # default
NUM_NODES=0      # need to detect if not specified
VCS_WORK=${DEFAULT_VCS_WORKSPACE}
ZK_PORT=2181

while getopts ":crm:a:n:w:z:" opt; do
  case ${opt} in
    c)
      CLEAN=true
      ;;
    r)
      RECOMPILE=true
      ;;
    m)
      MEMORY=$OPTARG
      ;;
    a)
      JVM_ARGS=$OPTARG
      ;;
    n)
      NUM_NODES=$OPTARG
      ;;
    w)
      VCS_WORK=$OPTARG
      ;;
    z)
      ZK_PORT=$OPTARG
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      exit 1
  esac
done
shift $((OPTIND -1))

CLUSTER_WD=$1

#################
# Validate Args #
#################
case ${COMMAND} in
  new);;
  stop);;
  start);;
  restart);;
  *) echo "Invalid command $COMMAND"; exit 2;
esac

case ${NUM_NODES} in
  ''|*[!0-9]*) echo "$NUM_NODES (-n) is not a positive integer"; exit 3 ;;
  *) ;;
esac

case ${ZK_PORT} in
  ''|*[!0-9]*) echo "$ZK_PORT (-z) is not a positive integer"; exit 3 ;;
  *) ;;
esac

if [[ "$COMMAND" = "new" ]]; then
  if [[ "$CLEAN" = true ]]; then
    echo "Command new and option -c (clean) do not make sense together since a newly created cluster has no data to clean."; exit 1;
  fi
fi

if [[ ! -d "$VCS_WORK" ]]; then
  echo "$VCS_WORK (vcs working directory) does not exist"; exit 4;
fi
if [[ ! "$COMMAND" = "new" ]]; then
|
||||
if [[ -z "$CLUSTER_WD" ]]; then
|
||||
# find the most recently touched directory in the local directory
|
||||
CLUSTER_WD=$(find . -maxdepth 1 -mindepth 1 -type d -print0 | xargs -0 ls -1 -td | sed -E 's/\.\/(.*)/\1/' | head -n1)
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ ! -z "$CLUSTER_WD" ]]; then
|
||||
if [[ ! -d "$CLUSTER_WD" && ! "$COMMAND" = "new" ]]; then
|
||||
echo "$CLUSTER_WD (cluster working directory) does not exist or is not a directory"; exit 5;
|
||||
fi
|
||||
fi
|
||||
|
||||
############################
|
||||
# Print our initialization #
|
||||
############################
|
||||
echo "COMMAND : $COMMAND"
|
||||
echo "VCS WD : $VCS_WORK"
|
||||
echo "CLUSTER WD : $CLUSTER_WD"
|
||||
echo "NUM NODES : $NUM_NODES"
|
||||
echo "ZK PORT : $ZK_PORT"
|
||||
echo "CLEAN : $CLEAN"
|
||||
echo "RECOMPILE : $RECOMPILE"
|
||||
|
||||
###########################################################
|
||||
# Create new cluster working dir if new command specified #
|
||||
###########################################################
|
||||
mkdirIfReq() {
|
||||
if [[ "$COMMAND" = "new" ]]; then
|
||||
if [[ -z "$CLUSTER_WD" ]]; then
|
||||
DATE=$(date "+%Y-%m-%d")
|
||||
CLUSTER_WD="${DATE}"
|
||||
fi
|
||||
mkdir "$CLUSTER_WD"
|
||||
if [[ "$?" -ne 0 ]]; then
|
||||
echo "Unable to create $CLUSTER_WD"; exit 6;
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
#################
# Find Solr etc #
#################

findSolr() {
  pushd ${CLUSTER_WD}
  CLUSTER_WD_FULL=$(pwd -P)
  SOLR=${CLUSTER_WD}/$(find . -maxdepth 1 -name 'solr*' -type d -print0 | xargs -0 ls -1 -td | sed -E 's/\.\/(solr.*)/\1/' | head -n1)
  popd

  #echo "Found solr at $SOLR"
  SAFE_DEST="${CLUSTER_WD_FULL//\//_}";
}

###############################################
# Clean node dir (and thus data) if requested #
###############################################
cleanIfReq() {
  if [[ "$CLEAN" = true ]]; then
    if [[ -d "$CLUSTER_WD" ]]; then
      echo "Cleaning out $CLUSTER_WD"
      pushd ${CLUSTER_WD}
      rm -rf n*  # remove node dirs, which are n1, n2, n3 etc
      popd
    fi
    findSolr
    echo COLLECTIONS FOUND IN ZK | egrep --color=always '.*'
    COLLECTIONS_TO_CLEAN=`${SOLR}/bin/solr zk ls /solr_${SAFE_DEST}/collections -z localhost:${ZK_PORT}`; echo $COLLECTIONS_TO_CLEAN | egrep --color=always '.*'
    for collection in ${COLLECTIONS_TO_CLEAN}; do
      echo nuke $collection
      ${SOLR}/bin/solr zk rm -r /solr_${SAFE_DEST}/collections/${collection} -z localhost:${ZK_PORT}
      echo $?
    done
  fi
}
#################################
# Recompile server if requested #
#################################
recompileIfReq() {
  if [[ "$RECOMPILE" = true ]]; then
    pushd "$VCS_WORK"/solr
    ant clean server create-package
    if [[ "$?" -ne 0 ]]; then
      echo "BUILD FAIL - cloud.sh stopping, see above output for details"; popd; exit 7;
    fi
    popd
    copyTarball
  fi
}

################
# Copy tarball #
################
copyTarball() {
  echo "foo"
  pushd ${CLUSTER_WD}
  echo "bar"
  rm -rf solr-*  # remove tarball and dir to which it extracts
  echo "baz"
  pushd  # back to original dir to properly resolve vcs working dir
  echo "foobar:"$(pwd)
  if [[ ! -f $(ls "$VCS_WORK"/solr/package/solr-*.tgz) ]]; then
    echo "No solr tarball found, try again with -r"; popd; exit 10;
  fi
  cp "$VCS_WORK"/solr/package/solr-*.tgz ${CLUSTER_WD}
  pushd  # back into cluster wd to unpack
  tar xzvf solr-*.tgz
  popd
}

#############################################
# Test to see if port for zookeeper is open #
# Assume that zookeeper holds it if it is   #
#############################################
testZookeeper() {
  PORT_FOUND=$( netstat -an | grep '\b'${ZK_PORT}'\s' | grep LISTEN | awk '{print $4}' | sed -E 's/.*\b('${ZK_PORT}')\s*/\1/');
  if [[ -z "$PORT_FOUND" ]]; then
    echo "No process listening on port ${ZK_PORT}. Please start zookeeper and try again"; exit 8;
  fi
}
##########################
# Start server instances #
##########################
start(){
  testZookeeper
  echo "Starting servers"
  findSolr

  echo "SOLR=$SOLR"
  SOLR_ROOT=$("${SOLR}/server/scripts/cloud-scripts/zkcli.sh" -zkhost localhost:${ZK_PORT} -cmd getfile "/solr_${SAFE_DEST}" /dev/stdout);
  if [[ -z ${SOLR_ROOT} ]]; then
    # Need a fresh root in zookeeper...
    "${SOLR}/server/scripts/cloud-scripts/zkcli.sh" -zkhost localhost:${ZK_PORT} -cmd makepath "/solr_${SAFE_DEST}";
    "${SOLR}/server/scripts/cloud-scripts/zkcli.sh" -zkhost localhost:${ZK_PORT} -cmd put "/solr_${SAFE_DEST}" "created by cloud.sh"; # so we can test for existence next time
    "${SOLR}/server/scripts/cloud-scripts/zkcli.sh" -zkhost localhost:${ZK_PORT} -cmd putfile "/solr_${SAFE_DEST}/solr.xml" "${SOLR}/server/solr/solr.xml";
  fi

  ACTUAL_NUM_NODES=$(ls -1 -d ${CLUSTER_WD}/n* | wc -l )
  if [[ "$NUM_NODES" -eq 0 ]]; then
    NUM_NODES=${ACTUAL_NUM_NODES}
  else
    if [[ "$NUM_NODES" -ne "$ACTUAL_NUM_NODES" ]]; then
      # check that this isn't first time startup..
      if [[ "$ACTUAL_NUM_NODES" -ne 0 ]]; then
        echo "Requested $NUM_NODES for a cluster that already has $ACTUAL_NUM_NODES. Refusing to start!"; exit 9;
      fi
    fi
  fi

  if [[ "$NUM_NODES" -eq 0 ]]; then
    NUM_NODES=4 # nothing pre-existing found, default to 4
  fi
  echo "Final NUM_NODES is $NUM_NODES"
  for i in `seq 1 $NUM_NODES`; do
    mkdir -p "${CLUSTER_WD}/n${i}"
    argsArray=(-c -s $CLUSTER_WD_FULL/n${i} -z localhost:${ZK_PORT}/solr_${SAFE_DEST} -p 898${i} -m $MEMORY \
    -a "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=500${i} \
    -Dsolr.solrxml.location=zookeeper -Dsolr.log.dir=$CLUSTER_WD_FULL/n${i} $JVM_ARGS")
    FINAL_COMMAND="${SOLR}/bin/solr ${argsArray[@]}"
    echo ${FINAL_COMMAND}
    ${SOLR}/bin/solr "${argsArray[@]}"
  done

  touch ${CLUSTER_WD} # make this the most recently updated dir for ls -t

}

stop() {
  echo "Stopping servers"
  pushd ${CLUSTER_WD}
  SOLR=${CLUSTER_WD}/$(find . -maxdepth 1 -name 'solr*' -type d -print0 | xargs -0 ls -1 -td | sed -E 's/\.\/(solr.*)/\1/' | head -n1)
  popd

  "${SOLR}/bin/solr" stop -all
}

########################
# process the commands #
########################
case ${COMMAND} in
  new)
    testZookeeper
    mkdirIfReq
    recompileIfReq
    if [[ "$RECOMPILE" = false ]]; then
      copyTarball
    fi
    start
    ;;
  stop)
    stop
    ;;
  start)
    testZookeeper
    cleanIfReq
    recompileIfReq
    start
    ;;
  restart)
    testZookeeper
    stop
    cleanIfReq
    recompileIfReq
    start
    ;;
  *) echo "Invalid command $COMMAND"; exit 2;
esac
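A note on the zk layout used by cloud.sh above: findSolr() derives SAFE_DEST by replacing every slash in the cluster directory's absolute path with an underscore, and start() then roots each node at /solr_${SAFE_DEST} in zookeeper. A minimal sketch of that mapping, using a hypothetical path rather than anything from this commit:

    # sketch: how a cluster working directory maps to its zookeeper chroot
    CLUSTER_WD_FULL="/home/myuser/testing/2019-01-01"   # e.g. what pwd -P returns
    SAFE_DEST="${CLUSTER_WD_FULL//\//_}"                # slashes become underscores
    echo "zk chroot: /solr_${SAFE_DEST}"                # /solr__home_myuser_testing_2019-01-01

Each node is then started with -z localhost:${ZK_PORT}/solr_${SAFE_DEST}, so clusters created in different directories never share collections or configuration.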
@@ -1,37 +0,0 @@
#!/bin/bash

source ./functions.sh

case "$1" in
  start)
    start $2 $3 "$4"
    ;;
  stop)
    stop $2
    ;;
  kill)
    do_kill $2
    ;;
  reinstall)
    reinstall $2
    ;;
  rebuild)
    rebuild $2
    ;;
  status)
    status $2
    ;;
  cleanlogs)
    cleanlogs $2
    ;;
  taillogs)
    taillogs $2
    ;;
  createshard)
    createshard $2 $3 $4 $5
    ;;
  *)
    echo $"Usage: $0 { rebuild | reinstall <instanceid> | start <instanceid> [numshards] | stop <instanceid> | kill <instanceid> | status <instanceid> | cleanlogs <instanceid> | taillogs <instanceid> | createshard <instance> <collection> <coreName> [shardId] }"
    exit 1
esac
exit 0
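The dispatcher above simply forwards its positional arguments to the functions defined in functions.sh (shown later in this diff). Assuming it is saved as control.sh next to functions.sh (the file name is not visible in this extract), a typical session against instance 1 might look like:

    ./control.sh rebuild        # ant server dist from the parent directory
    ./control.sh reinstall 1    # recreate server1 from the freshly built server dir
    ./control.sh start 1 2      # start instance 1 (second argument is numshards)
    ./control.sh taillogs 1     # follow server1.log
    ./control.sh stop 1         # stop instance 1 via its STOP.PORT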
@@ -1,26 +0,0 @@
#!/bin/bash

cd ..

rm -r -f example2

rm -r -f dist
rm -r -f build
rm -r -f example/solr/zoo_data
rm -r -f example/solr/collection1/data
rm -f example/example.log

ant server dist

cp -r -f example example2


cd example
java -DzkRun -DnumShards=2 -DSTOP.PORT=7983 -DSTOP.KEY=key -Dbootstrap_conf=true -jar start.jar 1>example.log 2>&1 &

sleep 10

cd ../example2
java -Djetty.port=9574 -DzkRun -DzkHost=localhost:9983 -DSTOP.PORT=6574 -DSTOP.KEY=key -jar start.jar 1>example2.log 2>&1 &
@@ -1,36 +0,0 @@
#!/bin/bash

cd ..

rm -r -f example2
rm -r -f example3
rm -r -f example4

rm -r -f dist
rm -r -f build
rm -r -f example/solr/zoo_data
rm -r -f example/solr/collection1/data
rm -f example/example.log

ant server dist

cp -r -f example example2
cp -r -f example example3
cp -r -f example example4


cd example
java -DzkRun -DnumShards=2 -DSTOP.PORT=7983 -DSTOP.KEY=key -Dbootstrap_conf=true -jar start.jar 1>example.log 2>&1 &

# wait for config to go up
sleep 10

cd ../example2
java -Djetty.port=9574 -DzkRun -DzkHost=localhost:9983 -DSTOP.PORT=6574 -DSTOP.KEY=key -jar start.jar 1>example2.log 2>&1 &

cd ../example3
java -Djetty.port=9575 -DzkRun -DzkHost=localhost:9983 -DSTOP.PORT=6575 -DSTOP.KEY=key -jar start.jar 1>example3.log 2>&1 &

cd ../example4
java -Djetty.port=9576 -DzkHost=localhost:9983 -DnumShards=2 -DSTOP.PORT=6576 -DSTOP.KEY=key -jar start.jar 1>example4.log 2>&1 &
@@ -1,35 +0,0 @@
#!/bin/bash

cd ..

rm -r -f example2
rm -r -f example3
rm -r -f example4

rm -r -f dist
rm -r -f build
rm -r -f example/solr/zoo_data
rm -r -f example/solr/collection1/data
rm -f example/example.log

ant server dist

cp -r -f example example2
cp -r -f example example3
cp -r -f example example4


cd example
java -DzkRun -DnumShards=2 -DSTOP.PORT=7983 -DSTOP.KEY=key -Dbootstrap_conf=true -DzkHost=localhost:9983,localhost:14574,localhost:14585 -jar start.jar 1>example.log 2>&1 &

cd ../example2
java -Djetty.port=13574 -DzkRun -DzkHost=localhost:9983,localhost:14574,localhost:14585 -DSTOP.PORT=6574 -DSTOP.KEY=key -jar start.jar 1>example2.log 2>&1 &

cd ../example3
java -Djetty.port=13585 -DzkRun -DzkHost=localhost:9983,localhost:14574,localhost:14585 -DSTOP.PORT=6575 -DSTOP.KEY=key -jar start.jar 1>example3.log 2>&1 &

# wait for config to go up
sleep 10

cd ../example4
java -Djetty.port=13596 -DzkHost=localhost:9983,localhost:14574,localhost:14585 -DnumShards=2 -DSTOP.PORT=6576 -DSTOP.KEY=key -jar start.jar 1>example4.log 2>&1 &
@@ -1,77 +0,0 @@
INT_JAVA_OPTS="-server -Xms256M -Xmx256M"
BASE_PORT=8900
BASE_STOP_PORT=9900
ZK_PORT="2414"
ZK_CHROOT="solr"

rebuild() {
  echo "Rebuilding"
  cd ..
  rm -r -f dist
  rm -r -f build
  rm -r -f server/solr/zoo_data
  rm -f server/server.log
  ant server dist
}

setports() {
  PORT="$(( $BASE_PORT + $1 ))"
  STOP_PORT="$(( $BASE_STOP_PORT + $1 ))"
}

reinstall() {
  echo "Reinstalling instance $1"
  cd ..
  rm -rf server$1
  cp -r -f server server$1
}

start() {
  OPT="-DzkHost=localhost:$ZK_PORT/$ZK_CHROOT"
  NUMSHARDS=$2

  echo "Starting instance $1"

  setports $1
  cd ../server$1
  java $JAVA_OPTS -Djetty.port=$PORT $OPT -jar start.jar --module=http STOP.PORT=$STOP_PORT STOP.KEY=key jetty.base=. 1>server$1.log 2>&1 &
}

stop() {
  echo "Stopping instance $1"
  setports $1
  cd ../server$1
  java -jar start.jar --module=http STOP.PORT=$STOP_PORT STOP.KEY=key --stop
}

do_kill() {
  echo "Killing instance $1"
  setports $1
  PID=`ps aux|grep "STOP.PORT=$STOP_PORT"|grep -v grep|cut -b 8-15`
  if [ "" = "$PID" ]; then
    echo "not running?"
  else
    kill -9 $PID
  fi
}

status() {
  echo "Status:"
  ps aux|grep "STOP.PORT"|grep -v grep
}

cleanlogs() {
  cd ../server$1
  mv server$1.log server$1.oldlog
}

taillogs() {
  cd ../server$1
  tail -f server$1.log
}

createshard() {
  setports $1
  echo "Creating new shard @instance $1, collection=$2, name=$3, shard=$4"
  curl "http://127.0.0.1:$PORT/solr/admin/cores?action=CREATE&collection=$2&name=$3&shard=$4"
}
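setports() above derives both ports from the instance id, so the jetty and stop ports follow directly from BASE_PORT=8900 and BASE_STOP_PORT=9900. A quick check of the arithmetic for instance 3 (the instance id is only an example):

    BASE_PORT=8900
    BASE_STOP_PORT=9900
    i=3                                    # example instance id
    PORT="$(( BASE_PORT + i ))"            # 8903
    STOP_PORT="$(( BASE_STOP_PORT + i ))"  # 9903
    echo "jetty=$PORT stop=$STOP_PORT"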
@@ -1,39 +0,0 @@
#!/bin/bash

numServers=$1

baseJettyPort=8900
baseStopPort=9900

ZK_CHROOT="solr"

die () {
  echo >&2 "$@"
  exit 1
}

[ "$#" -eq 1 ] || die "1 argument required, $# provided, usage: solrcloud-start-existing.sh [numServers]"


cd ..

# Useful if you want to start up on an existing setup with new code mods
# ant server dist

cd serverzk
stopPort=1313
jettyPort=8900
exec -a jettyzk java -Xmx512m $JAVA_OPTS -Djetty.port=$jettyPort -DhostPort=$jettyPort -DzkRun -DzkHost=localhost:9900/$ZK_CHROOT -DzkRunOnly=true -jar start.jar --module=http STOP.PORT=$stopPort STOP.KEY=key jetty.base=. 1>serverzk.log 2>&1 &

cd ..

cd server

for (( i=1; i <= $numServers; i++ ))
do
  echo "starting server$i"
  cd ../server$i
  stopPort=`expr $baseStopPort + $i`
  jettyPort=`expr $baseJettyPort + $i`
  exec -a jetty java -Xmx1g $JAVA_OPTS -Djetty.port=$jettyPort -DzkHost=localhost:9900/$ZK_CHROOT -jar start.jar --module=http STOP.PORT=$stopPort STOP.KEY=key jetty.base=. 1>server$i.log 2>&1 &
done
@@ -1,74 +0,0 @@
#!/bin/bash

# These scripts are best effort developer scripts. No promises.

# To run on hdfs, try something along the lines of:
# export JAVA_OPTS="-Dsolr.directoryFactory=solr.HdfsDirectoryFactory -Dsolr.lock.type=hdfs -Dsolr.hdfs.home=hdfs://localhost:8020/solr -Dsolr.hdfs.confdir=/etc/hadoop_conf/conf"

# To use ZooKeeper security, try:
# export JAVA_OPTS="-DzkACLProvider=org.apache.solr.common.cloud.VMParamsAllAndReadonlyDigestZkACLProvider -DzkCredentialsProvider=org.apache.solr.common.cloud.VMParamsSingleSetCredentialsDigestZkCredentialsProvider -DzkDigestUsername=admin-user -DzkDigestPassword=admin-password -DzkDigestReadonlyUsername=readonly-user -DzkDigestReadonlyPassword=readonly-password"
#
# To create a collection, curl "localhost:8901/solr/admin/collections?action=CREATE&name=collection1&numShards=2&replicationFactor=1&maxShardsPerNode=10"
# To add a document, curl http://localhost:8901/solr/collection1/update -H 'Content-type:application/json' -d '[{"id" : "book1"}]'

numServers=$1
numShards=$2

baseJettyPort=8900
baseStopPort=9900

zkAddress=localhost:9900/solr

die () {
  echo >&2 "$@"
  exit 1
}

[ "$#" -eq 1 ] || die "1 argument required, $# provided, usage: solrcloud-start.sh [numServers]"

cd ..

for (( i=1; i <= $numServers; i++ ))
do
  echo "try to remove existing directory: server$i"
  rm -r -f server$i
done


rm -r -f dist
rm -r -f build
rm -r -f server/solr/zoo_data
rm -f server/server.log

ant -f ../build.xml clean
ant server dist

for (( i=1; i <= $numServers; i++ ))
do
  echo "create server$i"
  cp -r -f server server$i
done

rm -r -f serverzk
cp -r -f server serverzk
cp core/src/test-files/solr/solr-no-core.xml serverzk/solr/solr.xml
rm -r -f serverzk/solr/collection1/core.properties
cd serverzk
stopPort=1313
jettyPort=8900
exec -a jettyzk java -Xmx512m $JAVA_OPTS -Djetty.port=$jettyPort -DhostPort=$jettyPort -DzkRun=localhost:9900/solr -DzkHost=$zkAddress -DzkRunOnly=true -jar start.jar --module=http STOP.PORT=$stopPort STOP.KEY=key jetty.base=. 1>serverzk.log 2>&1 &
cd ..

# upload config files
java -classpath "server/solr-webapp/webapp/WEB-INF/lib/*:server/lib/ext/*" $JAVA_OPTS org.apache.solr.cloud.ZkCLI -zkhost $zkAddress -cmd upconfig --confdir server/solr/configsets/basic_configs/conf --confname basic_configs

cd server

for (( i=1; i <= $numServers; i++ ))
do
  echo "starting server$i"
  cd ../server$i
  stopPort=`expr $baseStopPort + $i`
  jettyPort=`expr $baseJettyPort + $i`
  exec -a jetty java -Xmx1g $JAVA_OPTS -Djetty.port=$jettyPort -DzkHost=$zkAddress -jar start.jar --module=http STOP.PORT=$stopPort STOP.KEY=key jetty.base=. 1>server$i.log 2>&1 &
done
@@ -1,64 +0,0 @@
#!/bin/bash

numServers=$1
baseJettyPort=8900
baseStopPort=9900

die () {
  echo >&2 "$@"
  exit 1
}

[ "$#" -eq 1 ] || die "1 argument required, $# provided, usage: stop.sh {numServers}"

cd ../server

for (( i=1; i <= $numServers; i++ ))
do
  stopPort=`expr $baseStopPort + $i`
  echo "stopping server$i, stop port is $stopPort"
  cd ../server$i
  java -jar start.jar --module=http STOP.PORT=$stopPort STOP.KEY=key --stop
done


mkdir ../server-lastlogs

for (( i=1; i <= $numServers; i++ ))
do
  cd ../server$i

  jettyPort=`expr $baseJettyPort + $i`
  echo "Make sure jetty stops and wait for it: $jettyPort"

  pid=`lsof -i:$jettyPort -sTCP:LISTEN -t`
  echo "pid:$pid"
  #kill $pid
  #wait $pid
  if [ ! -z "$pid" ]
  then
    while [ -e /proc/$pid ]; do sleep 1; done
  fi

  # save the last shutdown logs
  echo "copy server$i.log to lastlogs"
  cp -r -f server$i.log ../server-lastlogs/server-last$i.log
done

# stop zk runner
java -jar start.jar --module=http STOP.PORT=1313 STOP.KEY=key --stop

echo "wait for port to be available: $baseJettyPort"

pid=`lsof -i:$baseJettyPort -sTCP:LISTEN -t`
echo "pid:$pid"
#kill $pid
#wait $pid
if [ ! -z "$pid" ]
then
  while [ -e /proc/$pid ]; do sleep 0.1; done
fi
nc -w 30 127.0.0.1 $baseJettyPort

sleep 5
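The wait loops above poll /proc/$pid, which exists only on Linux. On macOS or other systems without procfs, an equivalent wait can be written with kill -0; a small untested sketch:

    # portable sketch: wait for a pid to exit without relying on /proc
    if [ -n "$pid" ]; then
      while kill -0 "$pid" 2>/dev/null; do
        sleep 1
      done
    fi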