This commit is contained in:
Karl Wright 2018-03-16 10:44:08 -04:00
commit cbd4b671ff
20 changed files with 1417 additions and 81 deletions

View File

@ -113,7 +113,7 @@
</subant>
</target>
<target name="validate" description="Validate dependencies, licenses, etc." depends="-validate-source-patterns,resolve-groovy,rat-sources-typedef,-install-forbidden-apis">
<target name="validate" description="Validate dependencies, licenses, etc." depends="validate-source-patterns,resolve-groovy,rat-sources-typedef,-install-forbidden-apis">
<subant target="validate" inheritall="false" failonerror="true">
<fileset dir="lucene" includes="build.xml" />
<fileset dir="solr" includes="build.xml" />
@ -124,7 +124,7 @@
</subant>
</target>
<target name="-validate-source-patterns" unless="disable.source-patterns" depends="resolve-groovy,rat-sources-typedef">
<target name="validate-source-patterns" unless="disable.source-patterns" depends="resolve-groovy,rat-sources-typedef">
<groovy taskname="source-patterns" classpathref="rat.classpath" src="${common.dir}/tools/src/groovy/check-source-patterns.groovy"/>
</target>

View File

@ -6,10 +6,11 @@ as to the usefulness of the tools.
Description of dev-tools/ contents:
./size-estimator-lucene-solr.xls -- Spreadsheet for estimating memory and disk usage in Lucene/Solr
./doap/ -- Lucene and Solr project descriptors in DOAP RDF format.
./eclipse/ -- Used to generate project descriptors for the Eclipse IDE.
./git/ -- Git documentation and resources.
./idea/ -- Used to generate project descriptors for IntelliJ's IDEA IDE.
./maven/ -- Mavenizes the Lucene/Solr packages
./netbeans/ -- Used to generate project descriptors for the Netbeans IDE.
./scripts/ -- Odds and ends for building releases, etc.
./doap/ -- Lucene and Solr project descriptors in DOAP RDF format.
./eclipse/ -- Used to generate project descriptors for the Eclipse IDE.
./git/ -- Git documentation and resources.
./idea/ -- Used to generate project descriptors for IntelliJ's IDEA IDE.
./maven/ -- Mavenizes the Lucene/Solr packages
./netbeans/ -- Used to generate project descriptors for the Netbeans IDE.
./scripts/ -- Odds and ends for building releases, etc.
./test-patch/ -- Scripts for automatically validating patches

View File

@ -0,0 +1,71 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------------
#
# This script is a copy of the one used by ASF Jenkins's PreCommit-LUCENE-Build job.
#
# See the script "test-patch.sh" in this directory for the script to use for
# local manual patch validation.
#
# For other examples of scripts used to invoke Yetus, see the configuration on the
# PreCommit jobs on ASF Jenkins: https://builds.apache.org/view/PreCommit+Builds/
#
# ------------>8-------------------------->8-------------------------->8------------
#!/usr/bin/env bash
# This is a modified copy of the script from Jenkins project "PreCommit-HADOOP-Build"
# Layout inside the Jenkins ${WORKSPACE}: a private Yetus install, an output
# dir for report artifacts, and the lucene-solr checkout under sourcedir/.
YETUSDIR=${WORKSPACE}/yetus
TESTPATCHBIN=${YETUSDIR}/precommit/test-patch.sh
ARTIFACTS_SUBDIR=out
ARTIFACTS=${WORKSPACE}/${ARTIFACTS_SUBDIR}
BASEDIR=${WORKSPACE}/sourcedir

# Start from a clean artifacts dir so stale reports never leak into this run.
rm -rf "${ARTIFACTS}"
mkdir -p "${ARTIFACTS}"

PIDMAX=10000 # Arbitrary limit; may need to revisit

# Download and unpack the pinned Yetus release into ${YETUSDIR}.
YETUS_RELEASE=0.7.0
YETUS_TARBALL="yetus-${YETUS_RELEASE}.tar.gz"
echo "Downloading Yetus ${YETUS_RELEASE}"
curl -L "https://api.github.com/repos/apache/yetus/tarball/rel/${YETUS_RELEASE}" -o "${YETUS_TARBALL}"
rm -rf "${YETUSDIR}"
mkdir -p "${YETUSDIR}"
gunzip -c "${YETUS_TARBALL}" | tar xpf - -C "${YETUSDIR}" --strip-components 1

# Assemble the test-patch.sh invocation. ${ISSUE_NUM} and ${JIRA_PASSWORD}
# are injected by the Jenkins job configuration.
YETUS_ARGS+=("--project=LUCENE")
YETUS_ARGS+=("--basedir=${BASEDIR}")
YETUS_ARGS+=("--patch-dir=${ARTIFACTS}")
YETUS_ARGS+=("--build-url-artifacts=artifact/${ARTIFACTS_SUBDIR}")
YETUS_ARGS+=("--personality=${BASEDIR}/dev-tools/test-patch/lucene-solr-yetus-personality.sh")
YETUS_ARGS+=("--jira-user=lucenesolrqa")
YETUS_ARGS+=("--jira-password=${JIRA_PASSWORD}")
YETUS_ARGS+=("--brief-report-file=${ARTIFACTS}/email-report.txt")
YETUS_ARGS+=("--console-report-file=${ARTIFACTS}/console-report.txt")
YETUS_ARGS+=("--html-report-file=${ARTIFACTS}/console-report.html")
YETUS_ARGS+=("--proclimit=${PIDMAX}")
YETUS_ARGS+=("--console-urls")
YETUS_ARGS+=("--debug")
YETUS_ARGS+=("--skip-dirs=dev-tools")
YETUS_ARGS+=("--bugcomments=jira")
YETUS_ARGS+=("--resetrepo")
YETUS_ARGS+=("--run-tests")
YETUS_ARGS+=("--contrib-guide=https://wiki.apache.org/lucene-java/HowToContribute#Contributing_your_work")
YETUS_ARGS+=("--jenkins")
YETUS_ARGS+=("LUCENE-${ISSUE_NUM}")

# BUGFIX: quote the script path so a WORKSPACE containing spaces does not
# word-split the command.
/bin/bash "${TESTPATCHBIN}" "${YETUS_ARGS[@]}"

View File

@ -0,0 +1,406 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a Yetus precommit "personality" (aka customized configuration) for Lucene/Solr.
#
# See the Yetus precommit documentation at https://yetus.apache.org/documentation/0.7.0/
# and especially https://yetus.apache.org/documentation/0.7.0/precommit-advanced/.
# See also the Yetus source code for other projects' personality examples at
# https://git-wip-us.apache.org/repos/asf?p=yetus.git;f=precommit/personality;a=tree;hb=HEAD
#
# To add a new validation method (aka "plugin"):
# 1) Add its name to the PLUGIN_LIST below
# 2) Invoke "add_test_type" with it below
# 3) Add a "<plugin>_filefilter" function to decide whether the plugin needs to be run based on changed files
# 4) Add a "<plugin>_rebuild" function to call out to ant to perform the validation method.
# See examples of the above-described function types ^^ below.
# Both compile+javac plugins are required, as well as unit+junit: in both cases, neither work individually
# Register the full Yetus plugin set. compile+javac and unit+junit must both
# be enabled as pairs: neither member of a pair works individually.
# The concatenated list is identical to registering it in three += chunks.
PLUGIN_LIST="ant,jira,compile,javac,unit,junit,test4tests,testoutput"
PLUGIN_LIST+=",checkluceneversion,ratsources,checkforbiddenapis"
PLUGIN_LIST+=",checklicenses,validatesourcepatterns,validaterefguide"
personality_plugins "${PLUGIN_LIST}"

# Declare each Lucene/Solr-specific validation as a Yetus test type, plus the
# custom test-output format used to locate JUnit results.
for custom_test_type in checkluceneversion ratsources checkforbiddenapis \
    checklicenses validatesourcepatterns validaterefguide; do
  add_test_type "${custom_test_type}"
done
add_test_format "testoutput"
## @description Globals specific to this personality
## @audience private
## @stability evolving
function personality_globals
{
# Globals consumed by the Yetus framework itself; SC2034 ("appears unused")
# is suppressed because they are read outside this file.
#shellcheck disable=SC2034
# Branch to test against when the patch does not name one.
PATCH_BRANCH_DEFAULT=master
#shellcheck disable=SC2034
# Issue keys this personality accepts (LUCENE-nnn / SOLR-nnn).
JIRA_ISSUE_RE='^(LUCENE|SOLR)-[0-9]+$'
#shellcheck disable=SC2034
# Only issues in this JIRA workflow state are picked up.
JIRA_STATUS_RE='Patch Available'
#shellcheck disable=SC2034
# GitHub mirror used for PR-based patches.
GITHUB_REPO="apache/lucene-solr"
#shellcheck disable=SC2034
# Lucene/Solr builds with ant (not maven) at this point in history.
BUILDTOOL=ant
}
## @description Queue up modules for this personality
## @audience private
## @stability evolving
## @param repostatus
## @param testtype
function personality_modules
{
  local repostatus=$1
  local testtype=$2
  local module
  local extra
  local moduleType="submodules"

  yetus_debug "Personality (lucene-solr): ${repostatus} ${testtype}"

  clear_personality_queue

  # Step 1: decide the granularity at which this test type operates.
  case ${testtype} in
    clean|distclean|validatesourcepatterns)
      moduleType="top"
      ;;
    checkluceneversion)
      moduleType="solr"
      ;;
    ratsources)
      moduleType="submodules"
      ;;
    checkforbiddenapis)
      moduleType="both"
      ;;
    checklicenses)
      moduleType="mains"
      ;;
    validaterefguide)
      moduleType="solr-ref-guide"
      ;;
    compile)
      moduleType="submodules"
      extra="compile-test"
      ;;
    junit|unit)
      moduleType="submodules"
      extra="test"
      ;;
    *)
      ;;
  esac

  # Step 2: enqueue the concrete module dirs for that granularity.
  case ${moduleType} in
    submodules)
      for module in "${CHANGED_MODULES[@]}"; do
        if [[ ! "${module}" =~ ^lucene/(licenses|site) ]]; then # blacklist lucene/ dirs that aren't modules
          if [[ "${module}" =~ ^(lucene/(analysis/[^/]+|[^/]+)) ]]; then
            local lucene_module=${BASH_REMATCH[0]}
            personality_enqueue_module "${lucene_module}" "${extra}"
          elif [[ "${module}" =~ ^solr/(core|solrj|test-framework|solr-ref-guide|contrib/[^.]+) ]]; then # whitelist solr/ modules
            local solr_module=${BASH_REMATCH[0]}
            # In solr-ref-guide module, do not execute "compile" or "unit" plugins
            if [[ ! "${solr_module}" == solr/solr-ref-guide || ! ${testtype} =~ ^(compile|unit)$ ]]; then
              personality_enqueue_module "${solr_module}" "${extra}"
            fi
          fi
        fi
      done
      ;;
    lucene|solr)
      personality_enqueue_module "${moduleType}" "${extra}"
      ;;
    top)
      personality_enqueue_module . "${extra}"
      ;;
    mains)
      personality_enqueue_module "lucene" "${extra}"
      personality_enqueue_module "solr" "${extra}"
      ;;
    both) # solr, lucene, or both
      # personality_enqueue_module KEEPS duplicates, so de-dupe first
      # BUGFIX: declare the two flags as separate words. The original
      # "local doSolr=0,doLucene=0" assigned the literal string
      # "0,doLucene=0" to doSolr and left doLucene undeclared.
      local doSolr=0 doLucene=0
      for module in "${CHANGED_MODULES[@]}"; do
        if [[ "${module}" =~ ^solr/ ]]; then doSolr=1; fi
        if [[ "${module}" =~ ^lucene/ ]]; then doLucene=1; fi
      done
      if [[ ${doLucene} == 1 ]]; then
        if [[ ${doSolr} == 1 ]]; then
          personality_enqueue_module . "${extra}"
        else
          personality_enqueue_module "lucene" "${extra}"
        fi
      elif [[ ${doSolr} == 1 ]]; then
        personality_enqueue_module "solr" "${extra}"
      fi
      ;;
    solr-ref-guide)
      for module in "${CHANGED_MODULES[@]}"; do
        if [[ "${module}" =~ ^solr/solr-ref-guide ]]; then
          personality_enqueue_module "solr/solr-ref-guide" "${extra}"
        fi
      done
      ;;
    *)
      ;;
  esac
}
## @description Add tests based upon personality needs
## @audience private
## @stability evolving
## @param filename
function personality_file_tests
{
  declare filename=$1
  yetus_debug "Using Lucene/Solr-specific personality_file_tests"
  # BUGFIX: the file had garbled "$(unknown)" where the filename parameter
  # belongs; use ${filename} (declared above) throughout.
  # solr-ref-guide changes are documentation-only: no compile/unit needed.
  if [[ ! ${filename} =~ solr-ref-guide ]]; then
    if [[ ${filename} =~ build\.xml$ || ${filename} =~ /src/(java|resources|test|test-files|tools) ]]; then
      yetus_debug "tests/unit: ${filename}"
      add_test compile
      add_test javac
      add_test unit
    fi
  fi
}
## @description hook to reroute junit folder to search test results based on the module
## @audience private
## @stability evolving
## @param module
## @param buildlogfile
function testoutput_process_tests
{
# shellcheck disable=SC2034
declare module=$1
declare buildlogfile=$2
# Redirect JUNIT_TEST_OUTPUT_DIR to where ant writes JUnit results for this
# module. Paths are relative — presumably to the module's own directory
# (hence two "../" for the doubly-nested lucene/analysis/* and
# solr/contrib/* modules); TODO confirm against the junit plugin's cwd.
if [[ "${module}" =~ ^lucene/analysis/ ]]; then
JUNIT_TEST_OUTPUT_DIR="../../build/${module#*/}"
elif [[ "${module}" =~ ^solr/contrib/extraction ]]; then
# The "extraction" contrib's build dir is named solr-cell, not
# solr-extraction, so it needs a special case.
JUNIT_TEST_OUTPUT_DIR="../../build/contrib/solr-cell"
elif [[ "${module}" =~ ^solr/contrib/(.*) ]]; then
# Other contribs map solr/contrib/X -> build/contrib/solr-X.
JUNIT_TEST_OUTPUT_DIR="../../build/contrib/solr-${BASH_REMATCH[1]}"
elif [[ "${module}" =~ ^(lucene|solr)/ ]]; then
JUNIT_TEST_OUTPUT_DIR="../build/${module#*/}"
fi
yetus_debug "Rerouting build dir for junit to ${JUNIT_TEST_OUTPUT_DIR}"
}
## @description checkluceneversion file filter
## @audience private
## @stability evolving
## @param filename
function checkluceneversion_filefilter
{
  local filename=$1
  # BUGFIX: replace garbled "$(unknown)" with ${filename}.
  # Example configs embed a luceneMatchVersion; re-check when they change.
  if [[ ${filename} =~ ^solr/(example|server/solr/configsets) ]]; then
    yetus_debug "tests/checkluceneversion: ${filename}"
    add_test checkluceneversion
  fi
}
## @description checkluceneversion test
## @audience private
## @stability evolving
## @param repostatus
function checkluceneversion_rebuild
{
  # Thin wrapper: run the "check-example-lucene-match-version" ant target
  # through the shared driver, reported under the configsets heading.
  declare repostatus="$1"
  lucene_ant_command "${repostatus}" "checkluceneversion" "check-example-lucene-match-version" "Check configsets' lucene version"
}
## @description ratsources file filter
## @audience private
## @stability evolving
## @param filename
function ratsources_filefilter
{
  local filename=$1
  # BUGFIX: replace garbled "$(unknown)" with ${filename}.
  # Source files and XML need a RAT (release-audit) license check.
  if [[ ${filename} =~ /src/|\.xml$ ]] ; then
    yetus_debug "tests/ratsources: ${filename}"
    add_test ratsources
  fi
}
## @description ratsources test
## @audience private
## @stability evolving
## @param repostatus
function ratsources_rebuild
{
  # Thin wrapper: run the "rat-sources" ant target through the shared
  # driver, reported under the "Release audit (RAT)" heading.
  declare repostatus="$1"
  lucene_ant_command "${repostatus}" "ratsources" "rat-sources" "Release audit (RAT)"
}
## @description checkforbiddenapis file filter
## @audience private
## @stability evolving
## @param filename
function checkforbiddenapis_filefilter
{
  local filename=$1
  # BUGFIX: replace garbled "$(unknown)" with ${filename}.
  # Only Java sources are subject to the forbidden-apis checker.
  if [[ ${filename} =~ \.java$ ]] ; then
    yetus_debug "tests/checkforbiddenapis: ${filename}"
    add_test checkforbiddenapis
  fi
}
## @description checkforbiddenapis test
## @audience private
## @stability evolving
## @param repostatus
function checkforbiddenapis_rebuild
{
  # Thin wrapper: run the "check-forbidden-apis" ant target through the
  # shared driver.
  declare repostatus="$1"
  lucene_ant_command "${repostatus}" "checkforbiddenapis" "check-forbidden-apis" "Check forbidden APIs"
}
## @description checklicenses file filter
## @audience private
## @stability evolving
## @param filename
function checklicenses_filefilter
{
  local filename=$1
  # BUGFIX: replace garbled "$(unknown)" with ${filename}.
  # License/checksum metadata changes require the license audit.
  if [[ ${filename} =~ (lucene|solr)/licenses/|lucene/ivy-versions.properties$ ]]; then
    yetus_debug "tests/checklicenses: ${filename}"
    add_test checklicenses
  fi
}
## @description checklicenses test
## @audience private
## @stability evolving
## @param repostatus
function checklicenses_rebuild
{
  # Thin wrapper: run the "check-licenses" ant target through the shared
  # driver.
  declare repostatus="$1"
  lucene_ant_command "${repostatus}" "checklicenses" "check-licenses" "Check licenses"
}
## @description validaterefguide file filter
## @audience private
## @stability evolving
## @param filename
function validaterefguide_filefilter
{
  local filename=$1
  # BUGFIX: replace garbled "$(unknown)" with ${filename}.
  # Only ref-guide changes need the HTML validation pass.
  if [[ ${filename} =~ solr/solr-ref-guide ]]; then
    yetus_debug "tests/validaterefguide: ${filename}"
    add_test validaterefguide
  fi
}
## @description validaterefguide test
## @audience private
## @stability evolving
## @param repostatus
function validaterefguide_rebuild
{
  # Thin wrapper: run the "bare-bones-html-validation" ant target through
  # the shared driver.
  declare repostatus="$1"
  lucene_ant_command "${repostatus}" "validaterefguide" "bare-bones-html-validation" "Validate ref guide"
}
## @description validatesourcepatterns file filter
## @audience private
## @stability evolving
## @param filename
function validatesourcepatterns_filefilter
{
  local filename=$1
  # BUGFIX: replace garbled "$(unknown)" with ${filename}.
  # Any file type covered by check-source-patterns.groovy triggers the check.
  if [[ ${filename} =~ \.(java|jflex|py|pl|g4|jj|html|js|css|xml|xsl|vm|sh|cmd|bat|policy|properties|mdtext|groovy|template|adoc|json)$ ]]; then
    yetus_debug "tests/validatesourcepatterns: ${filename}"
    add_test validatesourcepatterns
  fi
}
## @description validatesourcepatterns test
## @audience private
## @stability evolving
## @param repostatus
function validatesourcepatterns_rebuild
{
  # Thin wrapper: run the "validate-source-patterns" ant target through the
  # shared driver.
  declare repostatus="$1"
  lucene_ant_command "${repostatus}" "validatesourcepatterns" "validate-source-patterns" "Validate source patterns"
}
## @description  Run one ant target for every module queued by
##               personality_modules, recording per-module pass/fail status
##               and timing for the report.
## @audience     private
## @stability    evolving
## @param        repostatus   "branch" or "patch"
## @param        testname     Yetus plugin/test-type name
## @param        antcommand   ant target to invoke
## @param        title        human-readable section title for the report
## @return       0 if every module passed, 1 otherwise
function lucene_ant_command
{
  declare repostatus=$1
  declare testname=$2
  declare antcommand=$3
  declare title=$4
  declare result=0
  declare i=0
  declare module
  declare fn

  # These validations only run against the patched tree; the pristine
  # branch is assumed to already pass.
  if [[ "${repostatus}" = branch ]]; then
    return 0
  fi

  if ! verify_needed_test ${testname}; then
    echo "${BUILDMODEMSG} does not need ${testname} testing."
    return 0
  fi

  big_console_header "${title}"
  personality_modules ${repostatus} ${testname}

  until [[ ${i} -eq ${#MODULE[@]} ]]; do
    if [[ ${MODULE_STATUS[${i}]} == -1 ]]; then
      # Module already failed an earlier stage: count it and move on.
      ((result=result+1))
      ((i=i+1))
      continue
    fi
    ANT_ARGS=${antcommand}
    start_clock
    module=${MODULE[${i}]}
    fn=$(module_file_fragment "${module}")
    logfilename="${repostatus}-${antcommand}-${fn}.txt"
    logfile="${PATCH_DIR}/${logfilename}"
    buildtool_cwd "${i}"
    echo_and_redirect "${logfile}" $(ant_executor)
    if [[ $? == 0 ]] ; then
      module_status ${i} +1 "${logfilename}" "${title}" "${antcommand} passed"
    else
      module_status ${i} -1 "${logfilename}" "${title}" "${antcommand} failed"
      ((result=result+1))
    fi
    # BUGFIX: record this module's elapsed time BEFORE advancing the index.
    # The original incremented ${i} first, so each timing was attributed to
    # the following module (and the last write landed past the array end).
    savestop=$(stop_clock)
    MODULE_STATUS_TIMER[${i}]=${savestop}
    ((i=i+1))
  done
  ANT_ARGS=""

  if [[ ${result} -gt 0 ]]; then
    modules_messages ${repostatus} "${title}" false
    return 1
  fi
  modules_messages ${repostatus} "${title}" true
  return 0
}

View File

@ -0,0 +1,91 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Invoke Yetus locally to validate a patch against Lucene/Solr, and
# (optionally) post a validation report comment on the passed-in JIRA issue
# from which the patch was downloaded.
#
# NB 1: The Lucene/Solr Yetus personality currently performs the equivalent
# of "ant precommit" and "ant test" in modified modules; instead of using
# this script to test your changes, please consider invoking those targets
# directly.
#
# NB 2: The Jenkins job "PreCommit-Admin" automatically detects new patches
# posted to LUCENE and SOLR JIRA issues that are in the "Patch Available"
# state, and then queues the appropriate "PreCommit-LUCENE-Build" or
# "PreCommit-SOLR-Build" job pointing to the JIRA hosting the new patch.
# Those jobs perform the same checks as this script, and like this script,
# will post a comment on the JIRA issue. As a result, manual invocation
# (e.g. via this script) should ordinarily not be necessary.
#
# Environment variable ${YETUS_HOME} must point to the separately installed
# Yetus home directory, e.g. the "rel/0.7.0" tag checked out from a local
# Yetus Git repository.
#
# Environment variable ${PROJECT_DIR} must point to a local Lucene/Solr git
# workspace dir.
#
# The sole cmdline param can be a JIRA issue, a local patch file,
# or a URL to a patch file.
#
# If the cmdline param is a JIRA issue, the patch to download and validate
# will be the most recently uploaded patch on the issue. See the patch
# naming schema that Yetus recognizes:
# https://yetus.apache.org/documentation/in-progress/precommit-patchnames/
#
# If the cmdline param is a JIRA issue and you provide JIRA user/password via
# environment variables ${JIRA_USER} and ${JIRA_PASSWORD}, a patch validation
# report will be posted as a comment on the JIRA issue.
help () {
  # Print the four supported invocation forms ($0 expands inside the
  # unquoted heredoc, exactly as it did in the echo calls).
  cat << EOF
Usage 1: [ JIRA_USER=xxx JIRA_PASSWORD=yyy ] PROJECT_DIR=/path/to/lucene-solr YETUS_HOME=/path/to/yetus $0 SOLR-12345
Usage 2: [ JIRA_USER=xxx JIRA_PASSWORD=yyy ] PROJECT_DIR=/path/to/lucene-solr YETUS_HOME=/path/to/yetus $0 LUCENE-12345
Usage 3: PROJECT_DIR=/path/to/lucene-solr YETUS_HOME=/path/to/yetus $0 ../local.patch
Usage 4: PROJECT_DIR=/path/to/lucene-solr YETUS_HOME=/path/to/yetus $0 http://example.com/remote.patch
EOF
}
# Require the environment variables and a patch reference; print usage and
# bail for missing input or any -h/--help/-? flag.
if [[ -z "${PROJECT_DIR}" || -z "${YETUS_HOME}" || -z "${1}" || "${1}" =~ ^-(-?h(elp)?|\?)$ ]] ; then
  help
  exit 1
fi

PATCH_REF=${1}
TEST_PATCH_BIN="${YETUS_HOME}/precommit/test-patch.sh"
SCRIPT_DIR="$( cd "$( dirname "${0}" )" && pwd )"

declare -a YETUS_ARGS
if [[ ${PATCH_REF} =~ ^(LUCENE|SOLR)- ]]; then
  # BUGFIX: use capture group 1 ("LUCENE"/"SOLR") for the project name.
  # BASH_REMATCH[0] is the whole match, which includes the trailing dash
  # (e.g. "LUCENE-"), yielding an invalid --project value.
  JIRA_PROJECT=${BASH_REMATCH[1]}
  YETUS_ARGS+=("--project=${JIRA_PROJECT}")
  # Only attempt to post a JIRA comment when credentials were supplied.
  if [[ -n "${JIRA_USER}" ]] && [[ -n "${JIRA_PASSWORD}" ]] ; then
    YETUS_ARGS+=("--jira-user=${JIRA_USER}")
    YETUS_ARGS+=("--jira-password=${JIRA_PASSWORD}")
    YETUS_ARGS+=("--bugcomments=jira")
  fi
fi
YETUS_ARGS+=("--basedir=${PROJECT_DIR}")
YETUS_ARGS+=("--personality=${SCRIPT_DIR}/lucene-solr-yetus-personality.sh")
YETUS_ARGS+=("--skip-dirs=dev-tools")
YETUS_ARGS+=("--resetrepo")
YETUS_ARGS+=("--run-tests")
YETUS_ARGS+=("--debug")
YETUS_ARGS+=("--robot")
YETUS_ARGS+=("${PATCH_REF}")

# BUGFIX: quote the script path so a YETUS_HOME containing spaces does not
# word-split the command.
/bin/bash "${TEST_PATCH_BIN}" "${YETUS_ARGS[@]}"

View File

@ -103,6 +103,10 @@ New Features
deleted documents around for later reuse. See "IW.softUpdateDocument(...)"
for reference. (Simon Willnauer)
Other
* SOLR-10912: Add automatic patch validation. (Mano Kovacs, Steve Rowe)
======================= Lucene 7.3.0 =======================
API Changes

View File

@ -449,6 +449,9 @@ Other Changes
* SOLR-12099: Remove reopenReaders attribute from 'IndexConfig in SolrConfig' page in ref guide. (shalin)
* SOLR-12098: Document the Lucene spins auto-detection and its effect on CMS dynamic defaults.
(Cassandra Targett, shalin)
================== 7.2.1 ==================
Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

53
solr/bin-test/README.md Normal file
View File

@ -0,0 +1,53 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# bin/solr Tests
This directory contains tests for the `bin/solr` command-line scripts. For
instructions on running these tests, run `bin-test/test -h`.
## Test Harness/Infrastructure
Where possible, these tests model themselves after the pattern well-established
by JUnit.
- JUnit's `@Test` is emulated using the function name prefix: `solr_test_`
Any bash functions starting with that prefix are identified as tests.
- JUnit's `@Before` and `@After` are imitated using the function names
`solr_unit_test_before`, and `solr_unit_test_after`. If a suite contains
these functions, they will be run before and after each test.
- JUnit's `@BeforeClass` and `@AfterClass` are imitated using the function
names: `solr_suite_before`, and `solr_suite_after`. If a suite contains
these functions, they will be run at the very beginning and end of suite
execution.
- Test success/failure is judged by the test's return value. 0 indicates
success; non-zero indicates failure. Unlike in JUnit/Java which has
exceptions, bash assertions have no way to suspend test execution on
failure. Because of this, assertions are often followed by ` || return 1`,
which ensures the test exits immediately if the assertion fails. Existing
tests provide examples of this.
## Test Helpers
A variety of assertions and general utilities are available for use in
`bin-test/utils/`.
## Limitations
1. Currently this test suite is only available for \*nix environments
2. Tests written in bash are both slow, and harder to maintain than traditional
JUnit tests. If a test _can_ be written as a JUnit test, it should be. This
suite should only be used to test things that cannot be tested by JUnit.

191
solr/bin-test/test Executable file
View File

@ -0,0 +1,191 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function ensure_cwd_is_solr() {
  # Abort unless invoked from the "solr" directory: all paths in this
  # harness (bin/solr, bin-test/...) are relative to it.
  local dir_name
  dir_name=$(basename "$(pwd)")
  if [[ "$dir_name" != "solr" ]]; then
    echo "ERROR: Please run this script from the 'solr' directory."
    exit 1
  fi
}
function run_suite_before_if_present() {
  # JUnit @BeforeClass analogue: run the suite-level setup hook if the
  # sourced suite defines one.
  if [[ "$(type -t solr_suite_before)" == "function" ]] ; then
    solr_suite_before
  fi
}

function run_suite_after_if_present() {
  # JUnit @AfterClass analogue: run the suite-level teardown hook if defined.
  if [[ "$(type -t solr_suite_after)" == "function" ]] ; then
    solr_suite_after
  fi
}

function run_test_before_if_present() {
  # JUnit @Before analogue: run the per-test setup hook if defined.
  if [[ "$(type -t solr_unit_test_before)" == "function" ]] ; then
    solr_unit_test_before
  fi
}

function run_test_after_if_present() {
  # JUnit @After analogue: run the per-test teardown hook if defined.
  if [[ "$(type -t solr_unit_test_after)" == "function" ]] ; then
    solr_unit_test_after
  fi
}
function run_test_suite() {
  # Source a suite file, run every solr_test_* function in it (optionally
  # restricted to a single named test), then unset the suite's functions so
  # one suite cannot leak hooks or tests into the next.
  local test_file=$1
  local test_name_filter="${2:-}"
  echo "Executing $test_file"
  # Quote the path in case a suite filename ever contains spaces.
  source "$test_file"
  test_names="$(declare -F | awk '{print $3}' | grep 'solr_test')"
  run_suite_before_if_present
  for test_name in $test_names
  do
    if [[ -z "$test_name_filter" || "$test_name_filter" == "$test_name" ]]; then
      run_single_test $test_name
    fi
    unset -f $test_name
  done
  run_suite_after_if_present
  # Consistency fix: use "unset -f" for the hook FUNCTIONS, matching the
  # per-test cleanup above. Bare "unset" tries a variable first and only
  # falls back to the function of the same name.
  unset -f solr_suite_before
  unset -f solr_suite_after
  unset -f solr_unit_test_before
  unset -f solr_unit_test_after
}
function run_single_test() {
  # Execute one test function bracketed by the per-test hooks, judge it by
  # its exit status, and update the global pass/fail counters.
  local current_test=$1
  echo -n " $current_test "
  run_test_before_if_present
  let NUM_TESTS+=1
  output=$($current_test)
  if [[ $? -eq 0 ]]; then
    let NUM_SUCCESSES+=1
    echo "SUCCEEDED"
  else
    let NUM_FAILURES+=1
    echo "FAILED"
    # Only show the captured output for failures, framed for readability.
    echo "---------------------------------------------------"
    echo "$output"
    echo "---------------------------------------------------"
  fi
  run_test_after_if_present
}
function ensure_param_arg_present() {
  # Fail fast when an option that requires a value got none, or got another
  # option (anything starting with "-") instead of a value.
  local option_name="$1"
  local option_value="$2"
  if [[ $# -lt 2 || "$option_value" == -* ]]; then
    echo "Option '$option_name' requires a single argument, but none provided."
    exit 1
  fi
}
function print_help() {
  # Print usage for the bin-test runner and exit successfully. The quoted
  # heredoc delimiter suppresses expansion; the text contains no variables.
  cat << 'EOF'
Usage: bin-test/test [-h] [-s SUITE_NAME] [-t SUITE_NAME#TEST_NAME]

 Run tests for the 'bin/solr' Solr startup/admin scripts. By default all tests are run.
 Tests suites or single tests can be selected by use of the options below:

 -s|--run-single-suite Runs all tests living in the specified file. Filename argument
 should include the full file extension, but no path prefix.
 (e.g. test_help.sh works, bin-test/test_help.sh and test_help
 do not)

 -t|--run-single-test Runs the specified test from the specified test suite file.
 Takes an argument in the form 'SUITE_NAME#TEST_NAME', where
 SUITE_NAME is the filename of a test suite (see -s above), and
 TEST_NAME matches the name of a bash test function present in
 that file

 -h|--help You're soaking in it.

EOF
  exit 0
}
function run_all_tests() {
  # Discover every suite file under bin-test (test_*.sh) and run each in
  # turn; the return status is that of the last suite executed.
  local suite_file
  for suite_file in $(find bin-test -name "test_*.sh")
  do
    run_test_suite "$suite_file"
  done
}
## MAIN ##
##########

ensure_cwd_is_solr

# Can be 'all', 'help', 'single-suite', or 'single-test'
MODE="all"
SUITE_NAME=""
TEST_NAME=""

# Global counters updated by run_single_test.
NUM_TESTS=0
NUM_SUCCESSES=0
NUM_FAILURES=0

while [[ $# -gt 0 ]]
do
  case $1 in
    -h|--help)
      MODE="help"
      shift 1
      ;;
    -s|--run-single-suite)
      # BUGFIX: quote "$@" so option values containing spaces reach the
      # validator as single words instead of being split.
      ensure_param_arg_present "$@"
      MODE="single-suite"
      SUITE_NAME="bin-test/$2"
      shift 2
      ;;
    -t|--run-single-test)
      ensure_param_arg_present "$@"
      MODE="single-test"
      # Split 'SUITE_NAME#TEST_NAME' on '#' and strip stray spaces.
      SUITE_NAME="bin-test/$(echo "$2" | cut -d "#" -f 1 | tr -d " ")"
      TEST_NAME=$(echo "$2" | cut -d "#" -f 2 | tr -d " ")
      shift 2
      ;;
    *)
      echo "WARNING: Unexpected argument [$1] detected, ignoring."
      shift 1
      ;;
  esac
done

case $MODE in
  "help")
    print_help
    MAIN_RESULT=0
    ;;
  "single-suite")
    run_test_suite $SUITE_NAME
    MAIN_RESULT=$?
    ;;
  "single-test")
    run_test_suite $SUITE_NAME $TEST_NAME
    MAIN_RESULT=$?
    ;;
  "all")
    run_all_tests
    MAIN_RESULT=$?
    ;; # consistency: terminate the last arm like the others
esac

echo "Ran $NUM_TESTS tests, with $NUM_SUCCESSES passing, and $NUM_FAILURES failures"
exit $MAIN_RESULT

View File

@ -0,0 +1,133 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source bin-test/utils/assert.sh
source bin-test/utils/cleanup.sh
# All tests should start with solr_test
function solr_suite_before() {
  # Suite setup: restart Solr in cloud mode and stage a scratch copy of the
  # techproducts configset for tests that pass an explicit config directory.
  bin/solr stop -all &> /dev/null
  bin/solr start -c &> /dev/null
  local source_configset_dir="server/solr/configsets/sample_techproducts_configs"
  TMP_CONFIGSET_DIR="/tmp/test_config"
  rm -rf "$TMP_CONFIGSET_DIR"
  cp -r "$source_configset_dir" "$TMP_CONFIGSET_DIR"
}

function solr_suite_after() {
  # Suite teardown: stop all Solr nodes and discard the scratch configset.
  bin/solr stop -all &> /dev/null
  rm -rf "$TMP_CONFIGSET_DIR"
}

function solr_unit_test_before() {
  # Every test starts from an empty cluster.
  delete_all_collections &> /dev/null
}

function solr_unit_test_after() {
  # Leave the cluster empty for whatever runs next.
  delete_all_collections &> /dev/null
}
# Creating a collection with only -c should succeed and echo its name.
# (assert_cmd_succeeded/assert_output_contains come from
# bin-test/utils/assert.sh — presumably they inspect the exit status of the
# immediately preceding command; keep the statement order intact.)
function solr_test_can_create_collection() {
local create_cmd="bin/solr create_collection -c COLL_NAME"
local expected_output="Created collection 'COLL_NAME'"
local actual_output; actual_output=$($create_cmd)
assert_cmd_succeeded "$create_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
}
# -d pointing at a nonexistent directory must fail with a clear error.
function solr_test_rejects_d_option_with_invalid_config_dir() {
local create_cmd="bin/solr create_collection -c COLL_NAME -d /asdf"
local expected_output="Specified configuration directory /asdf not found!"
local actual_output; actual_output=$($create_cmd)
assert_cmd_failed "$create_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
}
# -d may name one of the configsets bundled with Solr.
function solr_test_accepts_d_option_with_explicit_builtin_config() {
local create_cmd="bin/solr create_collection -c COLL_NAME -d sample_techproducts_configs"
local expected_output="Created collection 'COLL_NAME'"
local actual_output; actual_output=$($create_cmd)
assert_cmd_succeeded "$create_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
}
# -d may also be a filesystem path (staged in /tmp by solr_suite_before).
function solr_test_accepts_d_option_with_explicit_path_to_config() {
local create_cmd="bin/solr create_collection -c COLL_NAME -d $TMP_CONFIGSET_DIR"
local expected_output="Created collection 'COLL_NAME'"
local actual_output; actual_output=$($create_cmd)
assert_cmd_succeeded "$create_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
}
# -n should control the config-set name reported for the new collection.
function solr_test_accepts_n_option_as_config_name() {
local create_cmd="bin/solr create_collection -c COLL_NAME -n other_conf_name"
local expected_name_output="Created collection 'COLL_NAME'"
local expected_config_name_output="config-set 'other_conf_name'"
local actual_output; actual_output=$($create_cmd)
# NOTE(review): the original comment here read "Expect to fail, change to
# success" — its intent is unclear (possibly a stale development note, since
# the assertion below expects success); confirm with the author.
assert_cmd_succeeded "$create_cmd" || return 1
assert_output_contains "$actual_output" "$expected_name_output" || return 1
assert_output_contains "$actual_output" "$expected_config_name_output" || return 1
}
function solr_test_allows_config_reuse_when_n_option_specifies_same_config() {
# Two collections created with the same `-n shared_config` must both
# succeed and both report that they use the shared config-set.
local create_cmd1="bin/solr create_collection -c COLL_NAME_1 -n shared_config"
local expected_coll_name_output1="Created collection 'COLL_NAME_1'"
local create_cmd2="bin/solr create_collection -c COLL_NAME_2 -n shared_config"
local expected_coll_name_output2="Created collection 'COLL_NAME_2'"
local expected_config_name_output="config-set 'shared_config'"
local actual_output1; actual_output1=$($create_cmd1)
assert_cmd_succeeded "$create_cmd1" || return 1
assert_output_contains "$actual_output1" "$expected_coll_name_output1" || return 1
assert_output_contains "$actual_output1" "$expected_config_name_output" || return 1
local actual_output2; actual_output2=$($create_cmd2)
assert_cmd_succeeded "$create_cmd2" || return 1
assert_output_contains "$actual_output2" "$expected_coll_name_output2" || return 1
assert_output_contains "$actual_output2" "$expected_config_name_output" || return 1
}
function solr_test_create_multisharded_collections_when_s_provided() {
# `-s 2` should produce a collection reported as having two shards.
local create_cmd="bin/solr create_collection -c COLL_NAME -s 2"
local expected_coll_name_output="Created collection 'COLL_NAME'"
local expected_shards_output="2 shard(s)"
local actual_output; actual_output=$($create_cmd)
assert_cmd_succeeded "$create_cmd" || return 1
assert_output_contains "$actual_output" "$expected_coll_name_output" || return 1
assert_output_contains "$actual_output" "$expected_shards_output" || return 1
}
function solr_test_creates_replicated_collections_when_r_provided() {
# `-rf 2` should produce a collection reported as having two replicas.
local create_cmd="bin/solr create_collection -c COLL_NAME -rf 2"
local expected_coll_name_output="Created collection 'COLL_NAME'"
local expected_rf_output="2 replica(s)"
local actual_output; actual_output=$($create_cmd)
assert_cmd_succeeded "$create_cmd" || return 1
assert_output_contains "$actual_output" "$expected_coll_name_output" || return 1
assert_output_contains "$actual_output" "$expected_rf_output" || return 1
}

View File

@ -0,0 +1,70 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source bin-test/utils/assert.sh
source bin-test/utils/cleanup.sh
# All tests should start with solr_test
function solr_suite_before() {
  # Suite setup: make sure no nodes are left over, then start a fresh
  # Solr in SolrCloud (-c) mode; all output is discarded.
  bin/solr stop -all > /dev/null 2>&1
  bin/solr start -c > /dev/null 2>&1
}
function solr_suite_after() {
# Suite teardown: stop every Solr node started by this suite.
bin/solr stop -all > /dev/null 2>&1
}
function solr_unit_test_before() {
# Per-test setup: each test starts with an empty cluster.
delete_all_collections
}
function solr_unit_test_after() {
# Per-test teardown: remove any collections the test created.
delete_all_collections
}
function solr_test_can_delete_collection() {
  # Round trip: a freshly created collection disappears after `bin/solr delete`.
  bin/solr create_collection -c "COLL_NAME"
  assert_collection_exists "COLL_NAME" || return 1
  bin/solr delete -c "COLL_NAME"
  assert_collection_doesnt_exist "COLL_NAME" || return 1
}
function solr_test_deletes_accompanying_zk_config_by_default() {
# Deleting a collection should also remove its config-set from ZooKeeper
# when the config was auto-named after the collection.
bin/solr create_collection -c "COLL_NAME"
assert_config_exists "COLL_NAME" || return 1
bin/solr delete -c "COLL_NAME"
assert_config_doesnt_exist "COLL_NAME" || return 1
}
function solr_test_deletes_accompanying_zk_config_with_nondefault_name() {
  # A config-set uploaded under a non-default name (-n) must also be
  # cleaned up when its collection is deleted.
  bin/solr create_collection -c "COLL_NAME" -n "NONDEFAULT_CONFIG_NAME"
  assert_config_exists "NONDEFAULT_CONFIG_NAME" || return 1
  bin/solr delete -c "COLL_NAME"
  # `|| return 1` added for consistency with the sibling tests; previously
  # the test relied on this being the last statement of the function.
  assert_config_doesnt_exist "NONDEFAULT_CONFIG_NAME" || return 1
}
function solr_test_deleteConfig_option_can_opt_to_leave_config_in_zk() {
  # `-deleteConfig false` must leave the config-set behind in ZooKeeper.
  bin/solr create_collection -c "COLL_NAME"
  # Bug fix: without `|| return 1`, a failed assertion here did not abort
  # the test, so the final check could pass for the wrong reason.
  assert_config_exists "COLL_NAME" || return 1
  bin/solr delete -c "COLL_NAME" -deleteConfig false
  assert_config_exists "COLL_NAME" || return 1
}

134
solr/bin-test/test_help.sh Normal file
View File

@ -0,0 +1,134 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source bin-test/utils/assert.sh
function solr_test_start_help_flag_prints_help() {
  # `bin/solr start -help` must print usage text and no error output.
  local cmd="bin/solr start -help"
  local out
  out=$($cmd)
  assert_cmd_succeeded "$cmd" || return 1
  assert_output_contains "$out" "Usage: solr start" || return 1
  assert_output_not_contains "$out" "ERROR" || return 1
}
function solr_test_stop_help_flag_prints_help() {
# `bin/solr stop -help` must print usage text and no error output.
local help_cmd="bin/solr stop -help"
local expected_output="Usage: solr stop"
local actual_output; actual_output=$($help_cmd)
assert_cmd_succeeded "$help_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
assert_output_not_contains "$actual_output" "ERROR" || return 1
}
function solr_test_restart_help_flag_prints_help() {
# `bin/solr restart -help` must print usage text and no error output.
local help_cmd="bin/solr restart -help"
local expected_output="Usage: solr restart"
local actual_output; actual_output=$($help_cmd)
assert_cmd_succeeded "$help_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
assert_output_not_contains "$actual_output" "ERROR" || return 1
}
function solr_test_status_help_flag_prints_help() {
# Placeholder: passes unconditionally until `bin/solr status -help`
# emits proper usage text.
#TODO Currently the status flag doesn't return nice help text!
return 0
}
function solr_test_healthcheck_help_flag_prints_help() {
# `bin/solr healthcheck -help` must print usage text and no error output.
local help_cmd="bin/solr healthcheck -help"
local expected_output="Usage: solr healthcheck"
local actual_output; actual_output=$($help_cmd)
assert_cmd_succeeded "$help_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
assert_output_not_contains "$actual_output" "ERROR" || return 1
}
function solr_test_create_help_flag_prints_help() {
# `bin/solr create -help` must print usage text and no error output.
local help_cmd="bin/solr create -help"
local expected_output="Usage: solr create"
local actual_output; actual_output=$($help_cmd)
assert_cmd_succeeded "$help_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
assert_output_not_contains "$actual_output" "ERROR" || return 1
}
function solr_test_create_core_help_flag_prints_help() {
# `bin/solr create_core -help` must print usage text and no error output.
local help_cmd="bin/solr create_core -help"
local expected_output="Usage: solr create_core"
local actual_output; actual_output=$($help_cmd)
assert_cmd_succeeded "$help_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
assert_output_not_contains "$actual_output" "ERROR" || return 1
}
function solr_test_create_collection_help_flag_prints_help() {
# `bin/solr create_collection -help` must print usage text and no error output.
local help_cmd="bin/solr create_collection -help"
local expected_output="Usage: solr create_collection"
local actual_output; actual_output=$($help_cmd)
assert_cmd_succeeded "$help_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
assert_output_not_contains "$actual_output" "ERROR" || return 1
}
function solr_test_delete_help_flag_prints_help() {
# `bin/solr delete -help` must print usage text and no error output.
local help_cmd="bin/solr delete -help"
local expected_output="Usage: solr delete"
local actual_output; actual_output=$($help_cmd)
assert_cmd_succeeded "$help_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
assert_output_not_contains "$actual_output" "ERROR" || return 1
}
function solr_test_version_help_flag_prints_help() {
# Placeholder: passes unconditionally until `bin/solr version -help`
# emits proper usage text.
#TODO Currently the version -help flag doesn't return nice help text!
return 0
}
function solr_test_zk_help_flag_prints_help() {
# `bin/solr zk -help` must print usage text and no error output.
local help_cmd="bin/solr zk -help"
local expected_output="Usage: solr zk"
local actual_output; actual_output=$($help_cmd)
assert_cmd_succeeded "$help_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
assert_output_not_contains "$actual_output" "ERROR" || return 1
}
function solr_test_auth_help_flag_prints_help() {
# `bin/solr auth -help` must print usage text and no error output.
local help_cmd="bin/solr auth -help"
local expected_output="Usage: solr auth"
local actual_output; actual_output=$($help_cmd)
assert_cmd_succeeded "$help_cmd" || return 1
assert_output_contains "$actual_output" "$expected_output" || return 1
assert_output_not_contains "$actual_output" "ERROR" || return 1
}
function solr_test_assert_help_flag_prints_help() {
# Placeholder: passes unconditionally until `bin/solr assert -help`
# emits dedicated usage text instead of generic SolrCLI output.
#TODO Currently the assert -help flag doesn't return nice help text!
# It returns autogenerated SolrCLI help, which is similar but not _quite_
# the same thing.
return 0
}

View File

@ -0,0 +1,39 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# All tests should start with solr_test
function solr_suite_before() {
# Suite setup: ensure no Solr nodes are running before the tests start.
bin/solr stop -all > /dev/null 2>&1
}
function solr_suite_after() {
# Suite teardown: stop any nodes the tests left running.
bin/solr stop -all > /dev/null 2>&1
}
function solr_test_11740_checks_f() {
# SOLR-11740: `bin/solr stop -all` must shut nodes down gracefully and
# not fall back to a forceful kill.
bin/solr start
bin/solr start -p 7574
# grep exits 0 iff "forcefully killing" appears in the stop output;
# $? must be captured immediately after the pipeline.
bin/solr stop -all 2>&1 | grep -i "forcefully killing"
rcode=$?
if [[ $rcode -eq 0 ]]
then
echo "Unexpected forceful kill - please check."
return 2
fi
}

View File

@ -0,0 +1,123 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Return codes for the assert_* helpers, following shell convention:
# 0 = success, non-zero = failure.
# Bug fix: ASSERT_SUCCESS was referenced by every helper but never defined
# (so `return $ASSERT_SUCCESS` silently returned the status of the last
# command), and the old values (ASSERT_SUCCEEDED=1 / ASSERT_FAILURE=0) were
# inverted relative to shell semantics, making failed asserts return 0.
ASSERT_SUCCESS=0
ASSERT_FAILURE=1
# Deprecated alias kept for any external reference to the old name.
ASSERT_SUCCEEDED=$ASSERT_SUCCESS
function assert_cmd_succeeded() {
  # Assert that the command run immediately before this call exited 0.
  # Must be invoked while $? still holds that command's status.
  # $1 - description of the command, used in the failure message.
  retval=$?
  # Bug fix: the original tested `$?` here, which by then held the (always
  # zero) status of the `retval=$?` assignment, so the assert never failed.
  # Returns literal 0/1 because the file's ASSERT_* constants were
  # inverted/undefined.
  if [[ $retval -ne 0 ]]; then
    echo "Expected command $1 to succeed, but exited with $retval"
    return 1
  fi
  return 0
}
function assert_cmd_failed() {
  # Assert that the command run immediately before this call exited non-zero.
  # Must be invoked while $? still holds that command's status.
  # $1 - description of the command, used in the failure message.
  retval=$?
  # Bug fix: the original tested `$?` here, which by then held the (always
  # zero) status of the `retval=$?` assignment, so this assert ALWAYS failed.
  # Returns literal 0/1 because the file's ASSERT_* constants were
  # inverted/undefined.
  if [[ $retval -eq 0 ]]; then
    echo "Expected command $1 to fail, but exited with $retval"
    return 1
  fi
  return 0
}
function assert_output_contains() {
  # Assert that $1 (captured command output) contains $2 (a grep pattern).
  # Returns 0 when found, 1 (with a diagnostic message) when not.
  local actual_output="$1"
  local needle="$2"
  if echo "$actual_output" | grep -q "$needle"; then
    return 0
  fi
  # Bug fix: $needle was left unquoted inside the message (stray quote
  # pairs), subjecting it to word splitting and globbing; also return a
  # non-zero status on failure (the old ASSERT_FAILURE was 0).
  echo "Expected to find $needle in output [$actual_output]"
  return 1
}
function assert_output_not_contains() {
  # Assert that $1 (captured command output) does NOT contain $2 (a grep pattern).
  # Returns 0 when absent, 1 (with a diagnostic message) when found.
  local actual_output="$1"
  local needle="$2"
  if echo "$actual_output" | grep -q "$needle"; then
    # Bug fix: quote $needle properly in the message and return a non-zero
    # status on failure (the old ASSERT_FAILURE was 0).
    echo "Didn't expect to find $needle in output [$actual_output]"
    return 1
  fi
  return 0
}
function assert_collection_exists() {
  # Assert that a collection named $1 is registered in ZooKeeper.
  local coll_name="$1"
  local coll_list
  coll_list=$(bin/solr zk ls /collections -z localhost:9983)
  # Bug fix: the original looped over "$coll_list" quoted (one iteration
  # over the entire listing) and compared with `-eq`, an arithmetic test
  # that evaluates non-numeric strings to 0 — so the check matched
  # unconditionally. Iterate word-by-word and compare as strings.
  local coll
  for coll in $coll_list; do
    if [[ "$coll" == "$coll_name" ]]; then
      return 0
    fi
  done
  echo "Expected to find collection named [$coll_name], but could only find: $coll_list"
  return 1
}
function assert_collection_doesnt_exist() {
  # Assert that no collection named $1 is registered in ZooKeeper.
  local coll_name="$1"
  local coll_list
  coll_list=$(bin/solr zk ls /collections -z localhost:9983)
  # Bug fix: iterate word-by-word (the quoted list gave a single iteration)
  # and use string equality instead of the arithmetic `-eq`, which treats
  # non-numeric strings as 0 and matched unconditionally.
  local coll
  for coll in $coll_list; do
    if [[ "$coll" == "$coll_name" ]]; then
      echo "Expected not to find collection [$coll_name], but it exists"
      return 1
    fi
  done
  return 0
}
function assert_config_exists() {
  # Assert that a config-set named $1 is present in ZooKeeper.
  local config_name="$1"
  local config_list
  config_list=$(bin/solr zk ls /configs -z localhost:9983)
  # Bug fix: iterate word-by-word (the quoted list gave a single iteration)
  # and use string equality instead of the arithmetic `-eq`, which treats
  # non-numeric strings as 0 and matched unconditionally.
  local config
  for config in $config_list; do
    if [[ "$config" == "$config_name" ]]; then
      return 0
    fi
  done
  echo "Expected to find config named [$config_name], but could only find: $config_list"
  return 1
}
function assert_config_doesnt_exist() {
  # Assert that no config-set named $1 is present in ZooKeeper.
  local config_name="$1"
  local config_list
  config_list=$(bin/solr zk ls /configs -z localhost:9983)
  # Bug fix: iterate word-by-word (the quoted list gave a single iteration)
  # and use string equality instead of the arithmetic `-eq`, which treats
  # non-numeric strings as 0 and matched unconditionally.
  local config
  for config in $config_list; do
    if [[ "$config" == "$config_name" ]]; then
      echo "Expected not to find config [$config_name], but it exists"
      return 1
    fi
  done
  return 0
}

View File

@ -0,0 +1,25 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function delete_all_collections() {
  # Delete every collection currently registered in ZooKeeper so each
  # test starts from a clean cluster.
  local collection
  for collection in $(bin/solr zk ls /collections -z localhost:9983); do
    if [[ -n $collection ]]; then
      bin/solr delete -c "$collection"
    fi
  done
}

View File

@ -112,11 +112,31 @@ The example above shows Solr's {solr-javadocs}/solr-core/org/apache/solr/index/S
The merge scheduler controls how merges are performed. The default `ConcurrentMergeScheduler` performs merges in the background using separate threads. The alternative, `SerialMergeScheduler`, does not perform merges with separate threads.
The `ConcurrentMergeScheduler` has two configurable attributes:
`maxMergeCount`::
The maximum number of simultaneous merges that are allowed. If a merge is necessary yet we already have this many threads running, the indexing thread will block until a merge thread has completed. Note that Solr will only run the smallest `maxThreadCount` merges at a time.
`maxThreadCount`::
The maximum number of simultaneous merge threads that should be running at once. This must be less than `maxMergeCount`.
The defaults for the above attributes are dynamically set based on whether the underlying disk drive is rotational disk or not. Refer to the <<taking-solr-to-production.adoc#dynamic-defaults-for-concurrentmergescheduler, Dynamic defaults for ConcurrentMergeScheduler>> section for more details.
.Example: Dynamic defaults
[source,xml]
----
<mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
----
.Example: Explicit defaults
[source,xml]
----
<mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler">
<int name="maxMergeCount">9</int>
<int name="maxThreadCount">4</int>
</mergeScheduler>
----
=== mergedSegmentWarmer
When using Solr for <<near-real-time-searching.adoc#near-real-time-searching,Near Real Time Searching>>, a merged segment warmer can be configured to warm the reader on the newly merged segment, before the merge commits. This is not required for near real-time search, but will reduce search latency on opening a new near real-time reader after a merge completes.

View File

@ -18,7 +18,7 @@
Solr has support for writing and reading its index and transaction log files to the HDFS distributed filesystem.
This does not use Hadoop MapReduce to process Solr data, rather it only uses the HDFS filesystem for index and transaction log file storage. To use Hadoop MapReduce to process Solr data, see the MapReduceIndexerTool in the Solr contrib area.
This does not use Hadoop MapReduce to process Solr data, rather it only uses the HDFS filesystem for index and transaction log file storage.
To use HDFS rather than a local filesystem, you must be using Hadoop 2.x and you will need to instruct Solr to use the `HdfsDirectoryFactory`. There are also several additional parameters to define. These can be set in one of three ways:
@ -30,10 +30,10 @@ To use HDFS rather than a local filesystem, you must be using Hadoop 2.x and you
=== Standalone Solr Instances
For standalone Solr instances, there are a few parameters you should be sure to modify before starting Solr. These can be set in `solrconfig.xml`(more on that <<HdfsDirectoryFactory Parameters,below>>), or passed to the `bin/solr` script at startup.
For standalone Solr instances, there are a few parameters you should modify before starting Solr. These can be set in `solrconfig.xml` (more on that <<HdfsDirectoryFactory Parameters,below>>), or passed to the `bin/solr` script at startup.
* You need to use an `HdfsDirectoryFactory` and a data dir of the form `hdfs://host:port/path`
* You need to specify an UpdateLog location of the form `hdfs://host:port/path`
* You need to use an `HdfsDirectoryFactory` and a data directory in the form `hdfs://host:port/path`
* You need to specify an `updateLog` location in the form `hdfs://host:port/path`
* You should specify a lock factory type of `'hdfs'` or none.
If you do not modify `solrconfig.xml`, you can instead start Solr on HDFS with the following command:
@ -81,7 +81,7 @@ For example, to set JVM arguments to always use HDFS when running in SolrCloud m
== The Block Cache
For performance, the HdfsDirectoryFactory uses a Directory that will cache HDFS blocks. This caching mechanism is meant to replace the standard file system cache that Solr utilizes so much. By default, this cache is allocated off heap. This cache will often need to be quite large and you may need to raise the off heap memory limit for the specific JVM you are running Solr in. For the Oracle/OpenJDK JVMs, the follow is an example command line parameter that you can use to raise the limit when starting Solr:
For performance, the `HdfsDirectoryFactory` uses a Directory that will cache HDFS blocks. This caching mechanism replaces the standard file system cache that Solr utilizes. By default, this cache is allocated off-heap. This cache will often need to be quite large and you may need to raise the off-heap memory limit for the specific JVM you are running Solr in. For the Oracle/OpenJDK JVMs, the following is an example command-line parameter that you can use to raise the limit when starting Solr:
[source,bash]
----
@ -90,7 +90,7 @@ For performance, the HdfsDirectoryFactory uses a Directory that will cache HDFS
== HdfsDirectoryFactory Parameters
The `HdfsDirectoryFactory` has a number of settings that are defined as part of the `directoryFactory` configuration.
The `HdfsDirectoryFactory` has a number of settings defined as part of the `directoryFactory` configuration.
=== Solr HDFS Settings
@ -176,67 +176,42 @@ If using Kerberos, you will need to add the three Kerberos related properties to
</directoryFactory>
----
// In Solr 8, this should be removed entirely;
// it's here now only for back-compat for existing users
== Automatically Add Replicas in SolrCloud
One benefit to running Solr in HDFS is the ability to automatically add new replicas when the Overseer notices that a shard has gone down. Because the "gone" index shards are stored in HDFS, a new core will be created and the new core will point to the existing indexes in HDFS.
The ability to automatically add new replicas when the Overseer notices that a shard has gone down was previously only available to users running Solr in HDFS, but it is now available to all users via Solr's autoscaling framework. See the section <<solrcloud-autoscaling-triggers.adoc#auto-add-replicas-trigger,Auto Add Replicas Trigger>> for details on how to enable and disable this feature.
Collections created using `autoAddReplicas=true` on a shared file system have automatic addition of replicas enabled. The following settings can be used to override the defaults in the `<solrcloud>` section of `solr.xml`.
`autoReplicaFailoverWorkLoopDelay`::
The time (in ms) between clusterstate inspections by the Overseer to detect and possibly act on creation of a replacement replica. The default is `10000`.
`autoReplicaFailoverWaitAfterExpiration`::
The minimum time (in ms) to wait for initiating replacement of a replica after first noticing it not being live. This is important to prevent false positives while stopping or starting the cluster. The default is `30000`.
`autoReplicaFailoverBadNodeExpiration`::
The delay (in ms) after which a replica marked as down would be unmarked. The default is `60000`.
=== Temporarily Disable autoAddReplicas for the Entire Cluster
When doing offline maintenance on the cluster and for various other use cases where an admin would like to temporarily disable auto addition of replicas, the following APIs will disable and re-enable autoAddReplicas for *all collections in the cluster*:
Disable automatic addition of replicas cluster-wide by setting the cluster property `autoAddReplicas` to `false`, as in these examples:
[.dynamic-tabs]
--
[example.tab-pane#v1disableautoadd]
[WARNING]
====
[.tab-label]*V1 API*
The ability to enable or disable the autoAddReplicas feature with cluster properties has been deprecated and will be removed in a future version. All users of this feature who have previously used that approach are encouraged to change their configurations to use the autoscaling framework to ensure continued operation of this feature in their Solr installations.
For users using this feature with the deprecated configuration, you can temporarily disable it cluster-wide by setting the cluster property `autoAddReplicas` to `false`, as in these examples:
.V1 API
[source,bash]
----
http://localhost:8983/solr/admin/collections?action=CLUSTERPROP&name=autoAddReplicas&val=false
----
====
[example.tab-pane#v2disableautoadd]
====
[.tab-label]*V2 API*
.V2 API
[source,bash]
----
curl -X POST -H 'Content-type: application/json' -d '{"set-property": {"name":"autoAddReplicas", "val":false}}' http://localhost:8983/api/cluster
----
====
--
Re-enable automatic addition of replicas (for those collections created with `autoAddReplica=true`) by unsetting the `autoAddReplicas` cluster property. When no `val` parameter is provided, the cluster property is unset:
Re-enable the feature by unsetting the `autoAddReplicas` cluster property. When no `val` parameter is provided, the cluster property is unset:
[.dynamic-tabs]
--
[example.tab-pane#v1enableautoadd]
====
[.tab-label]*V1 API*
.V1 API
[source,bash]
----
http://localhost:8983/solr/admin/collections?action=CLUSTERPROP&name=autoAddReplicas
----
====
[example.tab-pane#v2enableautoadd]
====
[.tab-label]*V2 API*
.V2 API
[source,bash]
----
curl -X POST -H 'Content-type: application/json' -d '{"set-property": {"name":"autoAddReplicas"}}' http://localhost:8983/api/cluster
----
====
--

View File

@ -61,7 +61,12 @@ When a collection has the parameter `autoAddReplicas` set to true then a trigger
which are then processed by configured actions (usually resulting in computing and executing a plan
to add replicas on the live nodes to maintain the expected replication factor).
You can see the section <<solrcloud-autoscaling-auto-add-replicas.adoc#solrcloud-autoscaling-auto-add-replicas, Autoscaling Automatically Adding Replicas>> to learn more about how the `.autoAddReplicas` trigger works.
Refer to the section <<solrcloud-autoscaling-auto-add-replicas.adoc#solrcloud-autoscaling-auto-add-replicas, Autoscaling Automatically Adding Replicas>> to learn more about how the `.autoAddReplicas` trigger works.
This trigger supports one parameter:
`autoReplicaFailoverWaitAfterExpiration`::
The minimum time in milliseconds to wait for initiating replacement of a replica after first noticing it not being live. This is important to prevent false positives while stopping or starting the cluster. The default is `120000` (2 minutes).
== Metric Trigger

View File

@ -163,6 +163,20 @@ If the `status` command is not successful, look for error messages in `/var/solr
== Fine-Tune Your Production Setup
=== Dynamic Defaults for ConcurrentMergeScheduler
The Merge Scheduler is configured in `solrconfig.xml` and defaults to `ConcurrentMergeScheduler`. This scheduler uses multiple threads to merge Lucene segments in the background.
By default, the `ConcurrentMergeScheduler` auto-detects whether the underlying disk drive is rotational or a SSD and sets defaults for `maxThreadCount` and `maxMergeCount` accordingly. If the disk drive is determined to be rotational then the `maxThreadCount` is set to 1 and `maxMergeCount` is set to 6. Otherwise, `maxThreadCount` is set to 4 or half the number of processors available to the JVM whichever is greater and `maxMergeCount` is set to `maxThreadCount+5`.
This auto-detection works only on Linux and even then it is not guaranteed to be correct. On all other platforms, the disk is assumed to be rotational. Therefore, if the auto-detection fails or is incorrect then indexing performance can suffer badly due to the wrong defaults.
The auto-detected value is exposed by the <<metrics-reporting.adoc#metrics-api, Metrics API>> with the key `solr.node:CONTAINER.fs.coreRoot.spins`. A value of `true` denotes that the disk is detected to be a rotational or spinning disk.
It is safer to explicitly set values for `maxThreadCount` and `maxMergeCount` in the <<indexconfig-in-solrconfig.adoc#mergescheduler, IndexConfig section of SolrConfig.xml>> so that values appropriate to your hardware are used.
Alternatively, the boolean system property `lucene.cms.override_spins` can be set in the `SOLR_OPTS` variable in the include file to override the auto-detected value. Similarly, the system property `lucene.cms.override_core_count` can be set to the number of CPU cores to override the auto-detected processor count.
=== Memory and GC Settings
By default, the `bin/solr` script sets the maximum Java heap size to 512M (-Xmx512m), which is fine for getting started with Solr. For production, youll want to increase the maximum heap size based on the memory requirements of your search application; values between 10 and 20 gigabytes are not uncommon for production servers. When you need to change the memory settings for your Solr server, use the `SOLR_JAVA_MEM` variable in the include file, such as:

View File

@ -451,6 +451,10 @@ public class ZkStateReader implements Closeable {
});
securityData = getSecurityProps(true);
}
collectionPropsWatches.forEach((k,v) -> {
new PropsWatcher(k).refreshAndWatch(true);
});
}
private void addSecurityNodeWatcher(final Callable<Pair<byte[], Stat>> callback)
@ -1295,20 +1299,6 @@ public class ZkStateReader implements Closeable {
if (reconstructState.get()) {
new StateWatcher(collection).refreshAndWatch();
}
AtomicBoolean addPropsWatch = new AtomicBoolean(false);
collectionPropsWatches.compute(collection, (k, v) -> {
if (v == null) {
addPropsWatch.set(true);
v = new CollectionWatch<>();
}
v.coreRefCount++;
return v;
});
if (addPropsWatch.get()) {
new PropsWatcher(collection).refreshAndWatch(false);
}
}
/**
@ -1341,18 +1331,6 @@ public class ZkStateReader implements Closeable {
constructState(Collections.emptySet());
}
}
collectionPropsWatches.compute(collection, (k, v) -> {
if (v == null)
return null;
if (v.coreRefCount > 0)
v.coreRefCount--;
if (v.canBeRemoved()) {
watchedCollectionProps.remove(collection);
return null;
}
return v;
});
}
/**