SOLR-10272: Use _default config set if config name is not specified with CREATE collection

Ishan Chattopadhyaya 2017-06-27 09:07:05 +05:30
parent 93c96b06fb
commit ee572b052a
39 changed files with 305 additions and 128 deletions

View File

@@ -173,6 +173,8 @@ New Features
 * SOLR-10574: New _default config set replacing basic_configs and data_driven_schema_configs.
   (Ishan Chattopadhyaya, noble, shalin, hossman, David Smiley, Jan Hoydahl, Alexandre Rafalovich)
+* SOLR-10272: Use _default config set if no collection.configName is specified with CREATE (Ishan Chattopadhyaya)
+
 Bug Fixes
 ----------------------
 * SOLR-9262: Connection and read timeouts are being ignored by UpdateShardHandler after SOLR-4509.
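For context, a minimal SolrJ sketch of the behavior this CHANGES entry describes: creating a collection without naming a configset, which after this commit falls back to the _default configs. The collection name, the ZooKeeper address, and the client construction are illustrative assumptions, not part of the commit.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;

public class CreateWithDefaultConfigExample {
  public static void main(String[] args) throws Exception {
    // Illustrative ZK address; adjust for your cluster.
    try (CloudSolrClient client = new CloudSolrClient.Builder().withZkHost("localhost:9983").build()) {
      // No configset argument and no collection.configName parameter:
      // with this change the collection is created from the _default configset.
      CollectionAdminResponse rsp =
          CollectionAdminRequest.createCollection("my_collection", 2, 1).process(client);
      System.out.println("CREATE status: " + rsp.getStatus());
    }
  }
}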

View File

@@ -932,14 +932,12 @@ if [[ "$SCRIPT_CMD" == "create" || "$SCRIPT_CMD" == "create_core" || "$SCRIPT_CM
    done
  fi

-  if [ -z "$CREATE_CONFDIR" ]; then
-    CREATE_CONFDIR='_default'
-  fi
-
-  # validate the confdir arg
-  if [[ ! -d "$SOLR_TIP/server/solr/configsets/$CREATE_CONFDIR" && ! -d "$CREATE_CONFDIR" ]]; then
-    echo -e "\nSpecified configuration directory $CREATE_CONFDIR not found!\n"
-    exit 1
+  # validate the confdir arg (if provided)
+  if ! [ -z "$CREATE_CONFDIR" ]; then
+    if [[ ! -d "$SOLR_TIP/server/solr/configsets/$CREATE_CONFDIR" && ! -d "$CREATE_CONFDIR" ]]; then
+      echo -e "\nSpecified configuration directory $CREATE_CONFDIR not found!\n"
+      exit 1
+    fi
  fi

  if [ -z "$CREATE_NAME" ]; then
@@ -948,11 +946,6 @@ if [[ "$SCRIPT_CMD" == "create" || "$SCRIPT_CMD" == "create_core" || "$SCRIPT_CM
    exit 1
  fi

-  # If not defined, use the collection name for the name of the configuration in Zookeeper
-  if [ -z "$CREATE_CONFNAME" ]; then
-    CREATE_CONFNAME="$CREATE_NAME"
-  fi
-
  if [ -z "$CREATE_PORT" ]; then
    for ID in `ps auxww | grep java | grep start\.jar | awk '{print $2}' | sort -r`
      do
@@ -1661,6 +1654,11 @@ else
  fi
fi

+# Set the default configset dir to be bootstrapped as _default
+if [ -z "$DEFAULT_CONFDIR" ]; then
+  DEFAULT_CONFDIR="$SOLR_SERVER_DIR/solr/configsets/_default/conf"
+fi
+
# This is quite hacky, but examples rely on a different log4j.properties
# so that we can write logs for examples to $SOLR_HOME/../logs
if [ -z "$SOLR_LOGS_DIR" ]; then
@@ -1911,7 +1909,7 @@ function launch_solr() {
    "-Djetty.port=$SOLR_PORT" "-DSTOP.PORT=$stop_port" "-DSTOP.KEY=$STOP_KEY" \
    "${SOLR_HOST_ARG[@]}" "-Duser.timezone=$SOLR_TIMEZONE" \
    "-Djetty.home=$SOLR_SERVER_DIR" "-Dsolr.solr.home=$SOLR_HOME" "-Dsolr.data.home=$SOLR_DATA_HOME" "-Dsolr.install.dir=$SOLR_TIP" \
-    "${LOG4J_CONFIG[@]}" "${SOLR_OPTS[@]}")
+    "-Dsolr.default.confdir=$DEFAULT_CONFDIR" "${LOG4J_CONFIG[@]}" "${SOLR_OPTS[@]}")

  if [ "$SOLR_MODE" == "solrcloud" ]; then
    IN_CLOUD_MODE=" in SolrCloud mode"
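The start script above only passes the new property through to the JVM; the Java side reads it back as a plain system property (the constant SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE, "solr.default.confdir", appears later in this commit). A minimal sketch of that handoff; the class and method names here are illustrative, not part of the commit:

import java.io.File;

public class DefaultConfDirLookup {
  // Same property name the scripts now set via -Dsolr.default.confdir.
  private static final String SOLR_DEFAULT_CONFDIR = "solr.default.confdir";

  /** Returns the configured default configset directory, or null if unset or missing on disk. */
  static String defaultConfDir() {
    String dir = System.getProperty(SOLR_DEFAULT_CONFDIR);
    return (dir != null && new File(dir).exists()) ? dir : null;
  }

  public static void main(String[] args) {
    System.out.println("Default configset dir: " + defaultConfDir());
  }
}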

View File

@@ -1212,13 +1212,15 @@ IF "%JAVA_VENDOR%" == "IBM J9" (
  set GCLOG_OPT="-Xloggc:!SOLR_LOGS_DIR!\solr_gc.log" -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=9 -XX:GCLogFileSize=20M
)

+IF "%DEFAULT_CONFDIR%"=="" set "DEFAULT_CONFDIR=%SOLR_SERVER_DIR%\solr\configsets\_default\conf"
+
IF "%FG%"=="1" (
  REM run solr in the foreground
  title "Solr-%SOLR_PORT%"
  echo %SOLR_PORT%>"%SOLR_TIP%"\bin\solr-%SOLR_PORT%.port
  "%JAVA%" %SERVEROPT% %SOLR_JAVA_MEM% %START_OPTS% %GCLOG_OPT% ^
    -Dlog4j.configuration="%LOG4J_CONFIG%" -DSTOP.PORT=!STOP_PORT! -DSTOP.KEY=%STOP_KEY% ^
-    -Dsolr.solr.home="%SOLR_HOME%" -Dsolr.install.dir="%SOLR_TIP%" ^
+    -Dsolr.solr.home="%SOLR_HOME%" -Dsolr.install.dir="%SOLR_TIP%" -Dsolr.default.confdir="%DEFAULT_CONFDIR%" ^
    -Djetty.host=%SOLR_JETTY_HOST% -Djetty.port=%SOLR_PORT% -Djetty.home="%SOLR_SERVER_DIR%" ^
    -Djava.io.tmpdir="%SOLR_SERVER_DIR%\tmp" -jar start.jar "%SOLR_JETTY_CONFIG%" "%SOLR_JETTY_ADDL_CONFIG%"
) ELSE (
@@ -1226,13 +1228,13 @@ IF "%FG%"=="1" (
  "%JAVA%" %SERVEROPT% %SOLR_JAVA_MEM% %START_OPTS% %GCLOG_OPT% ^
    -Dlog4j.configuration="%LOG4J_CONFIG%" -DSTOP.PORT=!STOP_PORT! -DSTOP.KEY=%STOP_KEY% ^
    -Dsolr.log.muteconsole ^
-    -Dsolr.solr.home="%SOLR_HOME%" -Dsolr.install.dir="%SOLR_TIP%" ^
+    -Dsolr.solr.home="%SOLR_HOME%" -Dsolr.install.dir="%SOLR_TIP%" -Dsolr.default.confdir="%DEFAULT_CONFDIR%" ^
    -Djetty.host=%SOLR_JETTY_HOST% -Djetty.port=%SOLR_PORT% -Djetty.home="%SOLR_SERVER_DIR%" ^
    -Djava.io.tmpdir="%SOLR_SERVER_DIR%\tmp" -jar start.jar "%SOLR_JETTY_CONFIG%" "%SOLR_JETTY_ADDL_CONFIG%" > "!SOLR_LOGS_DIR!\solr-%SOLR_PORT%-console.log"

  echo %SOLR_PORT%>"%SOLR_TIP%"\bin\solr-%SOLR_PORT%.port

  REM now wait to see Solr come online ...
-  "%JAVA%" %SOLR_SSL_OPTS% %AUTHC_OPTS% %SOLR_ZK_CREDS_AND_ACLS% -Dsolr.install.dir="%SOLR_TIP%" ^
+  "%JAVA%" %SOLR_SSL_OPTS% %AUTHC_OPTS% %SOLR_ZK_CREDS_AND_ACLS% -Dsolr.install.dir="%SOLR_TIP%" -Dsolr.default.confdir="%DEFAULT_CONFDIR%" ^
    -Dlog4j.configuration="file:%DEFAULT_SERVER_DIR%\scripts\cloud-scripts\log4j.properties" ^
    -classpath "%DEFAULT_SERVER_DIR%\solr-webapp\webapp\WEB-INF\lib\*;%DEFAULT_SERVER_DIR%\lib\ext\*" ^
    org.apache.solr.util.SolrCLI status -maxWaitSecs 30 -solr !SOLR_URL_SCHEME!://%SOLR_TOOL_HOST%:%SOLR_PORT%/solr
@@ -1402,10 +1404,8 @@ IF "!CREATE_NAME!"=="" (
  set "SCRIPT_ERROR=Name (-c) is a required parameter for %SCRIPT_CMD%"
  goto invalid_cmd_line
)
-IF "!CREATE_CONFDIR!"=="" set CREATE_CONFDIR=_default
IF "!CREATE_NUM_SHARDS!"=="" set CREATE_NUM_SHARDS=1
IF "!CREATE_REPFACT!"=="" set CREATE_REPFACT=1
-IF "!CREATE_CONFNAME!"=="" set CREATE_CONFNAME=!CREATE_NAME!

REM Find a port that Solr is running on
if "!CREATE_PORT!"=="" (
@@ -1431,7 +1431,7 @@ if "%SCRIPT_CMD%"=="create_core" (
    org.apache.solr.util.SolrCLI create_core -name !CREATE_NAME! -solrUrl !SOLR_URL_SCHEME!://%SOLR_TOOL_HOST%:!CREATE_PORT!/solr ^
    -confdir !CREATE_CONFDIR! -configsetsDir "%SOLR_TIP%\server\solr\configsets"
) else (
-  "%JAVA%" %SOLR_SSL_OPTS% %AUTHC_OPTS% %SOLR_ZK_CREDS_AND_ACLS% -Dsolr.install.dir="%SOLR_TIP%" ^
+  "%JAVA%" %SOLR_SSL_OPTS% %AUTHC_OPTS% %SOLR_ZK_CREDS_AND_ACLS% -Dsolr.install.dir="%SOLR_TIP%" -Dsolr.default.confdir="%DEFAULT_CONFDIR%" ^
    -Dlog4j.configuration="file:%DEFAULT_SERVER_DIR%\scripts\cloud-scripts\log4j.properties" ^
    -classpath "%DEFAULT_SERVER_DIR%\solr-webapp\webapp\WEB-INF\lib\*;%DEFAULT_SERVER_DIR%\lib\ext\*" ^
    org.apache.solr.util.SolrCLI create -name !CREATE_NAME! -shards !CREATE_NUM_SHARDS! -replicationFactor !CREATE_REPFACT! ^

View File

@@ -22,6 +22,7 @@ import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -47,6 +48,7 @@ import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.admin.ConfigSetsHandlerApi;
import org.apache.solr.handler.component.ShardHandler;
import org.apache.solr.handler.component.ShardRequest;
import org.apache.solr.util.TimeOut;
@@ -296,20 +298,49 @@ public class CreateCollectionCmd implements Cmd {
      List<String> configNames = null;
      try {
        configNames = ocmh.zkStateReader.getZkClient().getChildren(ZkConfigManager.CONFIGS_ZKNODE, null, true);
-        if (configNames != null && configNames.size() == 1) {
+        if (configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
+          if (!".system".equals(coll)) {
+            copyDefaultConfigSetTo(configNames, coll);
+          }
+          return coll;
+        } else if (configNames != null && configNames.size() == 1) {
          configName = configNames.get(0);
          // no config set named, but there is only 1 - use it
          log.info("Only one config set found in zk - using it:" + configName);
+        } else if (configNames.contains(coll)) {
+          configName = coll;
        }
      } catch (KeeperException.NoNodeException e) {
      }
    }
-    return configName;
+    return "".equals(configName)? null: configName;
  }

+  /**
+   * Copies the _default configset to the specified configset name (overwrites if pre-existing)
+   */
+  private void copyDefaultConfigSetTo(List<String> configNames, String targetConfig) {
+    ZkConfigManager configManager = new ZkConfigManager(ocmh.zkStateReader.getZkClient());
+
+    // if a configset named coll exists, delete the configset so that _default can be copied over
+    if (configNames.contains(targetConfig)) {
+      log.info("There exists a configset by the same name as the collection we're trying to create: " + targetConfig +
+          ", deleting it so that we can copy the _default configs over and create the collection.");
+      try {
+        configManager.deleteConfigDir(targetConfig);
+      } catch (Exception e) {
+        throw new SolrException(ErrorCode.INVALID_STATE, "Error while deleting configset: " + targetConfig, e);
+      }
+    } else {
+      log.info("Only _default config set found, using it.");
+    }
+    // Copy _default into targetConfig
+    try {
+      configManager.copyConfigDir(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME, targetConfig, new HashSet<>());
+    } catch (Exception e) {
+      throw new SolrException(ErrorCode.INVALID_STATE, "Error while copying _default to " + targetConfig, e);
+    }
+  }
+
  public static void createCollectionZkNode(SolrZkClient zkClient, String collection, Map<String,String> params) {
    log.debug("Check for collection zkNode:" + collection);
    String collectionPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
@@ -384,14 +415,14 @@ public class CreateCollectionCmd implements Cmd {
  }

  private static void getConfName(SolrZkClient zkClient, String collection, String collectionPath, Map<String,Object> collectionProps) throws KeeperException,
      InterruptedException {
    // check for configName
    log.debug("Looking for collection configName");
    if (collectionProps.containsKey("configName")) {
      log.info("configName was passed as a param {}", collectionProps.get("configName"));
      return;
    }

    List<String> configNames = null;
    int retry = 1;
    int retryLimt = 6;
@@ -403,26 +434,34 @@ public class CreateCollectionCmd implements Cmd {
        }
      }

-      // if there is only one conf, use that
      try {
        configNames = zkClient.getChildren(ZkConfigManager.CONFIGS_ZKNODE, null,
            true);
      } catch (NoNodeException e) {
        // just keep trying
      }
-      if (configNames != null && configNames.size() == 1) {
-        // no config set named, but there is only 1 - use it
-        log.info("Only one config set found in zk - using it:" + configNames.get(0));
-        collectionProps.put(ZkController.CONFIGNAME_PROP, configNames.get(0));
-        break;
-      }

+      // check if there's a config set with the same name as the collection
      if (configNames != null && configNames.contains(collection)) {
        log.info(
            "Could not find explicit collection configName, but found config name matching collection name - using that set.");
        collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
        break;
      }

+      // if _default exists, use that
+      if (configNames != null && configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
+        log.info(
+            "Could not find explicit collection configName, but found _default config set - using that set.");
+        collectionProps.put(ZkController.CONFIGNAME_PROP, ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
+        break;
+      }
+
+      // if there is only one conf, use that
+      if (configNames != null && configNames.size() == 1) {
+        // no config set named, but there is only 1 - use it
+        log.info("Only one config set found in zk - using it:" + configNames.get(0));
+        collectionProps.put(ZkController.CONFIGNAME_PROP, configNames.get(0));
+        break;
+      }

      log.info("Could not find collection configName - pausing for 3 seconds and trying again - try: " + retry);
      Thread.sleep(3000);
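Taken together, the reworked getConfName logic above resolves a collection's configset in this order: an explicitly passed configName, then a configset named after the collection, then _default, then a lone configset if exactly one exists, otherwise it pauses and retries. A standalone sketch of that precedence; the helper class, method name, and plain-List signature are illustrative, not the committed API.

import java.util.List;

final class ConfigNameResolution {
  /** Mirrors the precedence in the hunk above; returns null when nothing matches yet (the real code retries). */
  static String resolve(String explicitConfigName, String collection, List<String> configsInZk) {
    if (explicitConfigName != null) {
      return explicitConfigName;           // 1. explicitly requested configset
    }
    if (configsInZk == null) {
      return null;                         // ZK listing not available yet
    }
    if (configsInZk.contains(collection)) {
      return collection;                   // 2. configset named after the collection
    }
    if (configsInZk.contains("_default")) {
      return "_default";                   // 3. the bootstrapped _default configset
    }
    if (configsInZk.size() == 1) {
      return configsInZk.get(0);           // 4. only one configset exists, use it
    }
    return null;                           // unresolved; caller pauses and tries again
  }
}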

View File

@@ -16,15 +16,19 @@
 */
package org.apache.solr.cloud;

+import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.lang.invoke.MethodHandles;
import java.net.InetAddress;
import java.net.NetworkInterface;
+import java.net.URISyntaxException;
+import java.net.URL;
import java.net.URLEncoder;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -70,6 +74,7 @@ import org.apache.solr.common.cloud.ZkCmdExecutor;
import org.apache.solr.common.cloud.ZkConfigManager;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkCredentialsProvider;
+import org.apache.solr.common.cloud.ZkMaintenanceUtils;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.cloud.ZooKeeperException;
@@ -87,7 +92,9 @@ import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.CoreDescriptor;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrCoreInitializationException;
+import org.apache.solr.handler.admin.ConfigSetsHandlerApi;
import org.apache.solr.logging.MDCLoggingContext;
+import org.apache.solr.servlet.SolrDispatchFilter;
import org.apache.solr.update.UpdateLog;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
@@ -660,7 +667,7 @@ public class ZkController {
   * @throws KeeperException if there is a Zookeeper error
   * @throws InterruptedException on interrupt
   */
-  public static void createClusterZkNodes(SolrZkClient zkClient) throws KeeperException, InterruptedException {
+  public static void createClusterZkNodes(SolrZkClient zkClient) throws KeeperException, InterruptedException, IOException {
    ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zkClient.getZkClientTimeout());
    cmdExecutor.ensureExists(ZkStateReader.LIVE_NODES_ZKNODE, zkClient);
    cmdExecutor.ensureExists(ZkStateReader.COLLECTIONS_ZKNODE, zkClient);
@@ -669,6 +676,48 @@ public class ZkController {
    cmdExecutor.ensureExists(ZkStateReader.CLUSTER_STATE, emptyJson, CreateMode.PERSISTENT, zkClient);
    cmdExecutor.ensureExists(ZkStateReader.SOLR_SECURITY_CONF_PATH, emptyJson, CreateMode.PERSISTENT, zkClient);
    cmdExecutor.ensureExists(ZkStateReader.SOLR_AUTOSCALING_CONF_PATH, emptyJson, CreateMode.PERSISTENT, zkClient);
+    bootstrapDefaultConfigSet(zkClient);
+  }
+
+  private static void bootstrapDefaultConfigSet(SolrZkClient zkClient) throws KeeperException, InterruptedException, IOException {
+    if (zkClient.exists("/configs/_default", true) == false) {
+      String configDirPath = getDefaultConfigDirPath();
+      if (configDirPath == null) {
+        log.warn("The _default configset could not be uploaded. Please provide 'solr.default.confdir' parameter that points to a configset" +
+            " intended to be the default. Current 'solr.default.confdir' value: {}", System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE));
+      } else {
+        ZkMaintenanceUtils.upConfig(zkClient, Paths.get(configDirPath), ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
+      }
+    }
+  }
+
+  /**
+   * Gets the absolute filesystem path of the _default configset to bootstrap from.
+   * First tries the sysprop "solr.default.confdir". If not found, tries to find
+   * the _default dir relative to the sysprop "solr.install.dir".
+   * If that fails as well, tries to get the _default from the
+   * classpath. Returns null if not found anywhere.
+   */
+  private static String getDefaultConfigDirPath() {
+    String configDirPath = null;
+    String serverSubPath = "solr" + File.separator +
+        "configsets" + File.separator + "_default" +
+        File.separator + "conf";
+    String subPath = File.separator + "server" + File.separator + serverSubPath;
+    if (System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE) != null && new File(System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE)).exists()) {
+      configDirPath = new File(System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE)).getAbsolutePath();
+    } else if (System.getProperty(SolrDispatchFilter.SOLR_INSTALL_DIR_ATTRIBUTE) != null &&
+        new File(System.getProperty(SolrDispatchFilter.SOLR_INSTALL_DIR_ATTRIBUTE) + subPath).exists()) {
+      configDirPath = new File(System.getProperty(SolrDispatchFilter.SOLR_INSTALL_DIR_ATTRIBUTE) + subPath).getAbsolutePath();
+    } else { // find "_default" in the classpath. This one is used for tests
+      URL classpathUrl = Thread.currentThread().getContextClassLoader().getResource(serverSubPath);
+      try {
+        if (classpathUrl != null && new File(classpathUrl.toURI()).exists()) {
+          configDirPath = new File(classpathUrl.toURI()).getAbsolutePath();
+        }
+      } catch (URISyntaxException ex) {}
+    }
+    return configDirPath;
  }

  private void init(CurrentCoreDescriptorProvider registerOnReconnect) {

View File

@@ -31,6 +31,8 @@ import org.apache.solr.response.SolrQueryResponse;

public class ConfigSetsHandlerApi extends BaseHandlerApiSupport {

+  final public static String DEFAULT_CONFIGSET_NAME = "_default";
+
  final ConfigSetsHandler configSetHandler;
  static Collection<ApiCommand> apiCommands = createMapping();

View File

@@ -133,6 +133,10 @@ public class SolrDispatchFilter extends BaseSolrFilter {

  public static final String SOLRHOME_ATTRIBUTE = "solr.solr.home";

+  public static final String SOLR_INSTALL_DIR_ATTRIBUTE = "solr.install.dir";
+
+  public static final String SOLR_DEFAULT_CONFDIR_ATTRIBUTE = "solr.default.confdir";
+
  public static final String SOLR_LOG_MUTECONSOLE = "solr.log.muteconsole";

  public static final String SOLR_LOG_LEVEL = "solr.log.level";
@@ -223,7 +227,7 @@ public class SolrDispatchFilter extends BaseSolrFilter {
  private void logWelcomeBanner() {
    log.info(" ___ _ Welcome to Apache Solr™ version {}", solrVersion());
    log.info("/ __| ___| |_ _ Starting in {} mode on port {}", isCloudMode() ? "cloud" : "standalone", getSolrPort());
-    log.info("\\__ \\/ _ \\ | '_| Install dir: {}", System.getProperty("solr.install.dir"));
+    log.info("\\__ \\/ _ \\ | '_| Install dir: {}, Default config dir: {}", System.getProperty(SOLR_INSTALL_DIR_ATTRIBUTE), System.getProperty(SOLR_DEFAULT_CONFDIR_ATTRIBUTE));
    log.info("|___/\\___/_|_| Start time: {}", Instant.now().toString());
  }

View File

@@ -1503,17 +1503,23 @@
      maxShardsPerNode = ((numShards*replicationFactor)+numNodes-1)/numNodes;
    }

-    String confname = cli.getOptionValue("confname", collectionName);
-    boolean configExistsInZk =
+    String confname = cli.getOptionValue("confname");
+    String confdir = cli.getOptionValue("confdir");
+    String configsetsDir = cli.getOptionValue("configsetsDir");
+    boolean configExistsInZk = confname != null && !"".equals(confname.trim()) &&
        cloudSolrClient.getZkStateReader().getZkClient().exists("/configs/" + confname, true);

    if (".system".equals(collectionName)) {
      //do nothing
    } else if (configExistsInZk) {
      echo("Re-using existing configuration directory "+confname);
-    } else {
-      Path confPath = ZkConfigManager.getConfigsetPath(cli.getOptionValue("confdir", DEFAULT_CONFIG_SET),
-          cli.getOptionValue("configsetsDir"));
+    } else if (confdir != null && !"".equals(confdir.trim())){
+      if (confname == null || "".equals(confname.trim())) {
+        confname = collectionName;
+      }
+      Path confPath = ZkConfigManager.getConfigsetPath(confdir,
+          configsetsDir);

      echo("Uploading " + confPath.toAbsolutePath().toString() +
          " for config " + confname + " to ZooKeeper at " + cloudSolrClient.getZkHost());
@@ -1531,13 +1537,15 @@
      // doesn't seem to exist ... try to create
      String createCollectionUrl =
          String.format(Locale.ROOT,
-              "%s/admin/collections?action=CREATE&name=%s&numShards=%d&replicationFactor=%d&maxShardsPerNode=%d&collection.configName=%s",
+              "%s/admin/collections?action=CREATE&name=%s&numShards=%d&replicationFactor=%d&maxShardsPerNode=%d",
              baseUrl,
              collectionName,
              numShards,
              replicationFactor,
-              maxShardsPerNode,
-              confname);
+              maxShardsPerNode);
+
+      if (confname != null && !"".equals(confname.trim())) {
+        createCollectionUrl = createCollectionUrl + String.format(Locale.ROOT, "&collection.configName=%s", confname);
+      }

      echo("\nCreating new collection '"+collectionName+"' using command:\n"+createCollectionUrl+"\n");

View File

@@ -0,0 +1,37 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements. See the NOTICE file distributed with
 this work for additional information regarding copyright ownership.
 The ASF licenses this file to You under the Apache License, Version 2.0
 (the "License"); you may not use this file except in compliance with
 the License. You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-->

<schema name="default-config" version="1.6">
  <field name="id" type="string" indexed="true" stored="true" required="true" multiValued="false" />
  <field name="_version_" type="long" indexed="false" stored="false"/>
  <field name="_root_" type="string" indexed="true" stored="false" docValues="false" />
  <field name="_text_" type="text_general" indexed="true" stored="false" multiValued="true"/>

  <fieldType name="string" class="solr.StrField" sortMissingLast="true" docValues="true" />
  <fieldType name="long" class="solr.TrieLongField" docValues="true" precisionStep="0" positionIncrementGap="0"/>
  <fieldType name="text_general" class="solr.TextField" positionIncrementGap="100" multiValued="true">
    <analyzer type="index">
      <tokenizer class="solr.StandardTokenizerFactory"/>
      <filter class="solr.LowerCaseFilterFactory"/>
    </analyzer>
    <analyzer type="query">
      <tokenizer class="solr.StandardTokenizerFactory"/>
      <filter class="solr.LowerCaseFilterFactory"/>
    </analyzer>
  </fieldType>
</schema>

View File

@@ -0,0 +1,32 @@
<?xml version="1.0" ?>
<!--
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements. See the NOTICE file distributed with
 this work for additional information regarding copyright ownership.
 The ASF licenses this file to You under the Apache License, Version 2.0
 (the "License"); you may not use this file except in compliance with
 the License. You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-->

<config>
  <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>

  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>

  <requestHandler name="my_error_handler" class="solr.ThrowErrorOnInitRequestHandler">
    <str name="error">This is the _default configset, which is designed to throw error upon collection creation.</str>
  </requestHandler>

  <schemaFactory class="ClassicIndexSchemaFactory"/>
</config>

View File

@@ -432,7 +432,7 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
        REPLICATION_FACTOR, replicationFactor,
        CREATE_NODE_SET, createNodeSetStr,
        MAX_SHARDS_PER_NODE, maxShardsPerNode),
-        client, null);
+        client, "conf1");
  }

  private CollectionAdminResponse createCollection(Map<String, List<Integer>> collectionInfos, String collectionName,
@@ -588,7 +588,7 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
    try (SolrClient client = createCloudClient(temporaryCollection)) {
      assertEquals(0, CollectionAdminRequest
-          .createCollection(temporaryCollection, shardCount, 1)
+          .createCollection(temporaryCollection, "conf1", shardCount, 1)
          .setCreateNodeSet("")
          .process(client).getStatus());
      for (int i = 0; i < jettys.size(); i++) {
for (int i = 0; i < jettys.size(); i++) { for (int i = 0; i < jettys.size(); i++) {

View File

@@ -157,7 +157,7 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
  private void testNodeWithoutCollectionForwarding() throws Exception {
    assertEquals(0, CollectionAdminRequest
-        .createCollection(ONE_NODE_COLLECTION, 1, 1)
+        .createCollection(ONE_NODE_COLLECTION, "conf1", 1, 1)
        .setCreateNodeSet("")
        .process(cloudClient).getStatus());
    assertTrue(CollectionAdminRequest

View File

@@ -576,7 +576,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
  protected void createCores(final HttpSolrClient client,
      ThreadPoolExecutor executor, final String collection, final int numShards, int cnt) {
    try {
-      assertEquals(0, CollectionAdminRequest.createCollection(collection, numShards, 1)
+      assertEquals(0, CollectionAdminRequest.createCollection(collection, "conf1", numShards, 1)
          .setCreateNodeSet("")
          .process(client).getStatus());
    } catch (SolrServerException | IOException e) {
@@ -614,8 +614,9 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
    return url2;
  }

+  @Override
  protected CollectionAdminResponse createCollection(Map<String, List<Integer>> collectionInfos,
-      String collectionName, int numShards, int numReplicas, int maxShardsPerNode, SolrClient client, String createNodeSetStr) throws SolrServerException, IOException {
+      String collectionName, String configSetName, int numShards, int numReplicas, int maxShardsPerNode, SolrClient client, String createNodeSetStr) throws SolrServerException, IOException {
    // TODO: Use CollectionAdminRequest for this test
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionAction.CREATE.toString());
@@ -633,6 +634,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
      collectionInfos.put(collectionName, list);
    }
    params.set("name", collectionName);
+    params.set("collection.configName", configSetName);
    SolrRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");
@@ -795,7 +797,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
  private void testANewCollectionInOneInstanceWithManualShardAssignement() throws Exception {
    log.info("### STARTING testANewCollectionInOneInstanceWithManualShardAssignement");
-    assertEquals(0, CollectionAdminRequest.createCollection(oneInstanceCollection2, 2, 2)
+    assertEquals(0, CollectionAdminRequest.createCollection(oneInstanceCollection2, "conf1", 2, 2)
        .setCreateNodeSet("")
        .setMaxShardsPerNode(4)
        .process(cloudClient).getStatus());
@@ -921,7 +923,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
  private void testANewCollectionInOneInstance() throws Exception {
    log.info("### STARTING testANewCollectionInOneInstance");
-    CollectionAdminResponse response = CollectionAdminRequest.createCollection(oneInstanceCollection, 2, 2)
+    CollectionAdminResponse response = CollectionAdminRequest.createCollection(oneInstanceCollection, "conf1", 2, 2)
        .setCreateNodeSet(jettys.get(0).getNodeName())
        .setMaxShardsPerNode(4)
        .process(cloudClient);
@@ -1087,7 +1089,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
  private void createNewCollection(final String collection) throws InterruptedException {
    try {
      assertEquals(0, CollectionAdminRequest
-          .createCollection(collection, 2, 1)
+          .createCollection(collection, "conf1", 2, 1)
          .setCreateNodeSet("")
          .process(cloudClient).getStatus());
    } catch (Exception e) {

View File

@@ -61,7 +61,7 @@ public class ClusterStateUpdateTest extends SolrCloudTestCase {
  public void testCoreRegistration() throws Exception {
    System.setProperty("solrcloud.update.delay", "1");

-    assertEquals(0, CollectionAdminRequest.createCollection("testcore", 1,1)
+    assertEquals(0, CollectionAdminRequest.createCollection("testcore", "conf", 1, 1)
        .setCreateNodeSet(cluster.getJettySolrRunner(0).getNodeName())
        .process(cluster.getSolrClient()).getStatus());

View File

@@ -55,6 +55,25 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
        .configure();
  }

+  /**
+   * When a config name is not specified during collection creation, the _default should
+   * be used.
+   */
+  @Test
+  public void testCreateWithDefaultConfigSet() throws Exception {
+    String collectionName = "solrj_default_configset";
+    CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName, 2, 2) // no configset specified
+        .process(cluster.getSolrClient());
+
+    // The _default configset (for the tests) is designed to error out upon collection creation,
+    // so we just ensure that the correct error message was obtained.
+    assertFalse(response.isSuccess());
+    System.out.println("Errors are: "+response.getErrorMessages());
+    assertTrue(response.getErrorMessages() != null && response.getErrorMessages().size() > 0);
+    assertTrue(response.getErrorMessages().getVal(0).contains("This is the _default configset, which is designed"
+        + " to throw error upon collection creation."));
+  }
+
  @Test
  public void testCreateAndDeleteCollection() throws Exception {
    String collectionName = "solrj_test";

View File

@@ -80,7 +80,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
    handle.put("timestamp", SKIPVAL);

    String testCollectionName = "forceleader_test_collection";
-    createCollection(testCollectionName, 1, 3, 1);
+    createCollection(testCollectionName, "conf1", 1, 3, 1);
    cloudClient.setDefaultCollection(testCollectionName);

    try {
@@ -166,7 +166,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
    handle.put("timestamp", SKIPVAL);

    String testCollectionName = "forceleader_last_published";
-    createCollection(testCollectionName, 1, 3, 1);
+    createCollection(testCollectionName, "conf1", 1, 3, 1);
    cloudClient.setDefaultCollection(testCollectionName);
    log.info("Collection created: " + testCollectionName);

View File

@@ -152,7 +152,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
  protected void testLeaderInitiatedRecoveryCRUD() throws Exception {
    String testCollectionName = "c8n_crud_1x2";
    String shardId = "shard1";
-    createCollectionRetry(testCollectionName, 1, 2, 1);
+    createCollectionRetry(testCollectionName, "conf1", 1, 2, 1);
    cloudClient.setDefaultCollection(testCollectionName);

    Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, shardId);
@@ -204,7 +204,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
  protected void testMinRf() throws Exception {
    // create a collection that has 1 shard and 3 replicas
    String testCollectionName = "collMinRf_1x3";
-    createCollection(testCollectionName, 1, 3, 1);
+    createCollection(testCollectionName, "conf1", 1, 3, 1);
    cloudClient.setDefaultCollection(testCollectionName);

    sendDoc(1, 2);
@@ -290,7 +290,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
  protected void testRf2() throws Exception {
    // create a collection that has 1 shard but 2 replicas
    String testCollectionName = "c8n_1x2";
-    createCollectionRetry(testCollectionName, 1, 2, 1);
+    createCollectionRetry(testCollectionName, "conf1", 1, 2, 1);
    cloudClient.setDefaultCollection(testCollectionName);

    sendDoc(1);
@@ -386,7 +386,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
  protected void testRf3() throws Exception {
    // create a collection that has 1 shard but 2 replicas
    String testCollectionName = "c8n_1x3";
-    createCollectionRetry(testCollectionName, 1, 3, 1);
+    createCollectionRetry(testCollectionName, "conf1", 1, 3, 1);
    cloudClient.setDefaultCollection(testCollectionName);

@@ -437,7 +437,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
  protected void testLeaderZkSessionLoss() throws Exception {
    String testCollectionName = "c8n_1x2_leader_session_loss";
-    createCollectionRetry(testCollectionName, 1, 2, 1);
+    createCollectionRetry(testCollectionName, "conf1", 1, 2, 1);
    cloudClient.setDefaultCollection(testCollectionName);

    sendDoc(1);

View File

@@ -61,7 +61,7 @@ public class LeaderFailoverAfterPartitionTest extends HttpPartitionTest {
    // kill the leader ... see what happens
    // create a collection that has 1 shard but 3 replicas
    String testCollectionName = "c8n_1x3_lf"; // _lf is leader fails
-    createCollection(testCollectionName, 1, 3, 1);
+    createCollection(testCollectionName, "conf1", 1, 3, 1);
    cloudClient.setDefaultCollection(testCollectionName);

    sendDoc(1);

View File

@@ -62,7 +62,7 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest
    // create a collection that has 2 shard and 2 replicas
    String testCollectionName = "c8n_2x2_commits";
-    createCollection(testCollectionName, 2, 2, 1);
+    createCollection(testCollectionName, "conf1", 2, 2, 1);
    cloudClient.setDefaultCollection(testCollectionName);

    List<Replica> notLeaders =
@@ -105,7 +105,7 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest
    // create a collection that has 1 shard and 3 replicas
    String testCollectionName = "c8n_1x3_commits";
-    createCollection(testCollectionName, 1, 3, 1);
+    createCollection(testCollectionName, "conf1", 1, 3, 1);
    cloudClient.setDefaultCollection(testCollectionName);

    List<Replica> notLeaders =

View File

@@ -86,7 +86,7 @@ public class LeaderInitiatedRecoveryOnShardRestartTest extends AbstractFullDistr
    String testCollectionName = "all_in_lir";
    String shardId = "shard1";
-    createCollection(testCollectionName, 1, 3, 1);
+    createCollection(testCollectionName, "conf1", 1, 3, 1);

    waitForRecoveriesToFinish(testCollectionName, false);

View File

@@ -98,12 +98,12 @@ public class ReplicationFactorTest extends AbstractFullDistribZkTestBase {
    String shardId = "shard1";
    int minRf = 2;

-    CollectionAdminResponse resp = createCollection(testCollectionName, numShards, replicationFactor, maxShardsPerNode);
+    CollectionAdminResponse resp = createCollection(testCollectionName, "conf1", numShards, replicationFactor, maxShardsPerNode);

    if (resp.getResponse().get("failure") != null) {
      CollectionAdminRequest.deleteCollection(testCollectionName).process(cloudClient);

-      resp = createCollection(testCollectionName, numShards, replicationFactor, maxShardsPerNode);
+      resp = createCollection(testCollectionName, "conf1", numShards, replicationFactor, maxShardsPerNode);

      if (resp.getResponse().get("failure") != null) {
        fail("Could not create " + testCollectionName);
@@ -184,7 +184,7 @@ public class ReplicationFactorTest extends AbstractFullDistribZkTestBase {
    String shardId = "shard1";
    int minRf = 2;

-    createCollection(testCollectionName, numShards, replicationFactor, maxShardsPerNode);
+    createCollection(testCollectionName, "conf1", numShards, replicationFactor, maxShardsPerNode);
    cloudClient.setDefaultCollection(testCollectionName);

    List<Replica> replicas =

View File

@@ -63,7 +63,7 @@ public class ShardRoutingCustomTest extends AbstractFullDistribZkTestBase {
      setupJettySolrHome(jettyDir);
      JettySolrRunner j = createJetty(jettyDir, createTempDir().toFile().getAbsolutePath(), "shardA", "solrconfig.xml", null);
      assertEquals(0, CollectionAdminRequest
-          .createCollection(DEFAULT_COLLECTION, 1, 1)
+          .createCollection(DEFAULT_COLLECTION, "conf1", 1, 1)
          .setStateFormat(Integer.parseInt(getStateFormat()))
          .setCreateNodeSet("")
          .process(cloudClient).getStatus());

View File

@@ -523,7 +523,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
    log.info("Starting testSplitShardWithRule");
    String collectionName = "shardSplitWithRule";
-    CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName,1,2)
+    CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2)
        .setRule("shard:*,replica:<2,node:*");

    CollectionAdminResponse response = createRequest.process(cloudClient);
    assertEquals(0, response.getStatus());

View File

@@ -695,7 +695,7 @@ public class TestConfigSetsAPI extends SolrTestCaseJ4 {
      ConfigSetAdminRequest.List list = new ConfigSetAdminRequest.List();
      ConfigSetAdminResponse.List response = list.process(solrClient);
      Collection<String> actualConfigSets = response.getConfigSets();
-      assertEquals(0, actualConfigSets.size());
+      assertEquals(1, actualConfigSets.size()); // only the _default configset

      // test multiple
      Set<String> configSets = new HashSet<String>();
@@ -706,8 +706,8 @@ public class TestConfigSetsAPI extends SolrTestCaseJ4 {
      }
      response = list.process(solrClient);
      actualConfigSets = response.getConfigSets();
-      assertEquals(configSets.size(), actualConfigSets.size());
-      assertTrue(configSets.containsAll(actualConfigSets));
+      assertEquals(configSets.size() + 1, actualConfigSets.size());
+      assertTrue(actualConfigSets.containsAll(configSets));
    } finally {
      zkClient.close();
    }

View File

@@ -63,7 +63,7 @@ public class TestOnReconnectListenerSupport extends AbstractFullDistribZkTestBas
    String testCollectionName = "c8n_onreconnect_1x1";
    String shardId = "shard1";
-    createCollectionRetry(testCollectionName, 1, 1, 1);
+    createCollectionRetry(testCollectionName, "conf1", 1, 1, 1);
    cloudClient.setDefaultCollection(testCollectionName);

    Replica leader = getShardLeader(testCollectionName, shardId, 30 /* timeout secs */);

View File

@@ -130,9 +130,9 @@ public class TestPullReplica extends SolrCloudTestCase {
        break;
      case 1:
        // Sometimes use v1 API
-        String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&numShards=%s&pullReplicas=%s&maxShardsPerNode=%s",
+        String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&collection.configName=%s&numShards=%s&pullReplicas=%s&maxShardsPerNode=%s",
            cluster.getRandomJetty(random()).getBaseUrl(),
-            collectionName,
+            collectionName, "conf",
            2,    // numShards
            3,    // pullReplicas
            100); // maxShardsPerNode
@@ -143,8 +143,8 @@ public class TestPullReplica extends SolrCloudTestCase {
      case 2:
        // Sometimes use V2 API
        url = cluster.getRandomJetty(random()).getBaseUrl().toString() + "/____v2/c";
-        String requestBody = String.format(Locale.ROOT, "{create:{name:%s, numShards:%s, pullReplicas:%s, maxShardsPerNode:%s %s}}",
+        String requestBody = String.format(Locale.ROOT, "{create:{name:%s, config:%s, numShards:%s, pullReplicas:%s, maxShardsPerNode:%s %s}}",
-            collectionName,
+            collectionName, "conf",
            2,    // numShards
            3,    // pullReplicas
            100,  // maxShardsPerNode

View File

@@ -73,11 +73,11 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
   */
  private void testRequestTracking() throws Exception {

-    CollectionAdminRequest.createCollection("a1x2",1,2)
+    CollectionAdminRequest.createCollection("a1x2", "conf1", 1, 2)
        .setCreateNodeSet(nodeNames.get(0) + ',' + nodeNames.get(1))
        .process(cloudClient);

-    CollectionAdminRequest.createCollection("b1x1",1,1)
+    CollectionAdminRequest.createCollection("b1x1", "conf1", 1, 1)
        .setCreateNodeSet(nodeNames.get(2))
        .process(cloudClient);

@@ -128,7 +128,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
  private void testQueryAgainstDownReplica() throws Exception {

    log.info("Creating collection 'football' with 1 shard and 2 replicas");
-    CollectionAdminRequest.createCollection("football",1,2)
+    CollectionAdminRequest.createCollection("football", "conf1", 1, 2)
        .setCreateNodeSet(nodeNames.get(0) + ',' + nodeNames.get(1))
        .process(cloudClient);

View File

@@ -174,7 +174,7 @@ public class TestSolrCloudWithKerberosAlt extends LuceneTestCase {
      String configName = "solrCloudCollectionConfig";
      miniCluster.uploadConfigSet(SolrTestCaseJ4.TEST_PATH().resolve("collection1/conf"), configName);

-      CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName,NUM_SHARDS,REPLICATION_FACTOR);
+      CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, configName, NUM_SHARDS, REPLICATION_FACTOR);
      Properties properties = new Properties();
      properties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
      properties.put("solr.tests.maxBufferedDocs", "100000");

View File

@@ -157,9 +157,9 @@ public class TestTlogReplica extends SolrCloudTestCase {
        break;
      case 1:
        // Sometimes don't use SolrJ
-        String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&numShards=%s&tlogReplicas=%s&maxShardsPerNode=%s",
+        String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&collection.configName=%s&numShards=%s&tlogReplicas=%s&maxShardsPerNode=%s",
            cluster.getRandomJetty(random()).getBaseUrl(),
-            collectionName,
+            collectionName, "conf",
            2,    // numShards
            4,    // tlogReplicas
            100); // maxShardsPerNode
@@ -170,8 +170,8 @@ public class TestTlogReplica extends SolrCloudTestCase {
      case 2:
        // Sometimes use V2 API
        url = cluster.getRandomJetty(random()).getBaseUrl().toString() + "/____v2/c";
-        String requestBody = String.format(Locale.ROOT, "{create:{name:%s, numShards:%s, tlogReplicas:%s, maxShardsPerNode:%s}}",
+        String requestBody = String.format(Locale.ROOT, "{create:{name:%s, config:%s, numShards:%s, tlogReplicas:%s, maxShardsPerNode:%s}}",
-            collectionName,
+            collectionName, "conf",
            2,    // numShards
            4,    // tlogReplicas
            100); // maxShardsPerNode

View File

@@ -109,7 +109,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
    final String coreName1 = collection+"_1";
    final String coreName2 = collection+"_2";

-    assertEquals(0, CollectionAdminRequest.createCollection(collection, numShards, 1)
+    assertEquals(0, CollectionAdminRequest.createCollection(collection, "conf1", numShards, 1)
        .setCreateNodeSet("")
        .process(cloudClient).getStatus());
    assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard1")
@@ -168,7 +168,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
    JettySolrRunner jetty1 = jettys.get(0);

    assertEquals(0, CollectionAdminRequest
-        .createCollection("unloadcollection", 1,1)
+        .createCollection("unloadcollection", "conf1", 1,1)
        .setCreateNodeSet(jetty1.getNodeName())
        .process(cloudClient).getStatus());
    ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();


@ -49,10 +49,11 @@ import static org.apache.solr.common.cloud.ZkStateReader.SOLR_AUTOSCALING_CONF_P
* Test for AutoScalingHandler * Test for AutoScalingHandler
*/ */
public class AutoScalingHandlerTest extends SolrCloudTestCase { public class AutoScalingHandlerTest extends SolrCloudTestCase {
final static String CONFIGSET_NAME = "conf";
@BeforeClass @BeforeClass
public static void setupCluster() throws Exception { public static void setupCluster() throws Exception {
configureCluster(2) configureCluster(2)
.addConfig("conf", configset("cloud-minimal")) .addConfig(CONFIGSET_NAME, configset("cloud-minimal"))
.configure(); .configure();
} }
@ -275,7 +276,7 @@ public class AutoScalingHandlerTest extends SolrCloudTestCase {
assertEquals(0, violations.size()); assertEquals(0, violations.size());
// lets create a collection which violates the rule replicas < 2 // lets create a collection which violates the rule replicas < 2
CollectionAdminRequest.Create create = CollectionAdminRequest.Create.createCollection("readApiTestViolations", 1, 6); CollectionAdminRequest.Create create = CollectionAdminRequest.Create.createCollection("readApiTestViolations", CONFIGSET_NAME, 1, 6);
create.setMaxShardsPerNode(10); create.setMaxShardsPerNode(10);
CollectionAdminResponse adminResponse = create.process(solrClient); CollectionAdminResponse adminResponse = create.process(solrClient);
assertTrue(adminResponse.isSuccess()); assertTrue(adminResponse.isSuccess());


@ -73,7 +73,7 @@ public class TestPolicyCloud extends SolrCloudTestCase {
cluster.getSolrClient().request(AutoScalingHandlerTest.createAutoScalingRequest(SolrRequest.METHOD.POST, commands)); cluster.getSolrClient().request(AutoScalingHandlerTest.createAutoScalingRequest(SolrRequest.METHOD.POST, commands));
String collectionName = "testCreateCollectionAddReplica"; String collectionName = "testCreateCollectionAddReplica";
CollectionAdminRequest.createCollection(collectionName, 1, 1) CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1)
.setPolicy("c1") .setPolicy("c1")
.process(cluster.getSolrClient()); .process(cluster.getSolrClient());
@ -102,7 +102,7 @@ public class TestPolicyCloud extends SolrCloudTestCase {
assertEquals("success", response.get("result")); assertEquals("success", response.get("result"));
String collectionName = "testCreateCollectionSplitShard"; String collectionName = "testCreateCollectionSplitShard";
CollectionAdminRequest.createCollection(collectionName, 1, 2) CollectionAdminRequest.createCollection(collectionName, "conf", 1, 2)
.setPolicy("c1") .setPolicy("c1")
.setMaxShardsPerNode(10) .setMaxShardsPerNode(10)
.process(cluster.getSolrClient()); .process(cluster.getSolrClient());
@ -140,7 +140,7 @@ public class TestPolicyCloud extends SolrCloudTestCase {
Map<String, Object> json = Utils.getJson(cluster.getZkClient(), ZkStateReader.SOLR_AUTOSCALING_CONF_PATH, true); Map<String, Object> json = Utils.getJson(cluster.getZkClient(), ZkStateReader.SOLR_AUTOSCALING_CONF_PATH, true);
assertEquals("full json:"+ Utils.toJSONString(json) , "#EACH", assertEquals("full json:"+ Utils.toJSONString(json) , "#EACH",
Utils.getObjectByPath(json, true, "/policies/c1[0]/shard")); Utils.getObjectByPath(json, true, "/policies/c1[0]/shard"));
CollectionAdminRequest.createCollectionWithImplicitRouter("policiesTest", null, "s1,s2", 1) CollectionAdminRequest.createCollectionWithImplicitRouter("policiesTest", "conf", "s1,s2", 1)
.setPolicy("c1") .setPolicy("c1")
.process(cluster.getSolrClient()); .process(cluster.getSolrClient());


@ -66,7 +66,7 @@ public class HdfsNNFailoverTest extends BasicDistributedZkTest {
@Test @Test
public void test() throws Exception { public void test() throws Exception {
createCollection(COLLECTION, 1, 1, 1); createCollection(COLLECTION, "conf1", 1, 1, 1);
waitForRecoveriesToFinish(COLLECTION, false); waitForRecoveriesToFinish(COLLECTION, false);


@ -95,7 +95,7 @@ public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
int docCount = random().nextInt(1313) + 1; int docCount = random().nextInt(1313) + 1;
int cnt = random().nextInt(4) + 1; int cnt = random().nextInt(4) + 1;
for (int i = 0; i < cnt; i++) { for (int i = 0; i < cnt; i++) {
createCollection(ACOLLECTION + i, 2, 2, 9); createCollection(ACOLLECTION + i, "conf1", 2, 2, 9);
} }
for (int i = 0; i < cnt; i++) { for (int i = 0; i < cnt; i++) {
waitForRecoveriesToFinish(ACOLLECTION + i, false); waitForRecoveriesToFinish(ACOLLECTION + i, false);


@ -107,7 +107,7 @@ public class StressHdfsTest extends BasicDistributedZkTest {
Timer timer = new Timer(); Timer timer = new Timer();
try { try {
createCollection(DELETE_DATA_DIR_COLLECTION, 1, 1, 1); createCollection(DELETE_DATA_DIR_COLLECTION, "conf1", 1, 1, 1);
waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false); waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
@ -154,7 +154,7 @@ public class StressHdfsTest extends BasicDistributedZkTest {
if (nShards == 0) nShards = 1; if (nShards == 0) nShards = 1;
} }
createCollection(DELETE_DATA_DIR_COLLECTION, nShards, rep, maxReplicasPerNode); createCollection(DELETE_DATA_DIR_COLLECTION, "conf1", nShards, rep, maxReplicasPerNode);
waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false); waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);


@ -42,6 +42,10 @@ public class ThrowErrorOnInitRequestHandler extends RequestHandlerBase
@Override @Override
public void init(NamedList args) { public void init(NamedList args) {
String errorMessage = (String) args.get("error");
if (errorMessage != null) {
throw new Error(errorMessage);
}
throw new Error("Doing my job, throwing a java.lang.Error"); throw new Error("Doing my job, throwing a java.lang.Error");
} }
} }


@ -65,7 +65,7 @@ public class TestTrackingShardHandlerFactory extends AbstractFullDistribZkTestBa
assertSame(trackingQueue, trackingShardHandlerFactory.getTrackingQueue()); assertSame(trackingQueue, trackingShardHandlerFactory.getTrackingQueue());
} }
createCollection(collectionName, 2, 1, 1); createCollection(collectionName, "conf1", 2, 1, 1);
waitForRecoveriesToFinish(collectionName, true); waitForRecoveriesToFinish(collectionName, true);


@ -1,22 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Common classes for autoscaling parsing filtering nodes and sorting
*/
package org.apache.solr.cloud.autoscaling;


@ -312,7 +312,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
+ "/control/data") : null); + "/control/data") : null);
try (SolrClient client = createCloudClient("control_collection")) { try (SolrClient client = createCloudClient("control_collection")) {
assertEquals(0, CollectionAdminRequest assertEquals(0, CollectionAdminRequest
.createCollection("control_collection", 1, 1) .createCollection("control_collection", "conf1", 1, 1)
.setCreateNodeSet(controlJetty.getNodeName()) .setCreateNodeSet(controlJetty.getNodeName())
.process(client).getStatus()); .process(client).getStatus());
} }
@ -380,7 +380,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
StringBuilder sb = new StringBuilder(); StringBuilder sb = new StringBuilder();
assertEquals(0, CollectionAdminRequest assertEquals(0, CollectionAdminRequest
.createCollection(DEFAULT_COLLECTION, sliceCount, 1) .createCollection(DEFAULT_COLLECTION, "conf1", sliceCount, 1)
.setStateFormat(Integer.parseInt(getStateFormat())) .setStateFormat(Integer.parseInt(getStateFormat()))
.setCreateNodeSet("") .setCreateNodeSet("")
.process(cloudClient).getStatus()); .process(cloudClient).getStatus());
@ -1600,12 +1600,12 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
super.destroyServers(); super.destroyServers();
} }
protected CollectionAdminResponse createCollection(String collectionName, int numShards, int replicationFactor, int maxShardsPerNode) throws SolrServerException, IOException { protected CollectionAdminResponse createCollection(String collectionName, String configSetName, int numShards, int replicationFactor, int maxShardsPerNode) throws SolrServerException, IOException {
return createCollection(null, collectionName, numShards, replicationFactor, maxShardsPerNode, null, null); return createCollection(null, collectionName, configSetName, numShards, replicationFactor, maxShardsPerNode, null, null);
} }
protected CollectionAdminResponse createCollection(Map<String,List<Integer>> collectionInfos, String collectionName, Map<String,Object> collectionProps, SolrClient client) throws SolrServerException, IOException{ protected CollectionAdminResponse createCollection(Map<String,List<Integer>> collectionInfos, String collectionName, Map<String,Object> collectionProps, SolrClient client) throws SolrServerException, IOException{
return createCollection(collectionInfos, collectionName, collectionProps, client, null); return createCollection(collectionInfos, collectionName, collectionProps, client, "conf1");
} }
// TODO: Use CollectionAdminRequest#createCollection() instead of a raw request // TODO: Use CollectionAdminRequest#createCollection() instead of a raw request
@ -1640,6 +1640,8 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
} }
if (confSetName != null) { if (confSetName != null) {
params.set("collection.configName", confSetName); params.set("collection.configName", confSetName);
} else {
params.set("collection.configName", "conf1");
} }
int clientIndex = random().nextInt(2); int clientIndex = random().nextInt(2);
@ -1671,7 +1673,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
protected CollectionAdminResponse createCollection(Map<String,List<Integer>> collectionInfos, protected CollectionAdminResponse createCollection(Map<String,List<Integer>> collectionInfos,
String collectionName, int numShards, int replicationFactor, int maxShardsPerNode, SolrClient client, String createNodeSetStr) throws SolrServerException, IOException { String collectionName, String configSetName, int numShards, int replicationFactor, int maxShardsPerNode, SolrClient client, String createNodeSetStr) throws SolrServerException, IOException {
int numNrtReplicas = useTlogReplicas()?0:replicationFactor; int numNrtReplicas = useTlogReplicas()?0:replicationFactor;
int numTlogReplicas = useTlogReplicas()?replicationFactor:0; int numTlogReplicas = useTlogReplicas()?replicationFactor:0;
@ -1683,7 +1685,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
ZkStateReader.PULL_REPLICAS, getPullReplicaCount(), ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
CREATE_NODE_SET, createNodeSetStr, CREATE_NODE_SET, createNodeSetStr,
ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode), ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
client); client, configSetName);
} }
protected CollectionAdminResponse createCollection(Map<String, List<Integer>> collectionInfos, protected CollectionAdminResponse createCollection(Map<String, List<Integer>> collectionInfos,
@ -1893,14 +1895,14 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
createCollection(collectionInfos, collName, props, client); createCollection(collectionInfos, collName, props, client);
} }
protected void createCollectionRetry(String testCollectionName, int numShards, int replicationFactor, int maxShardsPerNode) protected void createCollectionRetry(String testCollectionName, String configSetName, int numShards, int replicationFactor, int maxShardsPerNode)
throws SolrServerException, IOException { throws SolrServerException, IOException {
CollectionAdminResponse resp = createCollection(testCollectionName, numShards, replicationFactor, maxShardsPerNode); CollectionAdminResponse resp = createCollection(testCollectionName, configSetName, numShards, replicationFactor, maxShardsPerNode);
if (resp.getResponse().get("failure") != null) { if (resp.getResponse().get("failure") != null) {
CollectionAdminRequest.Delete req = CollectionAdminRequest.deleteCollection(testCollectionName); CollectionAdminRequest.Delete req = CollectionAdminRequest.deleteCollection(testCollectionName);
req.process(cloudClient); req.process(cloudClient);
resp = createCollection(testCollectionName, numShards, replicationFactor, maxShardsPerNode); resp = createCollection(testCollectionName, configSetName, numShards, replicationFactor, maxShardsPerNode);
if (resp.getResponse().get("failure") != null) { if (resp.getResponse().get("failure") != null) {
fail("Could not create " + testCollectionName); fail("Could not create " + testCollectionName);