SOLR-11239: A special value of -1 can be specified for 'maxShardsPerNode' to denote that there is no limit. The bin/solr script sends maxShardsPerNode=-1 when creating collections. The use of maxShardsPerNode is not supported when a cluster policy is in effect or when a collection-specific policy is specified during collection creation.
parent 755cffdec8
commit 7a576ffa1b
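To make the behavior described above concrete, here is a minimal SolrJ sketch (not part of this commit) of the two cases: passing -1 to lift the per-node shard limit, and the rejection of a positive maxShardsPerNode once an autoscaling policy is in place. The collection names and the "conf" configset are placeholder assumptions; the CollectionAdminRequest calls mirror those used in the tests changed below.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

/** Sketch only: collection names and the "conf" configset are placeholders. */
public class MaxShardsPerNodeExample {

  // maxShardsPerNode=-1 now means "no limit"; bin/solr passes this value by default.
  static void createWithoutLimit(SolrClient client) throws Exception {
    CollectionAdminRequest.Create create =
        CollectionAdminRequest.Create.createCollection("unlimitedColl", "conf", 2, 2);
    create.setMaxShardsPerNode(-1);
    create.process(client);
  }

  // With a cluster policy (or a collection-specific policy) in effect, a positive
  // maxShardsPerNode is rejected: "'maxShardsPerNode>0' is not supported when
  // autoScaling policies are used".
  static void createUnderPolicy(SolrClient client) throws Exception {
    CollectionAdminRequest.Create create =
        CollectionAdminRequest.Create.createCollection("restrictedColl", "conf", 1, 6);
    create.setMaxShardsPerNode(10); // expected to throw a BAD_REQUEST SolrException
    create.process(client);
  }
}

Internally (see CreateCollectionCmd below), a value of -1 or an active policy maps maxShardsPerNode to Integer.MAX_VALUE, so the maxShardsAllowedToCreate check is effectively disabled.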

CHANGES.txt

@@ -292,6 +292,9 @@ Upgrading from Solr 6.x
 * SOLR-11023: EnumField has been deprecated in favor of new EnumFieldType.
 
+* SOLR-11239: The use of maxShardsPerNode is not supported when a cluster policy is in effect or
+  when a collection specific policy is specified during collection creation.
+
 New Features
 ----------------------
 
 * SOLR-9857, SOLR-9858: Collect aggregated metrics from nodes and shard leaders in overseer. (ab)

@@ -668,6 +671,12 @@ Other Changes
 * SOLR-10821: Ref guide documentation for Autoscaling (Noble Paul, Cassandra Targett, shalin)
 
+* SOLR-11239: A special value of -1 can be specified for 'maxShardsPerNode' to denote that
+  there is no limit. The bin/solr script sends maxShardsPerNode=-1 when creating collections. The
+  use of maxShardsPerNode is not supported when a cluster policy is in effect or when a
+  collection specific policy is specified during collection creation.
+  (Noble Paul, shalin)
+
 ================== 6.7.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

CreateCollectionCmd.java

@@ -29,6 +29,7 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
 import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.common.cloud.ReplicaPosition;

@@ -85,6 +86,7 @@ public class CreateCollectionCmd implements Cmd {
     if (clusterState.hasCollection(collectionName)) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "collection already exists: " + collectionName);
     }
+    boolean usePolicyFramework = usePolicyFramework(ocmh.zkStateReader, message);
 
     String configName = getConfigName(collectionName, message);
     if (configName == null) {

@@ -118,7 +120,10 @@ public class CreateCollectionCmd implements Cmd {
       }
 
       int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, 1);
-
+      if (usePolicyFramework && message.getStr(MAX_SHARDS_PER_NODE) != null && maxShardsPerNode > 0) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "'maxShardsPerNode>0' is not supported when autoScaling policies are used");
+      }
+      if (maxShardsPerNode == -1 || usePolicyFramework) maxShardsPerNode = Integer.MAX_VALUE;
       if (numNrtReplicas + numTlogReplicas <= 0) {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
       }

@@ -149,7 +154,9 @@ public class CreateCollectionCmd implements Cmd {
             + "). It's unusual to run two replica of the same slice on the same Solr-instance.");
       }
 
-      int maxShardsAllowedToCreate = maxShardsPerNode * nodeList.size();
+      int maxShardsAllowedToCreate = maxShardsPerNode == Integer.MAX_VALUE ?
+          Integer.MAX_VALUE :
+          maxShardsPerNode * nodeList.size();
       int requestedShardsToCreate = numSlices * totalNumReplicas;
       if (maxShardsAllowedToCreate < requestedShardsToCreate) {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName + ". Value of "

@@ -484,4 +491,15 @@ public class CreateCollectionCmd implements Cmd {
           "Could not find configName for collection " + collection + " found:" + configNames);
     }
   }
+
+  public static boolean usePolicyFramework(ZkStateReader zkStateReader, ZkNodeProps message) {
+    Map autoScalingJson = Collections.emptyMap();
+    try {
+      autoScalingJson = Utils.getJson(zkStateReader.getZkClient(), SOLR_AUTOSCALING_CONF_PATH, true);
+    } catch (Exception e) {
+      return false;
+    }
+    return autoScalingJson.get(Policy.CLUSTER_POLICY) != null || message.getStr(Policy.POLICY) != null;
+  }
+
 }

SolrCLI.java

@@ -1501,10 +1501,6 @@ public class SolrCLI {
 
         if (cli.hasOption("maxShardsPerNode")) {
           maxShardsPerNode = Integer.parseInt(cli.getOptionValue("maxShardsPerNode"));
-        } else {
-          // need number of live nodes to determine maxShardsPerNode if it is not set
-          int numNodes = liveNodes.size();
-          maxShardsPerNode = ((numShards*replicationFactor)+numNodes-1)/numNodes;
         }
 
         String confname = cli.getOptionValue("confname");

AutoScalingHandlerTest.java

@@ -291,9 +291,24 @@ public class AutoScalingHandlerTest extends SolrCloudTestCase {
     response = solrClient.request(req);
     assertEquals(response.get("result").toString(), "success");
 
+    req = createAutoScalingRequest(SolrRequest.METHOD.POST, "{set-cluster-policy : []}");
+    response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    // lets create a collection which violates the rule replicas < 2
+    try {
+      CollectionAdminRequest.Create create = CollectionAdminRequest.Create.createCollection("readApiTestViolations", CONFIGSET_NAME, 1, 6);
+      create.setMaxShardsPerNode(10);
+      create.process(solrClient);
+      fail();
+    } catch (Exception e) {
+      assertTrue(e.getMessage().contains("'maxShardsPerNode>0' is not supported when autoScaling policies are used"));
+    }
+
     // lets create a collection which violates the rule replicas < 2
     CollectionAdminRequest.Create create = CollectionAdminRequest.Create.createCollection("readApiTestViolations", CONFIGSET_NAME, 1, 6);
-    create.setMaxShardsPerNode(10);
     CollectionAdminResponse adminResponse = create.process(solrClient);
     assertTrue(adminResponse.isSuccess());
 

@@ -302,6 +317,11 @@ public class AutoScalingHandlerTest extends SolrCloudTestCase {
     response = solrClient.request(req);
     assertEquals(response.get("result").toString(), "success");
 
+    req = createAutoScalingRequest(SolrRequest.METHOD.POST, setClusterPolicyCommand);
+    response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+
     // get the diagnostics output again
     req = createAutoScalingRequest(SolrRequest.METHOD.GET, "/diagnostics", null);
     response = solrClient.request(req);

TestPolicyCloud.java

@@ -104,7 +104,6 @@ public class TestPolicyCloud extends SolrCloudTestCase {
     String collectionName = "testCreateCollectionSplitShard";
     CollectionAdminRequest.createCollection(collectionName, "conf", 1, 2)
         .setPolicy("c1")
-        .setMaxShardsPerNode(10)
         .process(cluster.getSolrClient());
 
     DocCollection docCollection = getCollectionState(collectionName);

TestSolrCLIRunExample.java

@@ -40,6 +40,7 @@ import org.apache.commons.exec.ExecuteResultHandler;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.embedded.JettyConfig;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;

@@ -47,6 +48,7 @@ import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.cloud.MiniSolrCloudCluster;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.util.NamedList;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;

@@ -54,6 +56,8 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.solr.cloud.autoscaling.AutoScalingHandlerTest.createAutoScalingRequest;
+
 /**
  * Tests the SolrCLI.RunExampleTool implementation that supports bin/solr -e [example]
  */

@@ -479,6 +483,115 @@ public class TestSolrCLIRunExample extends SolrTestCaseJ4 {
       }
     }
 
+    File node1SolrHome = new File(solrExampleDir, "cloud/node1/solr");
+    if (!node1SolrHome.isDirectory()) {
+      fail(node1SolrHome.getAbsolutePath() + " not found! run cloud example failed; tool output: " + toolOutput);
+    }
+
+    // delete the collection
+    SolrCLI.DeleteTool deleteTool = new SolrCLI.DeleteTool(stdoutSim);
+    String[] deleteArgs = new String[]{"-name", collectionName, "-solrUrl", solrUrl};
+    deleteTool.runTool(
+        SolrCLI.processCommandLineArgs(SolrCLI.joinCommonAndToolOptions(deleteTool.getOptions()), deleteArgs));
+
+    // dump all the output written by the SolrCLI commands to stdout
+    //System.out.println(toolOutput);
+
+    // stop the test instance
+    executor.execute(org.apache.commons.exec.CommandLine.parse("bin/solr stop -p " + bindPort));
+  }
+
+  @Test
+  public void testInteractiveSolrCloudExampleWithAutoScalingPolicy() throws Exception {
+    File solrHomeDir = new File(ExternalPaths.SERVER_HOME);
+    if (!solrHomeDir.isDirectory())
+      fail(solrHomeDir.getAbsolutePath() + " not found and is required to run this test!");
+
+    Path tmpDir = createTempDir();
+    File solrExampleDir = tmpDir.toFile();
+
+    File solrServerDir = solrHomeDir.getParentFile();
+
+    String[] toolArgs = new String[]{
+        "-example", "cloud",
+        "-serverDir", solrServerDir.getAbsolutePath(),
+        "-exampleDir", solrExampleDir.getAbsolutePath()
+    };
+
+    int bindPort = -1;
+    try (ServerSocket socket = new ServerSocket(0)) {
+      bindPort = socket.getLocalPort();
+    }
+
+    String collectionName = "testCloudExamplePrompt1";
+
+    // this test only support launching one SolrCloud node due to how MiniSolrCloudCluster works
+    // and the need for setting the host and port system properties ...
+    String userInput = "1\n" + bindPort + "\n" + collectionName + "\n2\n2\n_default\n";
+
+    // simulate user input from stdin
+    InputStream userInputSim = new ByteArrayInputStream(userInput.getBytes(StandardCharsets.UTF_8));
+
+    // capture tool output to stdout
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdoutSim = new PrintStream(baos, true, StandardCharsets.UTF_8.name());
+
+    RunExampleExecutor executor = new RunExampleExecutor(stdoutSim);
+    closeables.add(executor);
+
+    SolrCLI.RunExampleTool tool = new SolrCLI.RunExampleTool(executor, userInputSim, stdoutSim);
+    try {
+      tool.runTool(SolrCLI.processCommandLineArgs(SolrCLI.joinCommonAndToolOptions(tool.getOptions()), toolArgs));
+    } catch (Exception e) {
+      System.err.println("RunExampleTool failed due to: " + e +
+          "; stdout from tool prior to failure: " + baos.toString(StandardCharsets.UTF_8.name()));
+      throw e;
+    }
+
+    String toolOutput = baos.toString(StandardCharsets.UTF_8.name());
+
+    // verify Solr is running on the expected port and verify the collection exists
+    String solrUrl = "http://localhost:" + bindPort + "/solr";
+    String collectionListUrl = solrUrl + "/admin/collections?action=list";
+    if (!SolrCLI.safeCheckCollectionExists(collectionListUrl, collectionName)) {
+      fail("After running Solr cloud example, test collection '" + collectionName +
+          "' not found in Solr at: " + solrUrl + "; tool output: " + toolOutput);
+    }
+
+    // index some docs - to verify all is good for both shards
+    CloudSolrClient cloudClient = null;
+
+    try {
+      cloudClient = getCloudSolrClient(executor.solrCloudCluster.getZkServer().getZkAddress());
+      String setClusterPolicyCommand = "{" +
+          " 'set-cluster-policy': [" +
+          "      {'cores':'<10', 'node':'#ANY'}," +
+          "      {'replica':'<2', 'shard': '#EACH', 'node': '#ANY'}," +
+          "      {'nodeRole':'overseer', 'replica':0}" +
+          "    ]" +
+          "}";
+      SolrRequest req = createAutoScalingRequest(SolrRequest.METHOD.POST, setClusterPolicyCommand);
+      NamedList<Object> response = cloudClient.request(req);
+      assertEquals(response.get("result").toString(), "success");
+      SolrCLI.CreateCollectionTool createCollectionTool = new SolrCLI.CreateCollectionTool(stdoutSim);
+      String[] createArgs = new String[]{"create_collection", "-name", "newColl", "-configsetsDir", "_default", "-solrUrl", solrUrl};
+      createCollectionTool.runTool(
+          SolrCLI.processCommandLineArgs(SolrCLI.joinCommonAndToolOptions(createCollectionTool.getOptions()), createArgs));
+      solrUrl = "http://localhost:" + bindPort + "/solr";
+      collectionListUrl = solrUrl + "/admin/collections?action=list";
+      if (!SolrCLI.safeCheckCollectionExists(collectionListUrl, "newColl")) {
+        toolOutput = baos.toString(StandardCharsets.UTF_8.name());
+        fail("After running Solr cloud example, test collection 'newColl' not found in Solr at: " + solrUrl + "; tool output: " + toolOutput);
+      }
+    } finally {
+      if (cloudClient != null) {
+        try {
+          cloudClient.close();
+        } catch (Exception ignore) {
+        }
+      }
+    }
+
     File node1SolrHome = new File(solrExampleDir, "cloud/node1/solr");
     if (!node1SolrHome.isDirectory()) {
       fail(node1SolrHome.getAbsolutePath()+" not found! run cloud example failed; tool output: "+toolOutput);

@@ -487,6 +600,10 @@ public class TestSolrCLIRunExample extends SolrTestCaseJ4 {
     // delete the collection
     SolrCLI.DeleteTool deleteTool = new SolrCLI.DeleteTool(stdoutSim);
    String[] deleteArgs = new String[] { "-name", collectionName, "-solrUrl", solrUrl };
+    deleteTool.runTool(
+        SolrCLI.processCommandLineArgs(SolrCLI.joinCommonAndToolOptions(deleteTool.getOptions()), deleteArgs));
+    deleteTool = new SolrCLI.DeleteTool(stdoutSim);
+    deleteArgs = new String[]{"-name", "newColl", "-solrUrl", solrUrl};
     deleteTool.runTool(
         SolrCLI.processCommandLineArgs(SolrCLI.joinCommonAndToolOptions(deleteTool.getOptions()), deleteArgs));