SOLR-12847: Remove support for maxShardsPerNode.

This commit is contained in:
Andrzej Bialecki 2020-07-08 17:57:24 +02:00
parent fc5887181b
commit cf742f4596
168 changed files with 227 additions and 784 deletions

View File

@ -8,6 +8,13 @@ https://github.com/apache/lucene-solr/blob/master/solr/solr-ref-guide/src/solr-u
Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
Upgrade Notes
---------------------
* SOLR-12847: maxShardsPerNode parameter has been removed because it was broken and
inconsistent with other replica placement strategies. Other relevant placement strategies
should be used instead, such as autoscaling policy or rules-based placement.
New Features
---------------------
* SOLR-14440: Introduce new Certificate Authentication Plugin to load Principal from certificate subject. (Mike Drob)
@ -86,6 +93,8 @@ Other Changes
* LUCENE-9411: Fail compilation on warnings, 9x gradle-only (Erick Erickson, Dawid Weiss)
Deserves mention here as well as Lucene CHANGES.txt since it affects both.
* SOLR-12847: Remove support for maxShardsPerNode.
Bug Fixes
---------------------
* SOLR-14546: Fix for a relatively hard to hit issue in OverseerTaskProcessor that could lead to out of order execution

View File

@ -104,7 +104,6 @@ public class TestZKPropertiesWriter extends SolrCloudTestCase {
@SuppressWarnings({"unchecked"})
public void testZKPropertiesWriter() throws Exception {
CollectionAdminRequest.createCollectionWithImplicitRouter("collection1", "conf", "1", 1)
.setMaxShardsPerNode(1)
.process(cluster.getSolrClient());
// DIH talks core, SolrCloud talks collection.

View File

@ -53,11 +53,10 @@ public class TestLTROnSolrCloud extends TestRerankBase {
int numberOfShards = random().nextInt(4)+1;
int numberOfReplicas = random().nextInt(2)+1;
int maxShardsPerNode = random().nextInt(4)+1;
int numberOfNodes = (numberOfShards*numberOfReplicas + (maxShardsPerNode-1))/maxShardsPerNode;
int numberOfNodes = numberOfShards * numberOfReplicas;
setupSolrCluster(numberOfShards, numberOfReplicas, numberOfNodes, maxShardsPerNode);
setupSolrCluster(numberOfShards, numberOfReplicas, numberOfNodes);
}
@ -197,7 +196,7 @@ public class TestLTROnSolrCloud extends TestRerankBase {
queryResponse.getResults().get(7).get("features").toString());
}
private void setupSolrCluster(int numShards, int numReplicas, int numServers, int maxShardsPerNode) throws Exception {
private void setupSolrCluster(int numShards, int numReplicas, int numServers) throws Exception {
JettyConfig jc = buildJettyConfig("/solr");
jc = JettyConfig.builder(jc).withServlets(extraServlets).build();
solrCluster = new MiniSolrCloudCluster(numServers, tmpSolrHome.toPath(), jc);
@ -206,7 +205,7 @@ public class TestLTROnSolrCloud extends TestRerankBase {
solrCluster.getSolrClient().setDefaultCollection(COLLECTION);
createCollection(COLLECTION, "conf1", numShards, numReplicas, maxShardsPerNode);
createCollection(COLLECTION, "conf1", numShards, numReplicas);
indexDocuments(COLLECTION);
for (JettySolrRunner solrRunner : solrCluster.getJettySolrRunners()) {
if (!solrRunner.getCoreContainer().getCores().isEmpty()){
@ -219,12 +218,11 @@ public class TestLTROnSolrCloud extends TestRerankBase {
}
private void createCollection(String name, String config, int numShards, int numReplicas, int maxShardsPerNode)
private void createCollection(String name, String config, int numShards, int numReplicas)
throws Exception {
CollectionAdminResponse response;
CollectionAdminRequest.Create create =
CollectionAdminRequest.createCollection(name, config, numShards, numReplicas);
create.setMaxShardsPerNode(maxShardsPerNode);
response = create.process(solrCluster.getSolrClient());
if (response.getStatus() != 0 || response.getErrorMessages() != null) {

View File

@ -31,8 +31,7 @@ public class PrometheusExporterTestBase extends SolrCloudTestCase {
public static final String CONF_DIR = getFile("solr/" + COLLECTION + "/conf").getAbsolutePath();
public static final int NUM_SHARDS = 2;
public static final int NUM_REPLICAS = 2;
public static final int MAX_SHARDS_PER_NODE = 1;
public static final int NUM_NODES = (NUM_SHARDS * NUM_REPLICAS + (MAX_SHARDS_PER_NODE - 1)) / MAX_SHARDS_PER_NODE;
public static final int NUM_NODES = NUM_SHARDS * NUM_REPLICAS;
public static final int TIMEOUT = 60;
public static final ImmutableMap<String, Double> FACET_VALUES = ImmutableMap.<String, Double>builder()
@ -66,7 +65,6 @@ public class PrometheusExporterTestBase extends SolrCloudTestCase {
CollectionAdminRequest
.createCollection(COLLECTION, CONF_NAME, NUM_SHARDS, NUM_REPLICAS)
.setMaxShardsPerNode(MAX_SHARDS_PER_NODE)
.process(cluster.getSolrClient());
AbstractDistribZkTestBase

View File

@ -62,6 +62,7 @@ import org.apache.solr.common.cloud.ReplicaPosition;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.ModifiableSolrParams;
@ -76,12 +77,6 @@ import org.slf4j.LoggerFactory;
public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
/**
* When AddReplica is called with this set to true, then we do not try to find node assignments
* for the add replica API. If set to true, a valid "node" should be specified.
*/
public static final String SKIP_NODE_ASSIGNMENT = "skipNodeAssignment";
private final OverseerCollectionMessageHandler ocmh;
public AddReplicaCmd(OverseerCollectionMessageHandler ocmh) {
@ -233,7 +228,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
ZkStateReader.SHARD_ID_PROP, withCollectionShard,
"node", createReplica.node,
// since we already computed node assignments (which include assigning a node for this withCollection replica) we want to skip the assignment step
SKIP_NODE_ASSIGNMENT, "true",
CollectionAdminParams.SKIP_NODE_ASSIGNMENT, "true",
CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.TRUE.toString()); // set to true because we want `withCollection` to be ready after this collection is created
addReplica(clusterState, props, results, null);
}
@ -347,7 +342,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
EnumMap<Replica.Type, Integer> replicaTypeVsCount,
AtomicReference< PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
boolean skipNodeAssignment = message.getBool(SKIP_NODE_ASSIGNMENT, false);
boolean skipNodeAssignment = message.getBool(CollectionAdminParams.SKIP_NODE_ASSIGNMENT, false);
String sliceName = message.getStr(SHARD_ID_PROP);
DocCollection collection = clusterState.getCollection(collectionName);

View File

@ -322,8 +322,8 @@ public class Assign {
// Only called from addReplica (and by extension createShard) (so far).
//
// Gets a list of candidate nodes to put the required replica(s) on. Throws errors if not enough replicas
// could be created on live nodes given maxShardsPerNode, Replication factor (if from createShard) etc.
// Gets a list of candidate nodes to put the required replica(s) on. Throws errors if the AssignStrategy
// can't allocate valid positions.
@SuppressWarnings({"unchecked"})
public static List<ReplicaPosition> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
String shard, int nrtReplicas, int tlogReplicas, int pullReplicas,
@ -331,8 +331,7 @@ public class Assign {
log.debug("getNodesForNewReplicas() shard: {} , nrtReplicas : {} , tlogReplicas: {} , pullReplicas: {} , createNodeSet {}"
, shard, nrtReplicas, tlogReplicas, pullReplicas, createNodeSet);
DocCollection coll = clusterState.getCollection(collectionName);
int maxShardsPerNode = coll.getMaxShardsPerNode() == -1 ? Integer.MAX_VALUE : coll.getMaxShardsPerNode();
List<String> createNodeList;
List<String> createNodeList = null;
if (createNodeSet instanceof List) {
createNodeList = (List<String>) createNodeSet;
@ -346,22 +345,6 @@ public class Assign {
// be satisfied which then requires study to diagnose the issue.
checkLiveNodes(createNodeList,clusterState);
if (createNodeList == null) { // We only care if we haven't been told to put new replicas on specific nodes.
HashMap<String, ReplicaCount> nodeNameVsShardCount = getNodeNameVsShardCount(collectionName, clusterState, null);
long availableSlots = 0;
for (Map.Entry<String, ReplicaCount> ent : nodeNameVsShardCount.entrySet()) {
//ADDREPLICA can put more than maxShardsPerNode on an instance, so this test is necessary.
if (maxShardsPerNode > ent.getValue().thisCollectionNodes) {
availableSlots += (maxShardsPerNode - ent.getValue().thisCollectionNodes);
}
}
if (availableSlots < nrtReplicas + tlogReplicas + pullReplicas) {
throw new AssignmentException(
String.format(Locale.ROOT, "Cannot create %d new replicas for collection %s given the current number of eligible live nodes %d and a maxShardsPerNode of %d",
nrtReplicas, collectionName, nodeNameVsShardCount.size(), maxShardsPerNode));
}
}
AssignRequest assignRequest = new AssignRequestBuilder()
.forCollection(collectionName)
.forShard(Collections.singletonList(shard))
@ -425,13 +408,12 @@ public class Assign {
}
// if we were given a list, just use that, don't worry about counts
if (createNodeList != null) { // Overrides petty considerations about maxShardsPerNode
if (createNodeList != null) {
return nodeNameVsShardCount;
}
// if we get here we were not given a createNodeList, build a map with real counts.
DocCollection coll = clusterState.getCollection(collectionName);
int maxShardsPerNode = coll.getMaxShardsPerNode() == -1 ? Integer.MAX_VALUE : coll.getMaxShardsPerNode();
Map<String, DocCollection> collections = clusterState.getCollectionsMap();
for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
DocCollection c = entry.getValue();
@ -444,7 +426,6 @@ public class Assign {
count.totalNodes++; // Used to "weigh" whether this node should be used later.
if (entry.getKey().equals(collectionName)) {
count.thisCollectionNodes++;
if (count.thisCollectionNodes >= maxShardsPerNode) nodeNameVsShardCount.remove(replica.getNodeName());
}
}
}

View File

@ -75,7 +75,6 @@ import org.apache.zookeeper.KeeperException.NoNodeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
@ -354,8 +353,6 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
int numPullReplicas = message.getInt(PULL_REPLICAS, 0);
int numSlices = shardNames.size();
int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, 1);
if (maxShardsPerNode == -1) maxShardsPerNode = Integer.MAX_VALUE;
// we need to look at every node and see how many cores it serves
// add our new cores to existing nodes serving the least number of cores
@ -378,22 +375,6 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
, "It's unusual to run two replica of the same slice on the same Solr-instance.");
}
int maxShardsAllowedToCreate = maxShardsPerNode == Integer.MAX_VALUE ?
Integer.MAX_VALUE :
maxShardsPerNode * nodeList.size();
int requestedShardsToCreate = numSlices * totalNumReplicas;
if (maxShardsAllowedToCreate < requestedShardsToCreate) {
throw new Assign.AssignmentException("Cannot create collection " + collectionName + ". Value of "
+ MAX_SHARDS_PER_NODE + " is " + maxShardsPerNode
+ ", and the number of nodes currently live or live and part of your "+OverseerCollectionMessageHandler.CREATE_NODE_SET+" is " + nodeList.size()
+ ". This allows a maximum of " + maxShardsAllowedToCreate
+ " to be created. Value of " + OverseerCollectionMessageHandler.NUM_SLICES + " is " + numSlices
+ ", value of " + NRT_REPLICAS + " is " + numNrtReplicas
+ ", value of " + TLOG_REPLICAS + " is " + numTlogReplicas
+ " and value of " + PULL_REPLICAS + " is " + numPullReplicas
+ ". This requires " + requestedShardsToCreate
+ " shards to be created (higher than the allowed number)");
}
Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
.forCollection(collectionName)
.forShard(shardNames)

View File

@ -146,7 +146,6 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler,
ZkStateReader.NRT_REPLICAS, "1",
ZkStateReader.TLOG_REPLICAS, "0",
ZkStateReader.PULL_REPLICAS, "0",
ZkStateReader.MAX_SHARDS_PER_NODE, "1",
ZkStateReader.AUTO_ADD_REPLICAS, "false",
DocCollection.RULE, null,
POLICY, null,

View File

@ -109,7 +109,6 @@ public class ReindexCollectionCmd implements OverseerCollectionMessageHandler.Cm
ZkStateReader.PULL_REPLICAS,
ZkStateReader.TLOG_REPLICAS,
ZkStateReader.REPLICATION_FACTOR,
ZkStateReader.MAX_SHARDS_PER_NODE,
"shards",
Policy.POLICY,
CollectionAdminParams.CREATE_NODE_SET_PARAM,
@ -242,7 +241,6 @@ public class ReindexCollectionCmd implements OverseerCollectionMessageHandler.Cm
Integer numTlog = message.getInt(ZkStateReader.TLOG_REPLICAS, coll.getNumTlogReplicas());
Integer numPull = message.getInt(ZkStateReader.PULL_REPLICAS, coll.getNumPullReplicas());
int numShards = message.getInt(ZkStateReader.NUM_SHARDS_PROP, coll.getActiveSlices().size());
int maxShardsPerNode = message.getInt(ZkStateReader.MAX_SHARDS_PER_NODE, coll.getMaxShardsPerNode());
DocRouter router = coll.getRouter();
if (router == null) {
router = DocRouter.DEFAULT;
@ -320,7 +318,6 @@ public class ReindexCollectionCmd implements OverseerCollectionMessageHandler.Cm
}
}
propMap.put(ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode);
propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, true);
if (rf != null) {
propMap.put(ZkStateReader.REPLICATION_FACTOR, rf);

View File

@ -26,7 +26,6 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
@ -65,7 +64,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
@ -139,16 +137,6 @@ public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
int totalReplicasPerShard = numNrtReplicas + numTlogReplicas + numPullReplicas;
assert totalReplicasPerShard > 0;
int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, backupCollectionState.getMaxShardsPerNode());
int availableNodeCount = nodeList.size();
if (maxShardsPerNode != -1 && (numShards * totalReplicasPerShard) > (availableNodeCount * maxShardsPerNode)) {
throw new SolrException(ErrorCode.BAD_REQUEST,
String.format(Locale.ROOT, "Solr cloud with available number of nodes:%d is insufficient for"
+ " restoring a collection with %d shards, total replicas per shard %d and maxShardsPerNode %d."
+ " Consider increasing maxShardsPerNode value OR number of available nodes.",
availableNodeCount, numShards, totalReplicasPerShard, maxShardsPerNode));
}
//Upload the configs
String configName = (String) properties.get(CollectionAdminParams.COLL_CONF);
String restoreConfigName = message.getStr(CollectionAdminParams.COLL_CONF, configName);
@ -172,7 +160,6 @@ public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
propMap.put(NRT_REPLICAS, numNrtReplicas);
propMap.put(TLOG_REPLICAS, numTlogReplicas);
propMap.put(PULL_REPLICAS, numPullReplicas);
properties.put(MAX_SHARDS_PER_NODE, maxShardsPerNode);
// inherit settings from input API, defaulting to the backup's setting. Ex: replicationFactor
for (String collProp : OverseerCollectionMessageHandler.COLLECTION_PROPS_AND_DEFAULTS.keySet()) {

View File

@ -434,8 +434,6 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
List<String> nodeList = new ArrayList<>(nodes.size());
nodeList.addAll(nodes);
// TODO: Have maxShardsPerNode param for this operation?
// Remove the node that hosts the parent shard for replica creation.
nodeList.remove(nodeName);

View File

@ -105,7 +105,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
@ -1009,8 +1008,6 @@ public class SimClusterStateProvider implements ClusterStateProvider {
// fail fast if parameters are wrong or incomplete
List<String> shardNames = CreateCollectionCmd.populateShardNames(props, router);
int maxShardsPerNode = props.getInt(MAX_SHARDS_PER_NODE, 1);
if (maxShardsPerNode == -1) maxShardsPerNode = Integer.MAX_VALUE;
CreateCollectionCmd.checkReplicaTypes(props);
// always force getting fresh state

View File

@ -225,7 +225,6 @@ public class SimUtils {
perColl.put("activeShards", coll.getActiveSlices().size());
perColl.put("inactiveShards", coll.getSlices().size() - coll.getActiveSlices().size());
perColl.put("rf", coll.getReplicationFactor());
perColl.put("maxShardsPerNode", coll.getMaxShardsPerNode());
perColl.put("maxActualShardsPerNode", maxActualShardsPerNode);
perColl.put("minActualShardsPerNode", minActualShardsPerNode);
perColl.put("maxShardReplicasPerNode", maxShardReplicasPerNode);

View File

@ -127,7 +127,6 @@ import static org.apache.solr.common.cloud.DocCollection.RULE;
import static org.apache.solr.common.cloud.DocCollection.SNITCH;
import static org.apache.solr.common.cloud.ZkStateReader.AUTO_ADD_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
@ -144,6 +143,7 @@ import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
import static org.apache.solr.common.params.CollectionAdminParams.FOLLOW_ALIASES;
import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_NAME;
import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_VALUE;
import static org.apache.solr.common.params.CollectionAdminParams.SKIP_NODE_ASSIGNMENT;
import static org.apache.solr.common.params.CollectionAdminParams.WITH_COLLECTION;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
@ -459,7 +459,6 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
REPLICATION_FACTOR,
COLL_CONF,
NUM_SLICES,
MAX_SHARDS_PER_NODE,
CREATE_NODE_SET,
CREATE_NODE_SET_SHUFFLE,
SHARDS_PROP,
@ -559,7 +558,6 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
PULL_REPLICAS,
TLOG_REPLICAS,
REPLICATION_FACTOR,
MAX_SHARDS_PER_NODE,
POLICY,
CREATE_NODE_SET,
CREATE_NODE_SET_SHUFFLE,
@ -942,7 +940,8 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
TLOG_REPLICAS,
PULL_REPLICAS,
CREATE_NODE_SET,
FOLLOW_ALIASES);
FOLLOW_ALIASES,
SKIP_NODE_ASSIGNMENT);
return copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
}),
OVERSEERSTATUS_OP(OVERSEERSTATUS, (req, rsp, h) -> new LinkedHashMap<>()),
@ -1170,7 +1169,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
}
// from CREATE_OP:
copy(req.getParams(), params, COLL_CONF, REPLICATION_FACTOR, NRT_REPLICAS, TLOG_REPLICAS,
PULL_REPLICAS, MAX_SHARDS_PER_NODE, AUTO_ADD_REPLICAS, CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE);
PULL_REPLICAS, AUTO_ADD_REPLICAS, CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE);
copyPropertiesWithPrefix(req.getParams(), params, COLL_PROP_PREFIX);
return params;
}),

View File

@ -1726,12 +1726,6 @@ public class SolrCLI implements CLIO {
.required(false)
.desc("Number of copies of each document across the collection (replicas per shard); default is 1")
.build(),
Option.builder("maxShardsPerNode")
.argName("#")
.hasArg()
.required(false)
.desc("Maximum number of shards per Solr node; default is determined based on the number of shards, replication factor, and live nodes.")
.build(),
Option.builder("confdir")
.argName("NAME")
.hasArg()
@ -1922,11 +1916,6 @@ public class SolrCLI implements CLIO {
// build a URL to create the collection
int numShards = optionAsInt(cli, "shards", 1);
int replicationFactor = optionAsInt(cli, "replicationFactor", 1);
int maxShardsPerNode = -1;
if (cli.hasOption("maxShardsPerNode")) {
maxShardsPerNode = Integer.parseInt(cli.getOptionValue("maxShardsPerNode"));
}
String confname = cli.getOptionValue("confname");
String confdir = cli.getOptionValue("confdir");
@ -1962,12 +1951,11 @@ public class SolrCLI implements CLIO {
// doesn't seem to exist ... try to create
String createCollectionUrl =
String.format(Locale.ROOT,
"%s/admin/collections?action=CREATE&name=%s&numShards=%d&replicationFactor=%d&maxShardsPerNode=%d",
"%s/admin/collections?action=CREATE&name=%s&numShards=%d&replicationFactor=%d",
baseUrl,
collectionName,
numShards,
replicationFactor,
maxShardsPerNode);
replicationFactor);
if (confname != null && !"".equals(confname.trim())) {
createCollectionUrl = createCollectionUrl + String.format(Locale.ROOT, "&collection.configName=%s", confname);
}

View File

@ -80,7 +80,6 @@
"type":"NRT",
"force_set_state":"false"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -114,7 +113,6 @@
"force_set_state":"false",
"leader":"true"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -148,7 +146,6 @@
"force_set_state":"false",
"leader":"true"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -182,7 +179,6 @@
"type":"NRT",
"force_set_state":"false"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -216,7 +212,6 @@
"type":"NRT",
"force_set_state":"false"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -549,7 +544,6 @@
"force_set_state":"false"}},
"stateTimestamp":"1562040233681548279"}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -584,7 +578,6 @@
"type":"NRT",
"force_set_state":"false"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -2538,7 +2531,6 @@
"force_set_state":"false"}},
"stateTimestamp":"1565369006475856752"}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"2",
"tlogReplicas":"0"},
@ -2572,7 +2564,6 @@
"force_set_state":"false",
"leader":"true"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -2606,7 +2597,6 @@
"force_set_state":"false",
"leader":"true"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -2640,7 +2630,6 @@
"force_set_state":"false",
"leader":"true"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -2659,7 +2648,6 @@
"force_set_state":"false",
"leader":"true"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"1",
"tlogReplicas":"0"},
@ -2693,7 +2681,6 @@
"force_set_state":"false",
"leader":"true"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -2727,7 +2714,6 @@
"force_set_state":"false",
"leader":"true"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -2814,7 +2800,6 @@
"type":"NRT",
"force_set_state":"false"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"},
@ -2848,7 +2833,6 @@
"force_set_state":"false",
"leader":"true"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"true",
"nrtReplicas":"3",
"tlogReplicas":"0"}}}
"tlogReplicas":"0"}}}

View File

@ -1791,7 +1791,6 @@
"activeShards":72,
"inactiveShards":0,
"rf":2,
"maxShardsPerNode":1,
"maxActualShardsPerNode":6,
"minActualShardsPerNode":6,
"maxShardReplicasPerNode":1,
@ -1807,7 +1806,6 @@
"activeShards":12,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":4,
"minActualShardsPerNode":4,
"maxShardReplicasPerNode":1,
@ -1823,7 +1821,6 @@
"activeShards":3,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -1839,7 +1836,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -1855,7 +1851,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":1,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -1871,7 +1866,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -1887,7 +1881,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -1903,7 +1896,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -1919,7 +1911,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -1935,7 +1926,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -1951,7 +1941,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -1967,7 +1956,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -1983,7 +1971,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -1999,7 +1986,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -2015,7 +2001,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -2031,7 +2016,6 @@
"activeShards":1,
"inactiveShards":0,
"rf":3,
"maxShardsPerNode":1,
"maxActualShardsPerNode":1,
"minActualShardsPerNode":1,
"maxShardReplicasPerNode":1,
@ -2042,4 +2026,4 @@
"minCoresPerNode":1,
"avgShardSize":22.679232054390013,
"maxShardSize":22.679232054390013,
"minShardSize":22.679232054390013}}}
"minShardSize":22.679232054390013}}}

View File

@ -41,8 +41,7 @@ public class HelloWorldSolrCloudTestCase extends SolrCloudTestCase {
private static final int numShards = 3;
private static final int numReplicas = 2;
private static final int maxShardsPerNode = 2;
private static final int nodeCount = (numShards*numReplicas + (maxShardsPerNode-1))/maxShardsPerNode;
private static final int nodeCount = numShards*numReplicas;
private static final String id = "id";
@ -56,7 +55,6 @@ public class HelloWorldSolrCloudTestCase extends SolrCloudTestCase {
// create an empty collection
CollectionAdminRequest.createCollection(COLLECTION, "conf", numShards, numReplicas)
.setMaxShardsPerNode(maxShardsPerNode)
.process(cluster.getSolrClient());
// add a document

View File

@ -17,7 +17,6 @@
package org.apache.solr.cloud;
import static org.apache.solr.client.solrj.response.RequestStatusState.COMPLETED;
import static org.apache.solr.client.solrj.response.RequestStatusState.FAILED;
import java.lang.invoke.MethodHandles;
import java.util.Collection;
@ -62,7 +61,6 @@ public class AddReplicaTest extends SolrCloudTestCase {
CloudSolrClient cloudClient = cluster.getSolrClient();
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collection, "conf1", 1, 1);
create.setMaxShardsPerNode(2);
cloudClient.request(create);
cluster.waitForActiveCollection(collection, 1, 1);
@ -82,13 +80,6 @@ public class AddReplicaTest extends SolrCloudTestCase {
assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
// try to add 5 more replicas which should fail because numNodes(4)*maxShardsPerNode(2)=8 and 4 replicas already exist
addReplica = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
.setNrtReplicas(3)
.setTlogReplicas(1)
.setPullReplicas(1);
status = addReplica.processAndWait(collection + "_xyz1", cloudClient, 120);
assertEquals(FAILED, status);
docCollection = cloudClient.getZkStateReader().getClusterState().getCollectionOrNull(collection);
assertNotNull(docCollection);
// sanity check that everything is as before
@ -97,7 +88,7 @@ public class AddReplicaTest extends SolrCloudTestCase {
assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
// but adding any number of replicas is supported if an explicit create node set is specified
// adding any number of replicas is supported if an explicit create node set is specified
// so test that as well
LinkedHashSet<String> createNodeSet = new LinkedHashSet<>(2);
createNodeSet.add(cluster.getRandomJetty(random()).getNodeName());
@ -130,7 +121,6 @@ public class AddReplicaTest extends SolrCloudTestCase {
CloudSolrClient cloudClient = cluster.getSolrClient();
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collection, "conf1", 2, 1);
create.setMaxShardsPerNode(2);
cloudClient.request(create);
cluster.waitForActiveCollection(collection, 2, 2);

View File

@ -51,7 +51,6 @@ public class AssignBackwardCompatibilityTest extends SolrCloudTestCase {
.addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-dynamic").resolve("conf"))
.configure();
CollectionAdminRequest.createCollection(COLLECTION, 1, 4)
.setMaxShardsPerNode(1000)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION, 1, 4);
}

View File

@ -776,14 +776,13 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
@Override
protected CollectionAdminResponse createCollection(Map<String, List<Integer>> collectionInfos,
String collectionName, String configSetName, int numShards, int numReplicas, int maxShardsPerNode, SolrClient client, String createNodeSetStr) throws SolrServerException, IOException {
String collectionName, String configSetName, int numShards, int numReplicas, SolrClient client, String createNodeSetStr) throws SolrServerException, IOException {
// TODO: Use CollectionAdminRequest for this test
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", CollectionAction.CREATE.toString());
params.set(OverseerCollectionMessageHandler.NUM_SLICES, numShards);
params.set(ZkStateReader.REPLICATION_FACTOR, numReplicas);
params.set(ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode);
if (createNodeSetStr != null) params.set(OverseerCollectionMessageHandler.CREATE_NODE_SET, createNodeSetStr);
int clientIndex = clients.size() > 1 ? random().nextInt(2) : 0;
@ -959,7 +958,6 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
log.info("### STARTING testANewCollectionInOneInstanceWithManualShardAssignement");
assertEquals(0, CollectionAdminRequest.createCollection(oneInstanceCollection2, "conf1", 2, 2)
.setCreateNodeSet("")
.setMaxShardsPerNode(4)
.process(cloudClient).getStatus());
List<SolrClient> collectionClients = new ArrayList<>();
@ -1107,7 +1105,6 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
log.info("### STARTING testANewCollectionInOneInstance");
CollectionAdminResponse response = CollectionAdminRequest.createCollection(oneInstanceCollection, "conf1", 2, 2)
.setCreateNodeSet(jettys.get(0).getNodeName())
.setMaxShardsPerNode(4)
.process(cloudClient);
assertEquals(0, response.getStatus());
List<SolrClient> collectionClients = new ArrayList<>();

View File

@ -289,7 +289,7 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
try (CloudSolrClient client = createCloudClient("collection1", 30000)) {
createCollection(null, "testcollection",
1, 1, 1, client, null, "conf1");
1, 1, client, null, "conf1");
}
List<Integer> numShardsNumReplicas = new ArrayList<>(2);

View File

@ -308,9 +308,8 @@ public class ChaosMonkeyNothingIsSafeWithPullReplicasTest extends AbstractFullDi
}
try (CloudSolrClient client = createCloudClient("collection1", 30000)) {
// We don't really know how many live nodes we have at this point, so "maxShardsPerNode" needs to be > 1
createCollection(null, "testcollection",
1, 1, 10, client, null, "conf1");
1, 1, client, null, "conf1");
}
List<Integer> numShardsNumReplicas = new ArrayList<>(2);
numShardsNumReplicas.add(1);

View File

@ -176,7 +176,7 @@ public class ChaosMonkeySafeLeaderTest extends AbstractFullDistribZkTestBase {
}
try (CloudSolrClient client = createCloudClient("collection1")) {
createCollection(null, "testcollection", 1, 1, 1, client, null, "conf1");
createCollection(null, "testcollection", 1, 1, client, null, "conf1");
}
List<Integer> numShardsNumReplicas = new ArrayList<>(2);

View File

@ -224,7 +224,7 @@ public class ChaosMonkeySafeLeaderWithPullReplicasTest extends AbstractFullDistr
}
try (CloudSolrClient client = createCloudClient("collection1")) {
createCollection(null, "testcollection", 1, 1, 100, client, null, "conf1");
createCollection(null, "testcollection", 1, 1, client, null, "conf1");
}
List<Integer> numShardsNumReplicas = new ArrayList<>(2);

View File

@ -35,7 +35,7 @@ import org.apache.solr.common.util.Utils;
/**
* A utility class that can create mock ZkStateReader objects with custom ClusterState objects created
* using a simple string based description. See {@link #buildClusterState(String, int, int, String...)} for
* using a simple string based description. See {@link #buildClusterState(String, int, String...)} for
* details on how the cluster state can be created.
*
* @lucene.experimental
@ -48,10 +48,6 @@ public class ClusterStateMockUtil {
return buildClusterState(clusterDescription, 1, liveNodes);
}
public static ZkStateReader buildClusterState(String clusterDescription, int replicationFactor, String ... liveNodes) {
return buildClusterState(clusterDescription, replicationFactor, 10, liveNodes);
}
/**
* This method lets you construct a complex ClusterState object by using simple strings of letters.
*
@ -76,7 +72,6 @@ public class ClusterStateMockUtil {
* Result:
* {
* "collection2":{
* "maxShardsPerNode":"1",
* "replicationFactor":"1",
* "shards":{"slice1":{
* "state":"active",
@ -85,7 +80,6 @@ public class ClusterStateMockUtil {
* "node_name":"baseUrl1_",
* "base_url":"http://baseUrl1"}}}}},
* "collection1":{
* "maxShardsPerNode":"1",
* "replicationFactor":"1",
* "shards":{
* "slice1":{
@ -112,11 +106,10 @@ public class ClusterStateMockUtil {
*
*/
@SuppressWarnings("resource")
public static ZkStateReader buildClusterState(String clusterDescription, int replicationFactor, int maxShardsPerNode, String ... liveNodes) {
public static ZkStateReader buildClusterState(String clusterDescription, int replicationFactor, String ... liveNodes) {
Map<String,Slice> slices = null;
Map<String,Replica> replicas = null;
Map<String,Object> collectionProps = new HashMap<>();
collectionProps.put(ZkStateReader.MAX_SHARDS_PER_NODE, Integer.toString(maxShardsPerNode));
collectionProps.put(ZkStateReader.REPLICATION_FACTOR, Integer.toString(replicationFactor));
Map<String,DocCollection> collectionStates = new HashMap<>();
DocCollection docCollection = null;

View File

@ -364,7 +364,6 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
String collectionName = "solrj_implicit";
CollectionAdminResponse response
= CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardA,shardB", 1, 1, 1)
.setMaxShardsPerNode(3)
.process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
@ -1072,13 +1071,6 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
waitForState("Expecting attribute 'replicationFactor' to be 25", collection,
(n, c) -> 25 == c.getReplicationFactor());
CollectionAdminRequest.modifyCollection(collection, null)
.unsetAttribute("maxShardsPerNode")
.process(cluster.getSolrClient());
waitForState("Expecting attribute 'maxShardsPerNode' to be deleted", collection,
(n, c) -> null == c.get("maxShardsPerNode"));
expectThrows(IllegalArgumentException.class,
"An attempt to set unknown collection attribute should have failed",
() -> CollectionAdminRequest.modifyCollection(collection, null)

View File

@ -120,7 +120,6 @@ public class CreateRoutedAliasTest extends SolrCloudTestCase {
" \"config\":\"_default\",\n" +
" \"tlogReplicas\":1,\n" +
" \"pullReplicas\":1,\n" +
" \"maxShardsPerNode\":4,\n" + // note: we also expect the 'policy' to work fine
" \"nodeSet\": '" + createNode + "',\n" +
" \"properties\" : {\n" +
" \"foobar\":\"bazbam\",\n" +
@ -152,7 +151,6 @@ public class CreateRoutedAliasTest extends SolrCloudTestCase {
//assertEquals(1, coll.getNumNrtReplicas().intValue()); // TODO seems to be erroneous; I figured 'null'
assertEquals(1, coll.getNumTlogReplicas().intValue()); // per-shard
assertEquals(1, coll.getNumPullReplicas().intValue()); // per-shard
assertEquals(4, coll.getMaxShardsPerNode());
assertTrue("nodeSet didn't work?",
coll.getSlices().stream().flatMap(s -> s.getReplicas().stream())
.map(Replica::getNodeName).allMatch(createNode::equals));

View File

@ -54,10 +54,8 @@ public class DeleteInactiveReplicaTest extends SolrCloudTestCase {
String collectionName = "delDeadColl";
int replicationFactor = 2;
int numShards = 2;
int maxShardsPerNode = ((((numShards + 1) * replicationFactor) / cluster.getJettySolrRunners().size())) + 1;
CollectionAdminRequest.createCollection(collectionName, "conf", numShards, replicationFactor)
.setMaxShardsPerNode(maxShardsPerNode)
.process(cluster.getSolrClient());
waitForState("Expected a cluster of 2 shards and 2 replicas", collectionName, (n, c) -> {
return DocCollection.isFullyActive(n, c, numShards, replicationFactor);

View File

@ -37,7 +37,6 @@ public class DeleteLastCustomShardedReplicaTest extends SolrCloudTestCase {
final String collectionName = "customcollreplicadeletion";
CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "a,b", 1)
.setMaxShardsPerNode(5)
.process(cluster.getSolrClient());
DocCollection collectionState = getCollectionState(collectionName);

View File

@ -68,7 +68,7 @@ public class DeleteNodeTest extends SolrCloudTestCase {
CollectionAdminRequest.createCollection(coll, "conf1", 5, 1, 0, 0),
CollectionAdminRequest.createCollection(coll, "conf1", 5, 0, 1, 0)
);
create.setCreateNodeSet(StrUtils.join(l, ',')).setMaxShardsPerNode(3);
create.setCreateNodeSet(StrUtils.join(l, ','));
cloudClient.request(create);
state = cloudClient.getZkStateReader().getClusterState();
String node2bdecommissioned = l.get(0);

View File

@ -114,7 +114,6 @@ public class DeleteShardTest extends SolrCloudTestCase {
final String collection = "deleteshard_test";
CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c", 1)
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(collection, 3, 3);

View File

@ -86,7 +86,6 @@ public class DocValuesNotIndexedTest extends SolrCloudTestCase {
// Need enough shards that we have some shards that don't have any docs on them.
CollectionAdminRequest.createCollection(COLLECTION, "conf1", 4, 1)
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION, 4, 4);

View File

@ -75,7 +75,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
String testCollectionName = "forceleader_lower_terms_collection";
createCollection(testCollectionName, "conf1", 1, 3, 1);
createCollection(testCollectionName, "conf1", 1, 3);
try {

View File

@ -73,7 +73,7 @@ public class HttpPartitionOnCommitTest extends BasicDistributedZkTest {
// create a collection that has 2 shard and 2 replicas
String testCollectionName = "c8n_2x2_commits";
createCollection(testCollectionName, "conf1", 2, 2, 1);
createCollection(testCollectionName, "conf1", 2, 2);
cloudClient.setDefaultCollection(testCollectionName);
List<Replica> notLeaders =
@ -122,7 +122,7 @@ public class HttpPartitionOnCommitTest extends BasicDistributedZkTest {
// create a collection that has 1 shard and 3 replicas
String testCollectionName = "c8n_1x3_commits";
createCollection(testCollectionName, "conf1", 1, 3, 1);
createCollection(testCollectionName, "conf1", 1, 3);
cloudClient.setDefaultCollection(testCollectionName);
List<Replica> notLeaders =

View File

@ -159,7 +159,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
TestInjection.prepRecoveryOpPauseForever = "true:100";
createCollection(testCollectionName, "conf1", 1, 2, 1);
createCollection(testCollectionName, "conf1", 1, 2);
cloudClient.setDefaultCollection(testCollectionName);
sendDoc(1, 2);
@ -211,7 +211,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
protected void testRf2() throws Exception {
// create a collection that has 1 shard but 2 replicas
String testCollectionName = "c8n_1x2";
createCollectionRetry(testCollectionName, "conf1", 1, 2, 1);
createCollectionRetry(testCollectionName, "conf1", 1, 2);
cloudClient.setDefaultCollection(testCollectionName);
sendDoc(1);
@ -330,7 +330,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
protected void testRf3() throws Exception {
// create a collection that has 1 shard but 2 replicas
String testCollectionName = "c8n_1x3";
createCollectionRetry(testCollectionName, "conf1", 1, 3, 1);
createCollectionRetry(testCollectionName, "conf1", 1, 3);
cloudClient.setDefaultCollection(testCollectionName);
@ -385,7 +385,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
protected void testLeaderZkSessionLoss() throws Exception {
String testCollectionName = "c8n_1x2_leader_session_loss";
createCollectionRetry(testCollectionName, "conf1", 1, 2, 1);
createCollectionRetry(testCollectionName, "conf1", 1, 2);
cloudClient.setDefaultCollection(testCollectionName);
sendDoc(1);

View File

@ -52,7 +52,6 @@ public class LeaderElectionContextKeyTest extends SolrCloudTestCase {
// therefore Assign.buildCoreNodeName will create same coreNodeName
CollectionAdminRequest
.createCollection("testCollection"+i, "config", 2, 1)
.setMaxShardsPerNode(100)
.setCreateNodeSet("")
.process(cluster.getSolrClient());
CollectionAdminRequest

View File

@ -51,7 +51,7 @@ public class LeaderElectionIntegrationTest extends SolrCloudTestCase {
private void createCollection(String collection) throws IOException, SolrServerException {
assertEquals(0, CollectionAdminRequest.createCollection(collection,
"conf", 2, 1)
.setMaxShardsPerNode(1).process(cluster.getSolrClient()).getStatus());
.process(cluster.getSolrClient()).getStatus());
for (int i = 1; i < NUM_REPLICAS_OF_SHARD1; i++) {
assertTrue(
CollectionAdminRequest.addReplicaToShard(collection, "shard1").process(cluster.getSolrClient()).isSuccess()

View File

@ -63,7 +63,7 @@ public class LeaderFailoverAfterPartitionTest extends HttpPartitionTest {
// kill the leader ... see what happens
// create a collection that has 1 shard but 3 replicas
String testCollectionName = "c8n_1x3_lf"; // _lf is leader fails
createCollection(testCollectionName, "conf1", 1, 3, 1);
createCollection(testCollectionName, "conf1", 1, 3);
cloudClient.setDefaultCollection(testCollectionName);
sendDoc(1);

View File

@ -57,7 +57,6 @@ public class MissingSegmentRecoveryTest extends SolrCloudTestCase {
@Before
public void setup() throws SolrServerException, IOException {
CollectionAdminRequest.createCollection(collection, "conf", 1, 2)
.setMaxShardsPerNode(1)
.process(cluster.getSolrClient());
waitForState("Expected a collection with one shard and two replicas", collection, clusterShape(1, 2));
cluster.getSolrClient().setDefaultCollection(collection);

View File

@ -105,7 +105,6 @@ public class MoveReplicaTest extends SolrCloudTestCase {
// random create tlog or pull type replicas with nrt
boolean isTlog = random().nextBoolean();
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf1", 2, 1, isTlog ? 1 : 0, !isTlog ? 1 : 0);
create.setMaxShardsPerNode(2);
create.setAutoAddReplicas(false);
cloudClient.request(create);

View File

@ -27,7 +27,6 @@ public class MultiSolrCloudTestCaseTest extends MultiSolrCloudTestCase {
private static int numShards;
private static int numReplicas;
private static int maxShardsPerNode;
private static int nodesPerCluster;
@BeforeClass
@ -47,8 +46,7 @@ public class MultiSolrCloudTestCaseTest extends MultiSolrCloudTestCase {
numShards = 1+random().nextInt(2);
numReplicas = 1+random().nextInt(2);
maxShardsPerNode = 1+random().nextInt(2);
nodesPerCluster = (numShards*numReplicas + (maxShardsPerNode-1))/maxShardsPerNode;
nodesPerCluster = numShards*numReplicas;
doSetupClusters(
clusterIds,
@ -58,7 +56,7 @@ public class MultiSolrCloudTestCaseTest extends MultiSolrCloudTestCase {
return nodesPerCluster;
}
},
new DefaultClusterInitFunction(numShards, numReplicas, maxShardsPerNode) {
new DefaultClusterInitFunction(numShards, numReplicas) {
@Override
public void accept(String clusterId, MiniSolrCloudCluster cluster) {
for (final String collection : collections) {

View File

@ -43,10 +43,9 @@ public class NodeMutatorTest extends SolrTestCaseJ4Test {
public void downNodeReportsAllImpactedCollectionsAndNothingElse() throws IOException {
NodeMutator nm = new NodeMutator();
//We use 2 nodes with maxShardsPerNode as 1
//Collection1: 2 shards X 1 replica = replica1 on node1 and replica2 on node2
//Collection2: 1 shard X 1 replica = replica1 on node2
ZkStateReader reader = ClusterStateMockUtil.buildClusterState("csrr2rDcsr2", 1, 1, NODE1, NODE2);
ZkStateReader reader = ClusterStateMockUtil.buildClusterState("csrr2rDcsr2", 1, NODE1, NODE2);
ClusterState clusterState = reader.getClusterState();
assertEquals(clusterState.getCollection("collection1").getReplica("replica1").getBaseUrl(), NODE1_URL);
assertEquals(clusterState.getCollection("collection1").getReplica("replica2").getBaseUrl(), NODE2_URL);
@ -60,11 +59,10 @@ public class NodeMutatorTest extends SolrTestCaseJ4Test {
assertEquals(writes.get(0).collection.getReplica("replica2").getState(), Replica.State.ACTIVE);
reader.close();
//We use 3 nodes with maxShardsPerNode as 1
//Collection1: 2 shards X 1 replica = replica1 on node1 and replica2 on node2
//Collection2: 1 shard X 1 replica = replica1 on node2
//Collection3: 1 shard X 3 replica = replica1 on node1 , replica2 on node2, replica3 on node3
reader = ClusterStateMockUtil.buildClusterState("csrr2rDcsr2csr1r2r3", 1, 1, NODE1, NODE2, NODE3);
reader = ClusterStateMockUtil.buildClusterState("csrr2rDcsr2csr1r2r3", 1, NODE1, NODE2, NODE3);
clusterState = reader.getClusterState();
assertEquals(clusterState.getCollection("collection1").getReplica("replica1").getBaseUrl(), NODE1_URL);
assertEquals(clusterState.getCollection("collection1").getReplica("replica2").getBaseUrl(), NODE2_URL);

View File

@ -539,14 +539,13 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
}
protected void issueCreateJob(Integer numberOfSlices,
Integer replicationFactor, Integer maxShardsPerNode, List<String> createNodeList, boolean sendCreateNodeList, boolean createNodeSetShuffle) {
Integer replicationFactor, List<String> createNodeList, boolean sendCreateNodeList, boolean createNodeSetShuffle) {
Map<String,Object> propMap = Utils.makeMap(
Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
ZkStateReader.REPLICATION_FACTOR, replicationFactor.toString(),
"name", COLLECTION_NAME,
"collection.configName", CONFIG_NAME,
OverseerCollectionMessageHandler.NUM_SLICES, numberOfSlices.toString(),
ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode.toString()
OverseerCollectionMessageHandler.NUM_SLICES, numberOfSlices.toString()
);
if (sendCreateNodeList) {
propMap.put(OverseerCollectionMessageHandler.CREATE_NODE_SET,
@ -720,7 +719,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
SEND_NULL
}
protected void testTemplate(Integer numberOfNodes, Integer numberOfNodesToCreateOn, CreateNodeListOptions createNodeListOption, Integer replicationFactor,
Integer numberOfSlices, Integer maxShardsPerNode,
Integer numberOfSlices,
boolean collectionExceptedToBeCreated) throws Exception {
assertTrue("Wrong usage of testTemplate. numberOfNodesToCreateOn " + numberOfNodesToCreateOn + " is not allowed to be higher than numberOfNodes " + numberOfNodes, numberOfNodes.intValue() >= numberOfNodesToCreateOn.intValue());
assertTrue("Wrong usage of testTemplage. createNodeListOption has to be " + CreateNodeListOptions.SEND + " when numberOfNodes and numberOfNodesToCreateOn are unequal", ((createNodeListOption == CreateNodeListOptions.SEND) || (numberOfNodes.intValue() == numberOfNodesToCreateOn.intValue())));
@ -750,7 +749,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
final List<String> createNodeListToSend = ((createNodeListOption != CreateNodeListOptions.SEND_NULL) ? createNodeList : null);
final boolean sendCreateNodeList = (createNodeListOption != CreateNodeListOptions.DONT_SEND);
final boolean dontShuffleCreateNodeSet = (createNodeListToSend != null) && sendCreateNodeList && random().nextBoolean();
issueCreateJob(numberOfSlices, replicationFactor, maxShardsPerNode, createNodeListToSend, sendCreateNodeList, !dontShuffleCreateNodeSet);
issueCreateJob(numberOfSlices, replicationFactor, createNodeListToSend, sendCreateNodeList, !dontShuffleCreateNodeSet);
waitForEmptyQueue(10000);
if (collectionExceptedToBeCreated) {
@ -769,9 +768,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.DONT_SEND;
Integer replicationFactor = 1;
Integer numberOfSlices = 8;
Integer maxShardsPerNode = 2;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, true);
true);
}
@Test
@ -781,9 +779,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.DONT_SEND;
Integer replicationFactor = 2;
Integer numberOfSlices = 4;
Integer maxShardsPerNode = 2;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, true);
true);
}
@Test
@ -793,9 +790,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.SEND;
Integer replicationFactor = 1;
Integer numberOfSlices = 8;
Integer maxShardsPerNode = 2;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, true);
true);
}
@Test
@ -805,9 +801,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.SEND;
Integer replicationFactor = 2;
Integer numberOfSlices = 4;
Integer maxShardsPerNode = 2;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, true);
true);
}
@Test
@ -817,9 +812,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.SEND_NULL;
Integer replicationFactor = 1;
Integer numberOfSlices = 8;
Integer maxShardsPerNode = 2;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, true);
true);
}
@Test
@ -829,9 +823,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.SEND_NULL;
Integer replicationFactor = 2;
Integer numberOfSlices = 4;
Integer maxShardsPerNode = 2;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, true);
true);
}
@Test
@ -841,9 +834,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.DONT_SEND;
Integer replicationFactor = 1;
Integer numberOfSlices = 6;
Integer maxShardsPerNode = 2;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, true);
true);
}
@Test
@ -853,37 +845,10 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.DONT_SEND;
Integer replicationFactor = 2;
Integer numberOfSlices = 3;
Integer maxShardsPerNode = 2;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, true);
true);
}
@Test
public void testNoReplicationCollectionNotCreatedDueToMaxShardsPerNodeLimit()
throws Exception {
Integer numberOfNodes = 4;
Integer numberOfNodesToCreateOn = 4;
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.DONT_SEND;
Integer replicationFactor = 1;
Integer numberOfSlices = 6;
Integer maxShardsPerNode = 1;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, false);
}
@Test
public void testReplicationCollectionNotCreatedDueToMaxShardsPerNodeLimit()
throws Exception {
Integer numberOfNodes = 4;
Integer numberOfNodesToCreateOn = 4;
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.DONT_SEND;
Integer replicationFactor = 2;
Integer numberOfSlices = 3;
Integer maxShardsPerNode = 1;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, false);
}
@Test
public void testNoReplicationLimitedNodesToCreateOn()
throws Exception {
@ -892,9 +857,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.SEND;
Integer replicationFactor = 1;
Integer numberOfSlices = 6;
Integer maxShardsPerNode = 3;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, true);
true);
}
@Test
@ -905,9 +869,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.SEND;
Integer replicationFactor = 2;
Integer numberOfSlices = 3;
Integer maxShardsPerNode = 3;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, true);
true);
}
@Test
@ -918,9 +881,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.SEND;
Integer replicationFactor = 1;
Integer numberOfSlices = 8;
Integer maxShardsPerNode = 2;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, false);
false);
}
@Test
@ -931,9 +893,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
CreateNodeListOptions createNodeListOptions = CreateNodeListOptions.SEND;
Integer replicationFactor = 2;
Integer numberOfSlices = 4;
Integer maxShardsPerNode = 2;
testTemplate(numberOfNodes, numberOfNodesToCreateOn, createNodeListOptions, replicationFactor, numberOfSlices,
maxShardsPerNode, false);
false);
}
}

View File

@ -1109,8 +1109,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
"name", "perf" + i,
ZkStateReader.NUM_SHARDS_PROP, "1",
ZkStateReader.REPLICATION_FACTOR, "1",
ZkStateReader.MAX_SHARDS_PER_NODE, "1"
ZkStateReader.REPLICATION_FACTOR, "1"
);
ZkDistributedQueue q = overseers.get(0).getStateUpdateQueue();
q.offer(Utils.toJSON(m));
@ -1471,13 +1470,11 @@ public class OverseerTest extends SolrTestCaseJ4 {
// create collection
{
final Integer maxShardsPerNode = numReplicas * numShards;
zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + COLLECTION, true);
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
"name", COLLECTION,
ZkStateReader.NUM_SHARDS_PROP, numShards.toString(),
ZkStateReader.REPLICATION_FACTOR, "1",
ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode.toString()
ZkStateReader.REPLICATION_FACTOR, "1"
);
q.offer(Utils.toJSON(m));
}

View File

@ -68,7 +68,6 @@ public class RecoveryZkTest extends SolrCloudTestCase {
final String collection = "recoverytest";
CollectionAdminRequest.createCollection(collection, "conf", 1, 2)
.setMaxShardsPerNode(1)
.process(cluster.getSolrClient());
waitForState("Expected a collection with one shard and two replicas", collection, clusterShape(1, 2));
cluster.getSolrClient().setDefaultCollection(collection);

View File

@ -380,7 +380,6 @@ public class ReindexCollectionTest extends SolrCloudTestCase {
private void createCollection(String name, String config, int numShards, int numReplicas) throws Exception {
CollectionAdminRequest.createCollection(name, config, numShards, numReplicas)
.setMaxShardsPerNode(-1)
.process(solrClient);
cluster.waitForActiveCollection(name, numShards, numShards * numReplicas);

View File

@ -85,7 +85,7 @@ public class ReplaceNodeTest extends SolrCloudTestCase {
CollectionAdminRequest.createCollection(coll, "conf1", 5, 1,0,0)
//CollectionAdminRequest.createCollection(coll, "conf1", 5, 0,1,0)
);
create.setCreateNodeSet(StrUtils.join(l, ',')).setMaxShardsPerNode(3);
create.setCreateNodeSet(StrUtils.join(l, ','));
cloudClient.request(create);
cluster.waitForActiveCollection(coll, 5, 5 * (create.getNumNrtReplicas() + create.getNumPullReplicas() + create.getNumTlogReplicas()));

View File

@ -103,11 +103,10 @@ public class ReplicationFactorTest extends AbstractFullDistribZkTestBase {
protected void testRf2NotUsingDirectUpdates() throws Exception {
int numShards = 2;
int replicationFactor = 2;
int maxShardsPerNode = 1;
String testCollectionName = "repfacttest_c8n_2x2";
String shardId = "shard1";
createCollectionWithRetry(testCollectionName, "conf1", numShards, replicationFactor, maxShardsPerNode);
createCollectionWithRetry(testCollectionName, "conf1", numShards, replicationFactor);
cloudClient.setDefaultCollection(testCollectionName);
@ -276,12 +275,11 @@ public class ReplicationFactorTest extends AbstractFullDistribZkTestBase {
protected void testRf3() throws Exception {
final int numShards = 1;
final int replicationFactor = 3;
final int maxShardsPerNode = 1;
final String testCollectionName = "repfacttest_c8n_1x3";
final String shardId = "shard1";
final int minRf = 2;
createCollectionWithRetry(testCollectionName, "conf1", numShards, replicationFactor, maxShardsPerNode);
createCollectionWithRetry(testCollectionName, "conf1", numShards, replicationFactor);
cloudClient.setDefaultCollection(testCollectionName);
List<Replica> replicas =
@ -503,15 +501,15 @@ public class ReplicationFactorTest extends AbstractFullDistribZkTestBase {
}
}
void createCollectionWithRetry(String testCollectionName, String config, int numShards, int replicationFactor, int maxShardsPerNode) throws IOException, SolrServerException, InterruptedException, TimeoutException {
CollectionAdminResponse resp = createCollection(testCollectionName, "conf1", numShards, replicationFactor, maxShardsPerNode);
void createCollectionWithRetry(String testCollectionName, String config, int numShards, int replicationFactor) throws IOException, SolrServerException, InterruptedException, TimeoutException {
CollectionAdminResponse resp = createCollection(testCollectionName, "conf1", numShards, replicationFactor);
if (resp.getResponse().get("failure") != null) {
Thread.sleep(5000); // let system settle down. This should be very rare.
CollectionAdminRequest.deleteCollection(testCollectionName).process(cloudClient);
resp = createCollection(testCollectionName, "conf1", numShards, replicationFactor, maxShardsPerNode);
resp = createCollection(testCollectionName, "conf1", numShards, replicationFactor);
if (resp.getResponse().get("failure") != null) {
fail("Could not create " + testCollectionName);

View File

@ -156,7 +156,6 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBa
private void testBasics() throws Exception {
String collection1 = "solrj_collection";
Create createCollectionRequest = CollectionAdminRequest.createCollection(collection1,"conf1",2,2)
.setMaxShardsPerNode(2)
.setRouterField("myOwnField")
.setAutoAddReplicas(true);
CollectionAdminResponse response = createCollectionRequest.process(cloudClient);
@ -167,7 +166,6 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBa
String collection2 = "solrj_collection2";
createCollectionRequest = CollectionAdminRequest.createCollection(collection2,"conf1",2,2)
.setMaxShardsPerNode(2)
.setRouterField("myOwnField")
.setAutoAddReplicas(false);
CollectionAdminResponse response2 = createCollectionRequest.process(getCommonCloudSolrClient());
@ -179,7 +177,6 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBa
String collection3 = "solrj_collection3";
createCollectionRequest = CollectionAdminRequest.createCollection(collection3,"conf1",5,1)
.setMaxShardsPerNode(1)
.setRouterField("myOwnField")
.setAutoAddReplicas(true);
CollectionAdminResponse response3 = createCollectionRequest.process(getCommonCloudSolrClient());
@ -192,7 +189,6 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBa
// a collection has only 1 replica per a shard
String collection4 = "solrj_collection4";
createCollectionRequest = CollectionAdminRequest.createCollection(collection4,"conf1",5,1)
.setMaxShardsPerNode(5)
.setRouterField("text")
.setAutoAddReplicas(true);
CollectionAdminResponse response4 = createCollectionRequest.process(getCommonCloudSolrClient());

View File

@ -76,7 +76,6 @@ public class SplitShardTest extends SolrCloudTestCase {
public void doTest() throws IOException, SolrServerException {
CollectionAdminRequest
.createCollection(COLLECTION_NAME, "conf", 2, 1)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION_NAME, 2, 2);
@ -127,7 +126,6 @@ public class SplitShardTest extends SolrCloudTestCase {
String collectionName = "splitFuzzCollection";
CollectionAdminRequest
.createCollection(collectionName, "conf", 2, 1)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(collectionName, 2, 2);
@ -156,7 +154,6 @@ public class SplitShardTest extends SolrCloudTestCase {
CollectionAdminRequest
.createCollection(collectionName, "conf", 1, repFactor)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(collectionName, 1, repFactor);

View File

@ -90,7 +90,6 @@ public class SystemCollectionCompatTest extends SolrCloudTestCase {
// put .system replicas on other nodes that the overseer
CollectionAdminRequest.createCollection(CollectionAdminParams.SYSTEM_COLL, null, 1, 2)
.setCreateNodeSet(String.join(",", nodes))
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(CollectionAdminParams.SYSTEM_COLL, 1, 2);
// send a dummy doc to the .system collection

View File

@ -47,8 +47,7 @@ public class TestAuthenticationFramework extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final int numShards = 2;
private static final int numReplicas = 2;
private static final int maxShardsPerNode = 2;
private static final int nodeCount = (numShards*numReplicas + (maxShardsPerNode-1))/maxShardsPerNode;
private static final int nodeCount = numShards*numReplicas;
private static final String configName = "solrCloudCollectionConfig";
private static final String collectionName = "testcollection";
@ -97,13 +96,11 @@ public class TestAuthenticationFramework extends SolrCloudTestCase {
throws Exception {
if (random().nextBoolean()) { // process asynchronously
CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
.setMaxShardsPerNode(maxShardsPerNode)
.processAndWait(cluster.getSolrClient(), 90);
cluster.waitForActiveCollection(collectionName, numShards, numShards * numReplicas);
}
else {
CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
.setMaxShardsPerNode(maxShardsPerNode)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(collectionName, numShards, numShards * numReplicas);
}

View File

@ -95,7 +95,6 @@ public abstract class TestBaseStatsCacheCloud extends SolrCloudTestCase {
protected void createTestCollection() throws Exception {
CollectionAdminRequest.createCollection(collectionName, "conf", 2, numNodes)
.setMaxShardsPerNode(2)
.process(solrClient);
indexDocs(solrClient, collectionName, NUM_DOCS, 0, generator);
indexDocs(control, "collection1", NUM_DOCS, 0, generator);

View File

@ -74,7 +74,6 @@ public class TestCloudRecovery extends SolrCloudTestCase {
// TestInjection#waitForInSyncWithLeader is broken
CollectionAdminRequest
.createCollection(COLLECTION, "config", 2, nrtReplicas, tlogReplicas, 0)
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION, 2, 2 * (nrtReplicas + tlogReplicas));

View File

@ -45,7 +45,6 @@ public class TestCloudRecovery2 extends SolrCloudTestCase {
CollectionAdminRequest
.createCollection(COLLECTION, "config", 1,2)
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION, cluster.getSolrClient().getZkStateReader(),
false, true, 30);

View File

@ -141,7 +141,7 @@ public class TestCloudSearcherWarming extends SolrCloudTestCase {
String collectionName = "testPeersyncFailureReplicationSuccess";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, 1, 1)
.setCreateNodeSet(cluster.getJettySolrRunner(0).getNodeName()).setMaxShardsPerNode(2);
.setCreateNodeSet(cluster.getJettySolrRunner(0).getNodeName());
create.process(solrClient);
waitForState("The collection should have 1 shard and 1 replica", collectionName, clusterShape(1, 1));

View File

@ -44,7 +44,6 @@ public class TestDeleteCollectionOnDownNodes extends SolrCloudTestCase {
public void deleteCollectionWithDownNodes() throws Exception {
CollectionAdminRequest.createCollection("halfdeletedcollection2", "conf", 4, 3)
.setMaxShardsPerNode(3)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection("halfdeletedcollection2", 60, TimeUnit.SECONDS, 4, 12);

View File

@ -56,7 +56,7 @@ public class TestDynamicFieldNamesIndexCorrectly extends AbstractFullDistribZkTe
public void test() throws Exception {
waitForThingsToLevelOut(30, TimeUnit.SECONDS);
createCollection(COLLECTION, "conf1", 4, 1, 4);
createCollection(COLLECTION, "conf1", 4, 1);
final int numRuns = 10;
populateIndex(numRuns);
}

View File

@ -64,7 +64,7 @@ public class TestOnReconnectListenerSupport extends AbstractFullDistribZkTestBas
String testCollectionName = "c8n_onreconnect_1x1";
String shardId = "shard1";
createCollectionRetry(testCollectionName, "conf1", 1, 1, 1);
createCollectionRetry(testCollectionName, "conf1", 1, 1);
cloudClient.setDefaultCollection(testCollectionName);
Replica leader = getShardLeader(testCollectionName, shardId, 30 /* timeout secs */);

View File

@ -130,17 +130,15 @@ public class TestPullReplica extends SolrCloudTestCase {
case 0:
// Sometimes use SolrJ
CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1, 0, 3)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
break;
case 1:
// Sometimes use v1 API
String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&collection.configName=%s&numShards=%s&pullReplicas=%s&maxShardsPerNode=%s",
String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&collection.configName=%s&numShards=%s&pullReplicas=%s",
cluster.getRandomJetty(random()).getBaseUrl(),
collectionName, "conf",
2, // numShards
3, // pullReplicas
100); // maxShardsPerNode
3); // pullReplicas
url = url + pickRandom("", "&nrtReplicas=1", "&replicationFactor=1"); // These options should all mean the same
HttpGet createCollectionGet = new HttpGet(url);
cluster.getSolrClient().getHttpClient().execute(createCollectionGet);
@ -148,11 +146,10 @@ public class TestPullReplica extends SolrCloudTestCase {
case 2:
// Sometimes use V2 API
url = cluster.getRandomJetty(random()).getBaseUrl().toString() + "/____v2/c";
String requestBody = String.format(Locale.ROOT, "{create:{name:%s, config:%s, numShards:%s, pullReplicas:%s, maxShardsPerNode:%s %s}}",
String requestBody = String.format(Locale.ROOT, "{create:{name:%s, config:%s, numShards:%s, pullReplicas:%s, %s}}",
collectionName, "conf",
2, // numShards
3, // pullReplicas
100, // maxShardsPerNode
pickRandom("", ", nrtReplicas:1", ", replicationFactor:1")); // These options should all mean the same
HttpPost createCollectionPost = new HttpPost(url);
createCollectionPost.setHeader("Content-type", "application/json");
@ -221,7 +218,6 @@ public class TestPullReplica extends SolrCloudTestCase {
public void testAddDocs() throws Exception {
int numPullReplicas = 1 + random().nextInt(3);
CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1, 0, numPullReplicas)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
waitForState("Expected collection to be created with 1 shard and " + (numPullReplicas + 1) + " replicas", collectionName, clusterShape(1, numPullReplicas + 1));
DocCollection docCollection = assertNumberOfReplicas(1, 0, numPullReplicas, false, true);
@ -279,7 +275,6 @@ public class TestPullReplica extends SolrCloudTestCase {
public void testAddRemovePullReplica() throws Exception {
CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1, 0, 0)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
waitForState("Expected collection to be created with 2 shards and 1 replica each", collectionName, clusterShape(2, 2));
DocCollection docCollection = assertNumberOfReplicas(2, 0, 0, false, true);
@ -315,7 +310,6 @@ public class TestPullReplica extends SolrCloudTestCase {
public void testPullReplicaStates() throws Exception {
// Validate that pull replicas go through the correct states when starting, stopping, reconnecting
CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1, 0, 0)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
// cluster.getSolrClient().getZkStateReader().registerCore(collectionName); //TODO: Is this needed?
waitForState("Replica not added", collectionName, activeReplicaCount(1, 0, 0));
@ -349,7 +343,6 @@ public class TestPullReplica extends SolrCloudTestCase {
// should be redirected to Replica.Type.NRT
int numReplicas = random().nextBoolean()?1:2;
CollectionAdminRequest.createCollection(collectionName, "conf", 1, numReplicas, 0, numReplicas)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
waitForState("Unexpected replica count", collectionName, activeReplicaCount(numReplicas, 0, numReplicas));
DocCollection docCollection = assertNumberOfReplicas(numReplicas, 0, numReplicas, false, true);
@ -394,7 +387,6 @@ public class TestPullReplica extends SolrCloudTestCase {
@SuppressWarnings({"try"})
private void doTestNoLeader(boolean removeReplica) throws Exception {
CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1, 0, 1)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
waitForState("Expected collection to be created with 1 shard and 2 replicas", collectionName, clusterShape(1, 2));
DocCollection docCollection = assertNumberOfReplicas(1, 0, 1, false, true);
@ -500,7 +492,6 @@ public class TestPullReplica extends SolrCloudTestCase {
public void testKillPullReplica() throws Exception {
CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1, 0, 1)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
// cluster.getSolrClient().getZkStateReader().registerCore(collectionName); //TODO: Is this needed?
waitForState("Expected collection to be created with 1 shard and 2 replicas", collectionName, clusterShape(1, 2));

View File

@ -129,7 +129,6 @@ public class TestPullReplicaErrorHandling extends SolrCloudTestCase {
public void testCantConnectToPullReplica() throws Exception {
int numShards = 2;
CollectionAdminRequest.createCollection(collectionName, "conf", numShards, 1, 0, 1)
.setMaxShardsPerNode(1)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(collectionName, numShards, numShards * 2);
addDocs(10);
@ -172,7 +171,6 @@ public void testCantConnectToPullReplica() throws Exception {
public void testCantConnectToLeader() throws Exception {
int numShards = 1;
CollectionAdminRequest.createCollection(collectionName, "conf", numShards, 1, 0, 1)
.setMaxShardsPerNode(1)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(collectionName, numShards, numShards * 2);
addDocs(10);
@ -208,7 +206,6 @@ public void testCantConnectToPullReplica() throws Exception {
public void testPullReplicaDisconnectsFromZooKeeper() throws Exception {
int numShards = 1;
CollectionAdminRequest.createCollection(collectionName, "conf", numShards, 1, 0, 1)
.setMaxShardsPerNode(1)
.process(cluster.getSolrClient());
addDocs(10);
DocCollection docCollection = assertNumberOfReplicas(numShards, 0, numShards, false, true);

View File

@ -68,7 +68,6 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
CollectionAdminResponse resp = CollectionAdminRequest.createCollection(COLLECTION_NAME, COLLECTION_NAME,
numShards, numReplicas, 0, 0)
.setMaxShardsPerNode((numShards * numReplicas) / numNodes + 1)
.process(cluster.getSolrClient());
assertEquals("Admin request failed; ", 0, resp.getStatus());
cluster.waitForActiveCollection(COLLECTION_NAME, numShards, numShards * numReplicas);

View File

@ -73,7 +73,6 @@ public class TestRequestForwarding extends SolrTestCaseJ4 {
private void createCollection(String name, String config) throws Exception {
CollectionAdminResponse response;
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(name,config,2,1);
create.setMaxShardsPerNode(1);
response = create.process(solrCluster.getSolrClient());
if (response.getStatus() != 0 || response.getErrorMessages() != null) {

View File

@ -141,7 +141,6 @@ public class TestSkipOverseerOperations extends SolrCloudTestCase {
.map(JettySolrRunner::getNodeName)
.collect(Collectors.joining(","))
)
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(collection, 2, 4);

View File

@ -50,8 +50,7 @@ public class TestSolrCloudWithKerberosAlt extends SolrCloudTestCase {
private static final int numShards = 1;
private static final int numReplicas = 1;
private static final int maxShardsPerNode = 1;
private static final int nodeCount = (numShards*numReplicas + (maxShardsPerNode-1))/maxShardsPerNode;
private static final int nodeCount = numShards*numReplicas;
private static final String configName = "solrCloudCollectionConfig";
private static final String collectionName = "testkerberoscollection";
@ -122,7 +121,6 @@ public class TestSolrCloudWithKerberosAlt extends SolrCloudTestCase {
private void testCollectionCreateSearchDelete() throws Exception {
CloudSolrClient client = cluster.getSolrClient();
CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
.setMaxShardsPerNode(maxShardsPerNode)
.process(client);
cluster.waitForActiveCollection(collectionName, numShards, numShards * numReplicas);

View File

@ -183,7 +183,6 @@ public class TestSolrCloudWithSecureImpersonation extends SolrTestCaseJ4 {
return msp;
}
};
create.setMaxShardsPerNode(1);
response = create.process(solrCluster.getSolrClient());
miniCluster.waitForActiveCollection(name, 1, 1);

View File

@ -147,18 +147,16 @@ public class TestTlogReplica extends SolrCloudTestCase {
switch (random().nextInt(3)) {
case 0:
CollectionAdminRequest.createCollection(collectionName, "conf", 2, 0, 4, 0)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(collectionName, 2, 8);
break;
case 1:
// Sometimes don't use SolrJ
String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&collection.configName=%s&numShards=%s&tlogReplicas=%s&maxShardsPerNode=%s",
String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&collection.configName=%s&numShards=%s&tlogReplicas=%s",
cluster.getRandomJetty(random()).getBaseUrl(),
collectionName, "conf",
2, // numShards
4, // tlogReplicas
100); // maxShardsPerNode
4); // tlogReplicas
HttpGet createCollectionGet = new HttpGet(url);
HttpResponse httpResponse = cluster.getSolrClient().getHttpClient().execute(createCollectionGet);
assertEquals(200, httpResponse.getStatusLine().getStatusCode());
@ -167,11 +165,11 @@ public class TestTlogReplica extends SolrCloudTestCase {
case 2:
// Sometimes use V2 API
url = cluster.getRandomJetty(random()).getBaseUrl().toString() + "/____v2/c";
String requestBody = String.format(Locale.ROOT, "{create:{name:%s, config:%s, numShards:%s, tlogReplicas:%s, maxShardsPerNode:%s}}",
String requestBody = String.format(Locale.ROOT, "{create:{name:%s, config:%s, numShards:%s, tlogReplicas:%s}}",
collectionName, "conf",
2, // numShards
4, // tlogReplicas
100); // maxShardsPerNode
4); // tlogReplicas
HttpPost createCollectionPost = new HttpPost(url);
createCollectionPost.setHeader("Content-type", "application/json");
createCollectionPost.setEntity(new StringEntity(requestBody));
@ -323,7 +321,6 @@ public class TestTlogReplica extends SolrCloudTestCase {
int numReplicas = random().nextBoolean()?1:2;
int numNrtReplicas = random().nextBoolean()?0:2;
CollectionAdminRequest.createCollection(collectionName, "conf", 1, numNrtReplicas, numReplicas, 0)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
waitForState("Unexpected replica count", collectionName, activeReplicaCount(numNrtReplicas, numReplicas, 0));
DocCollection docCollection = assertNumberOfReplicas(numNrtReplicas, numReplicas, 0, false, true);
@ -748,7 +745,6 @@ public class TestTlogReplica extends SolrCloudTestCase {
private DocCollection createAndWaitForCollection(int numShards, int numNrtReplicas, int numTlogReplicas, int numPullReplicas) throws SolrServerException, IOException, KeeperException, InterruptedException {
CollectionAdminRequest.createCollection(collectionName, "conf", numShards, numNrtReplicas, numTlogReplicas, numPullReplicas)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
int numReplicasPerShard = numNrtReplicas + numTlogReplicas + numPullReplicas;
waitForState("Expected collection to be created with " + numShards + " shards and " + numReplicasPerShard + " replicas",

View File

@ -77,8 +77,7 @@ public class TestUtilizeNode extends SolrCloudTestCase {
CloudSolrClient cloudClient = cluster.getSolrClient();
log.info("Creating Collection...");
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf1", 2, 2)
.setMaxShardsPerNode(2);
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf1", 2, 2);
cloudClient.request(create);
log.info("Spinning up additional jettyX...");

View File

@ -65,7 +65,6 @@ public class ZkShardTermsTest extends SolrCloudTestCase {
// When new collection is created, the old term nodes will be removed
CollectionAdminRequest.createCollection(collection, 2, 2)
.setCreateNodeSet(cluster.getJettySolrRunner(0).getNodeName())
.setMaxShardsPerNode(1000)
.process(cluster.getSolrClient());
try (ZkShardTerms zkShardTerms = new ZkShardTerms(collection, "shard1", cluster.getZkClient())) {
waitFor(2, () -> zkShardTerms.getTerms().size());

View File

@ -121,14 +121,6 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
CollectionAdminRequest.createCollectionWithImplicitRouter(getCollectionName(), "conf1", "shard1,shard2", replFactor, numTlogReplicas, numPullReplicas) :
CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor, numTlogReplicas, numPullReplicas);
if (random().nextBoolean()) {
create.setMaxShardsPerNode(-1);
} else if (doSplitShardOperation) {
create.setMaxShardsPerNode((int) Math.ceil(NUM_SPLIT_SHARDS * backupReplFactor / (double) cluster.getJettySolrRunners().size()));
} else if (NUM_SHARDS * (backupReplFactor) > cluster.getJettySolrRunners().size() || random().nextBoolean()) {
create.setMaxShardsPerNode((int) Math.ceil(NUM_SHARDS * backupReplFactor / (double) cluster.getJettySolrRunners().size()));//just to assert it survives the restoration
}
if (random().nextBoolean()) {
create.setAutoAddReplicas(true);//just to assert it survives the restoration
}
@ -178,10 +170,6 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
CollectionAdminRequest.Create create =
CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor, numTlogReplicas, numPullReplicas);
if (NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) > cluster.getJettySolrRunners().size()) {
create.setMaxShardsPerNode((int)Math.ceil(NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) / cluster.getJettySolrRunners().size())); //just to assert it survives the restoration
}
CloudSolrClient solrClient = cluster.getSolrClient();
create.process(solrClient);
@ -208,10 +196,6 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
{
CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
.setLocation(backupLocation).setRepositoryName(getBackupRepoName());
if (backupCollection.getReplicas().size() > cluster.getJettySolrRunners().size()) {
// may need to increase maxShardsPerNode (e.g. if it was shard split, then now we need more)
restore.setMaxShardsPerNode((int)Math.ceil(backupCollection.getReplicas().size()/cluster.getJettySolrRunners().size()));
}
restore.setConfigName("confFaulty");
assertEquals(RequestStatusState.FAILED, restore.processAndWait(solrClient, 30));
@ -339,8 +323,6 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
int restoreReplFactor = restoreReplcationFactor + restoreTlogReplicas + restorePullReplicas;
boolean isMaxShardsPerNodeExternal = false;
boolean isMaxShardsUnlimited = false;
CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
.setLocation(backupLocation).setRepositoryName(getBackupRepoName());
@ -352,28 +334,9 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
}
int computeRestoreMaxShardsPerNode = (int) Math.ceil((restoreReplFactor * numShards/(double) cluster.getJettySolrRunners().size()));
if (restoreReplFactor > backupReplFactor) { //else the backup maxShardsPerNode should be enough
if (log.isInfoEnabled()) {
log.info("numShards={} restoreReplFactor={} maxShardsPerNode={} totalNodes={}",
numShards, restoreReplFactor, computeRestoreMaxShardsPerNode, cluster.getJettySolrRunners().size());
}
if (random().nextBoolean()) { //set it to -1
isMaxShardsUnlimited = true;
restore.setMaxShardsPerNode(-1);
} else {
isMaxShardsPerNodeExternal = true;
restore.setMaxShardsPerNode(computeRestoreMaxShardsPerNode);
}
}
if (rarely()) { // Try with createNodeSet configuration
//Always 1 as cluster.getJettySolrRunners().size()=NUM_SHARDS=2
restore.setCreateNodeSet(cluster.getJettySolrRunners().get(0).getNodeName());
// we need to double maxShardsPerNode value since we reduced number of available nodes by half.
isMaxShardsPerNodeExternal = true;
computeRestoreMaxShardsPerNode = origShardToDocCount.size() * restoreReplFactor;
restore.setMaxShardsPerNode(computeRestoreMaxShardsPerNode);
}
final int restoreMaxShardsPerNode = computeRestoreMaxShardsPerNode;
@ -420,13 +383,6 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
assertEquals(restoreCollection.toString(), restoreReplcationFactor, restoreCollection.getNumNrtReplicas().intValue());
assertEquals(restoreCollection.toString(), restorePullReplicas, restoreCollection.getNumPullReplicas().intValue());
assertEquals(restoreCollection.toString(), restoreTlogReplicas, restoreCollection.getNumTlogReplicas().intValue());
if (isMaxShardsPerNodeExternal) {
assertEquals(restoreCollectionName, restoreMaxShardsPerNode, restoreCollection.getMaxShardsPerNode());
} else if (isMaxShardsUnlimited){
assertEquals(restoreCollectionName, -1, restoreCollection.getMaxShardsPerNode());
} else {
assertEquals(restoreCollectionName, backupCollection.getMaxShardsPerNode(), restoreCollection.getMaxShardsPerNode());
}
//SOLR-12605: Add more docs after restore is complete to see if they are getting added fine
//explicitly querying the leaders. If we use CloudSolrClient there is no guarantee that we'll hit a nrtReplica

View File

@ -53,7 +53,6 @@ public class AsyncCallRequestStatusResponseTest extends SolrCloudTestCase {
int numShards = 4;
int numReplicas = 1;
Create createCollection = CollectionAdminRequest.createCollection("asynccall", "conf", numShards, numReplicas);
createCollection.setMaxShardsPerNode(100);
String asyncId =
createCollection.processAsync(cluster.getSolrClient());

View File

@ -21,8 +21,10 @@ import java.util.Map;
import java.util.stream.Collectors;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.CloudTestUtils;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
@ -49,53 +51,49 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
@Test
public void testAddTooManyReplicas() throws Exception {
final String collectionName = "TooManyReplicasInSeveralFlavors";
CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
.setMaxShardsPerNode(1)
.process(cluster.getSolrClient());
// I have two replicas, one for each shard
// Curiously, I should be able to add a bunch of replicas if I specify the node, even more than maxShardsPerNode
// Just get the first node any way we can.
// Get a node to use for the "node" parameter.
String nodeName = getAllNodeNames(collectionName).get(0);
// Add a replica using the "node" parameter (no "too many replicas check")
// Add a replica using the "node" parameter
// this node should have 2 replicas on it
CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
.setNode(nodeName)
.withProperty("name", "bogus2")
.process(cluster.getSolrClient());
// Three replicas so far, should be able to create another one "normally"
CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
.process(cluster.getSolrClient());
// equivalent to maxShardsPerNode=1
String commands = "{ set-cluster-policy: [ {replica: '<2', shard: '#ANY', node: '#ANY', strict: true} ] }";
cluster.getSolrClient().request(CloudTestUtils.AutoScalingRequest.create(SolrRequest.METHOD.POST, commands));
// This one should fail though, no "node" parameter specified
// this should fail because the policy prevents it
Exception e = expectThrows(Exception.class, () -> {
CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
.setNode(nodeName)
.process(cluster.getSolrClient());
});
assertTrue(e.toString(), e.toString().contains("No node can satisfy"));
assertTrue("Should have gotten the right error message back",
e.getMessage().contains("given the current number of eligible live nodes"));
// Oddly, we should succeed next just because setting property.name will not check for nodes being "full up"
// TODO: Isn't this a bug?
// this should succeed because it places the replica on a different node
CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
.withProperty("name", "bogus2")
.setNode(nodeName)
.process(cluster.getSolrClient());
DocCollection collectionState = getCollectionState(collectionName);
Slice slice = collectionState.getSlice("shard1");
Replica replica = getRandomReplica(slice, r -> r.getCoreName().equals("bogus2"));
assertNotNull("Should have found a replica named 'bogus2'", replica);
assertEquals("Replica should have been put on correct core", nodeName, replica.getNodeName());
assertEquals("Replica should have been put on correct node", nodeName, replica.getNodeName());
// Shard1 should have 4 replicas
assertEquals("There should be 4 replicas for shard 1", 4, slice.getReplicas().size());
// Shard1 should have 3 replicas
assertEquals("There should be 3 replicas for shard 1", 3, slice.getReplicas().size());
// And let's fail one more time because to ensure that the math doesn't do weird stuff it we have more replicas
// than simple calcs would indicate.
@ -105,7 +103,7 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
});
assertTrue("Should have gotten the right error message back",
e2.getMessage().contains("given the current number of eligible live nodes"));
e2.getMessage().contains("No node can satisfy"));
// wait for recoveries to finish, for a clean shutdown - see SOLR-9645
waitForState("Expected to see all replicas active", collectionName, (n, c) -> {
@ -119,10 +117,12 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
@Test
public void testAddShard() throws Exception {
// equivalent to maxShardsPerNode=2
String commands = "{ set-cluster-policy: [ {replica: '<3', shard: '#ANY', node: '#ANY', strict: true} ] }";
cluster.getSolrClient().request(CloudTestUtils.AutoScalingRequest.create(SolrRequest.METHOD.POST, commands));
String collectionName = "TooManyReplicasWhenAddingShards";
CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 2)
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
// We have two nodes, maxShardsPerNode is set to 2. Therefore, we should be able to add 2 shards each with
@ -140,38 +140,40 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
.process(cluster.getSolrClient());
});
assertTrue("Should have gotten the right error message back",
e.getMessage().contains("given the current number of eligible live nodes"));
e.getMessage().contains("No node can satisfy the rules"));
// Hmmm, providing a nodeset also overrides the checks for max replicas, so prove it.
List<String> nodes = getAllNodeNames(collectionName);
CollectionAdminRequest.createShard(collectionName, "shard4")
.setNodeSet(String.join(",", nodes))
.process(cluster.getSolrClient());
// And just for yucks, ensure we fail the "regular" one again.
Exception e2 = expectThrows(Exception.class, () -> {
CollectionAdminRequest.createShard(collectionName, "shard4")
.setNodeSet(String.join(",", nodes))
.process(cluster.getSolrClient());
});
assertTrue("Should have gotten the right error message back",
e2.getMessage().contains("No node can satisfy the rules"));
// And just for yucks, ensure we fail the "regular" one again.
Exception e3 = expectThrows(Exception.class, () -> {
CollectionAdminRequest.createShard(collectionName, "shard5")
.process(cluster.getSolrClient());
});
assertTrue("Should have gotten the right error message back",
e2.getMessage().contains("given the current number of eligible live nodes"));
e3.getMessage().contains("No node can satisfy the rules"));
// And finally, ensure that there are all the replicas we expect. We should have shards 1, 2 and 4 and each
// should have exactly two replicas
waitForState("Expected shards shardstart, 1, 2 and 4, each with two active replicas", collectionName, (n, c) -> {
return DocCollection.isFullyActive(n, c, 4, 2);
waitForState("Expected shards shardstart, 1, 2, each with two active replicas", collectionName, (n, c) -> {
return DocCollection.isFullyActive(n, c, 3, 2);
});
Map<String, Slice> slices = getCollectionState(collectionName).getSlicesMap();
assertEquals("There should be exactly four slices", slices.size(), 4);
assertEquals("There should be exactly three slices", slices.size(), 3);
assertNotNull("shardstart should exist", slices.get("shardstart"));
assertNotNull("shard1 should exist", slices.get("shard1"));
assertNotNull("shard2 should exist", slices.get("shard2"));
assertNotNull("shard4 should exist", slices.get("shard4"));
assertEquals("Shardstart should have exactly 2 replicas", 2, slices.get("shardstart").getReplicas().size());
assertEquals("Shard1 should have exactly 2 replicas", 2, slices.get("shard1").getReplicas().size());
assertEquals("Shard2 should have exactly 2 replicas", 2, slices.get("shard2").getReplicas().size());
assertEquals("Shard4 should have exactly 2 replicas", 2, slices.get("shard4").getReplicas().size());
}
@ -179,7 +181,6 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
public void testDownedShards() throws Exception {
String collectionName = "TooManyReplicasWhenAddingDownedNode";
CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 1)
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
// Shut down a Jetty, I really don't care which

View File

@ -224,16 +224,6 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
});
}
@Test
public void testTooManyReplicas() {
@SuppressWarnings({"rawtypes"})
CollectionAdminRequest req = CollectionAdminRequest.createCollection("collection", "conf", 2, 10);
expectThrows(Exception.class, () -> {
cluster.getSolrClient().request(req);
});
}
@Test
public void testMissingNumShards() {
// No numShards should fail
@ -353,18 +343,6 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
assertEquals("conf2", configName);
}
@Test
public void testMaxNodesPerShard() {
int numLiveNodes = cluster.getJettySolrRunners().size();
int numShards = (numLiveNodes/2) + 1;
int replicationFactor = 2;
expectThrows(SolrException.class, () -> {
CollectionAdminRequest.createCollection("oversharded", "conf", numShards, replicationFactor)
.process(cluster.getSolrClient());
});
}
@Test
public void testCreateNodeSet() throws Exception {
JettySolrRunner jetty1 = cluster.getRandomJetty(random());
@ -413,11 +391,9 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
int numShards = TestUtil.nextInt(random(), 0, cluster.getJettySolrRunners().size()) + 1;
int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
int maxShardsPerNode = (((numShards * replicationFactor) / cluster.getJettySolrRunners().size())) + 1;
createRequests[i]
= CollectionAdminRequest.createCollection("awhollynewcollection_" + i, "conf2", numShards, replicationFactor)
.setMaxShardsPerNode(maxShardsPerNode);
= CollectionAdminRequest.createCollection("awhollynewcollection_" + i, "conf2", numShards, replicationFactor);
createRequests[i].processAsync(cluster.getSolrClient());
Coll coll = new Coll();
@ -613,7 +589,6 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
String collectionName = "addReplicaColl";
CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
.setMaxShardsPerNode(4)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(collectionName, 2, 4);

View File

@ -71,8 +71,7 @@ public class ConcurrentCreateCollectionTest extends SolrCloudTestCase {
private CollectionAdminRequest.Create createCollectionRequest(String cname, int numShards, int numReplicas) throws Exception {
CollectionAdminRequest.Create creq = CollectionAdminRequest
// .createCollection(cname, "conf", NODES - 1, NODES - 1)
.createCollection(cname, "conf", numShards, numReplicas)
.setMaxShardsPerNode(100);
.createCollection(cname, "conf", numShards, numReplicas);
creq.setWaitForFinalState(true);
creq.setAutoAddReplicas(true);
return creq;
@ -135,8 +134,7 @@ public class ConcurrentCreateCollectionTest extends SolrCloudTestCase {
String cname = "STARTCOLLECTION";
CollectionAdminRequest.Create creq = CollectionAdminRequest
// .createCollection(cname, "conf", NODES - 1, NODES - 1)
.createCollection(cname, "conf", unbalancedSize, 1)
.setMaxShardsPerNode(100);
.createCollection(cname, "conf", unbalancedSize, 1);
creq.setWaitForFinalState(true);
// creq.setAutoAddReplicas(true);
if (useCollectionPolicy) { creq.setPolicy("policy1"); }

View File

@ -31,7 +31,6 @@ import org.junit.BeforeClass;
import org.junit.Test;
import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
import static org.apache.solr.common.params.ShardParams._ROUTE_;
@ -61,16 +60,13 @@ public class CustomCollectionTest extends SolrCloudTestCase {
final String collection = "implicitcoll";
int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
int numShards = 3;
int maxShardsPerNode = (((numShards + 1) * replicationFactor) / NODE_COUNT) + 1;
CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c", replicationFactor)
.setMaxShardsPerNode(maxShardsPerNode)
.process(cluster.getSolrClient());
DocCollection coll = getCollectionState(collection);
assertEquals("implicit", ((Map) coll.get(DOC_ROUTER)).get("name"));
assertNotNull(coll.getStr(REPLICATION_FACTOR));
assertNotNull(coll.getStr(MAX_SHARDS_PER_NODE));
assertNull("A shard of a Collection configured with implicit router must have null range",
coll.getSlice("a").getRange());
@ -126,13 +122,11 @@ public class CustomCollectionTest extends SolrCloudTestCase {
int numShards = 4;
int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
String shard_fld = "shard_s";
final String collection = "withShardField";
CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c,d", replicationFactor)
.setMaxShardsPerNode(maxShardsPerNode)
.setRouterField(shard_fld)
.process(cluster.getSolrClient());
@ -154,11 +148,9 @@ public class CustomCollectionTest extends SolrCloudTestCase {
String collectionName = "routeFieldColl";
int numShards = 4;
int replicationFactor = 2;
int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
String shard_fld = "shard_s";
CollectionAdminRequest.createCollection(collectionName, "conf", numShards, replicationFactor)
.setMaxShardsPerNode(maxShardsPerNode)
.setRouterField(shard_fld)
.process(cluster.getSolrClient());

View File

@ -75,7 +75,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
@Slow
@ -137,7 +136,6 @@ public class ShardSplitTest extends BasicDistributedZkTest {
String collectionName = "testSplitStaticIndexReplication_" + splitMethod.toLower();
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
create.setCreateNodeSet(nodeName); // we want to create the leader on a fixed node so that we know which one to restart later
create.process(cloudClient);
@ -356,7 +354,6 @@ public class ShardSplitTest extends BasicDistributedZkTest {
waitForThingsToLevelOut(15, TimeUnit.SECONDS);
String collectionName = "testSplitMixedReplicaTypes_" + splitMethod.toLower();
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2, 0, 2); // TODO tlog replicas disabled right now.
create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
create.process(cloudClient);
cloudClient.waitForState(collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 4));
@ -567,7 +564,6 @@ public class ShardSplitTest extends BasicDistributedZkTest {
waitForThingsToLevelOut(15, TimeUnit.SECONDS);
String collectionName = "testSplitLocking";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2);
create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
create.process(cloudClient);
cloudClient.waitForState(collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 2));
@ -796,15 +792,12 @@ public class ShardSplitTest extends BasicDistributedZkTest {
String collectionName = "routeFieldColl";
int numShards = 4;
int replicationFactor = 2;
int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
String shard_fld = "shard_s";
try (CloudSolrClient client = createCloudClient(null)) {
Map<String, Object> props = Utils.makeMap(
REPLICATION_FACTOR, replicationFactor,
MAX_SHARDS_PER_NODE, maxShardsPerNode,
OverseerCollectionMessageHandler.NUM_SLICES, numShards,
"router.field", shard_fld);
@ -858,15 +851,12 @@ public class ShardSplitTest extends BasicDistributedZkTest {
String collectionName = "splitByRouteKeyTest";
int numShards = 4;
int replicationFactor = 2;
int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
try (CloudSolrClient client = createCloudClient(null)) {
Map<String, Object> props = Utils.makeMap(
REPLICATION_FACTOR, replicationFactor,
MAX_SHARDS_PER_NODE, maxShardsPerNode,
OverseerCollectionMessageHandler.NUM_SLICES, numShards);
createCollection(collectionInfos, collectionName,props,client);

View File

@ -154,7 +154,6 @@ public class SplitByPrefixTest extends SolrCloudTestCase {
CollectionAdminRequest
.createCollection(COLLECTION_NAME, "conf", 1, 1)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION_NAME, 1, 1);

View File

@ -69,11 +69,10 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
} else {
req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1",2, 1, 0, 1);
}
req.setMaxShardsPerNode(2);
setV2(req);
client.request(req);
assertV2CallsCount();
createCollection(null, COLLECTION_NAME1, 1, 1, 1, client, null, "conf1");
createCollection(null, COLLECTION_NAME1, 1, 1, client, null, "conf1");
}
waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
@ -93,7 +92,6 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
replicaPropTest();
clusterStatusZNodeVersion();
testCollectionCreationCollectionNameValidation();
testCollectionCreationTooManyShards();
testReplicationFactorValidaton();
testCollectionCreationShardNameValidation();
testAliasCreationNameValidation();
@ -102,31 +100,6 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
testModifyCollection(); // deletes replicationFactor property from collections, be careful adding new tests after this one!
}
private void testCollectionCreationTooManyShards() throws Exception {
try (CloudSolrClient client = createCloudClient(null)) {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", CollectionParams.CollectionAction.CREATE.toString());
params.set("name", "collection_too_many");
params.set("router.name", "implicit");
params.set("numShards", "10");
params.set("maxShardsPerNode", 1);
params.set("shards", "b0,b1,b2,b3,b4,b5,b6,b7,b8,b9");
@SuppressWarnings({"rawtypes"})
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
try {
client.request(request);
fail("A collection creation request with too many shards than allowed by maxShardsPerNode should not have succeeded");
} catch (BaseHttpSolrClient.RemoteSolrException e) {
final String errorMessage = e.getMessage();
assertTrue(errorMessage.contains("Cannot create collection"));
assertTrue(errorMessage.contains("This requires 10 shards to be created (higher than the allowed number)"));
assertMissingCollection(client, "collection_too_many");
}
}
}
private void assertMissingCollection(CloudSolrClient client, String collectionName) throws Exception {
ClusterState clusterState = client.getZkStateReader().getClusterState();
assertNull(clusterState.getCollectionOrNull(collectionName));
@ -441,7 +414,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
private void clusterStatusZNodeVersion() throws Exception {
String cname = "clusterStatusZNodeVersion";
try (CloudSolrClient client = createCloudClient(null)) {
setV2(CollectionAdminRequest.createCollection(cname, "conf1", 1, 1).setMaxShardsPerNode(1)).process(client);
setV2(CollectionAdminRequest.createCollection(cname, "conf1", 1, 1)).process(client);
assertV2CallsCount();
waitForRecoveriesToFinish(cname, true);

View File

@ -56,7 +56,6 @@ public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
private static final int numShards = 2;
private static final int numReplicas = 2;
private static final int maxShardsPerNode = 1;
private static final int nodeCount = 5;
private static final String configName = "solrCloudCollectionConfig";
private static final Map<String,String> collectionProperties // ensure indexes survive core shutdown
@ -77,14 +76,12 @@ public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
private void createCollection(String collectionName, String createNodeSet) throws Exception {
if (random().nextBoolean()) { // process asynchronously
CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
.setMaxShardsPerNode(maxShardsPerNode)
.setCreateNodeSet(createNodeSet)
.setProperties(collectionProperties)
.processAndWait(cluster.getSolrClient(), 30);
}
else {
CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
.setMaxShardsPerNode(maxShardsPerNode)
.setCreateNodeSet(createNodeSet)
.setProperties(collectionProperties)
.process(cluster.getSolrClient());

View File

@ -52,12 +52,12 @@ public class TestReplicaProperties extends ReplicaPropertiesBase {
try (CloudSolrClient client = createCloudClient(null)) {
// Mix up a bunch of different combinations of shards and replicas in order to exercise boundary cases.
// shards, replicationfactor, maxreplicaspernode
// shards, replicationfactor
int shards = random().nextInt(7);
if (shards < 2) shards = 2;
int rFactor = random().nextInt(4);
if (rFactor < 2) rFactor = 2;
createCollection(null, COLLECTION_NAME, shards, rFactor, shards * rFactor + 1, client, null, "conf1");
createCollection(null, COLLECTION_NAME, shards, rFactor, client, null, "conf1");
}
waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);

View File

@ -49,7 +49,6 @@ public class TestRequestStatusCollectionAPI extends BasicDistributedZkTest {
params.set("numShards", numShards);
int replicationFactor = 1;
params.set("replicationFactor", replicationFactor);
params.set("maxShardsPerNode", 100);
params.set("collection.configName", "conf1");
params.set(CommonAdminParams.ASYNC, "1000");
try {
@ -130,7 +129,6 @@ public class TestRequestStatusCollectionAPI extends BasicDistributedZkTest {
params.set("name", "collection2");
params.set("numShards", 2);
params.set("replicationFactor", 1);
params.set("maxShardsPerNode", 100);
params.set("collection.configName", "conf1");
params.set(CommonAdminParams.ASYNC, "1002");
try {
@ -158,7 +156,6 @@ public class TestRequestStatusCollectionAPI extends BasicDistributedZkTest {
params.set("name", "collection3");
params.set("numShards", 1);
params.set("replicationFactor", 1);
params.set("maxShardsPerNode", 100);
params.set("collection.configName", "conf1");
params.set(CommonAdminParams.ASYNC, "1002");
try {

View File

@ -108,7 +108,6 @@ public class AutoAddReplicasIntegrationTest extends SolrCloudTestCase {
CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 2)
.setCreateNodeSet(jetty1.getNodeName()+","+jetty2.getNodeName())
.setAutoAddReplicas(true)
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION, 2, 4);
@ -162,7 +161,6 @@ public class AutoAddReplicasIntegrationTest extends SolrCloudTestCase {
CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 2)
.setCreateNodeSet(jetty1.getNodeName()+","+jetty2.getNodeName())
.setAutoAddReplicas(true)
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION, 2, 4);
@ -221,7 +219,6 @@ public class AutoAddReplicasIntegrationTest extends SolrCloudTestCase {
CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 2)
.setCreateNodeSet(jetty1.getNodeName()+","+jetty2.getNodeName())
.setAutoAddReplicas(false) // NOTE: false
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION, 2, 4);
@ -298,7 +295,6 @@ public class AutoAddReplicasIntegrationTest extends SolrCloudTestCase {
CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 2)
.setCreateNodeSet(jetty1.getNodeName()+","+jetty2.getNodeName())
.setAutoAddReplicas(false) // NOTE: false
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
if (log.isInfoEnabled()) {
@ -310,7 +306,6 @@ public class AutoAddReplicasIntegrationTest extends SolrCloudTestCase {
CollectionAdminRequest.createCollection(ALT_COLLECTION, "conf", 2, 2)
.setCreateNodeSet(jetty1.getNodeName()+","+jetty2.getNodeName())
.setAutoAddReplicas(true) // NOTE: true
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION, 2, 4);

View File

@ -88,18 +88,15 @@ public class AutoAddReplicasPlanActionTest extends SolrCloudTestCase{
CollectionAdminRequest.createCollection(collection1, "conf", 2, 2)
.setCreateNodeSet(jetty1.getNodeName()+","+jetty2.getNodeName())
.setAutoAddReplicas(true)
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
CollectionAdminRequest.createCollection(collection2, "conf", 1, 2)
.setCreateNodeSet(jetty2.getNodeName()+","+jetty3.getNodeName())
.setAutoAddReplicas(false)
.setMaxShardsPerNode(1)
.process(cluster.getSolrClient());
// the number of cores in jetty1 (6) will be larger than jetty3 (1)
CollectionAdminRequest.createCollection(collection3, "conf", 3, 1)
.setCreateNodeSet(jetty1.getNodeName())
.setAutoAddReplicas(false)
.setMaxShardsPerNode(3)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(collection1, 2, 4);

View File

@ -109,7 +109,6 @@ public class AutoScalingHandlerTest extends SolrCloudTestCase {
CloudSolrClient solrClient = cluster.getSolrClient();
String COLLNAME = "testSuggestionsWithPayload.COLL";
CollectionAdminResponse adminResponse = CollectionAdminRequest.createCollection(COLLNAME, CONFIGSET_NAME, 1, 2)
.setMaxShardsPerNode(4)
.process(solrClient);
cluster.waitForActiveCollection(COLLNAME, 1, 2);
DocCollection collection = solrClient.getClusterStateProvider().getCollection(COLLNAME);
@ -143,7 +142,6 @@ public class AutoScalingHandlerTest extends SolrCloudTestCase {
CloudSolrClient solrClient = cluster.getSolrClient();
String COLLNAME = "testDiagnosticsWithPayload.COLL";
CollectionAdminResponse adminResponse = CollectionAdminRequest.createCollection(COLLNAME, CONFIGSET_NAME, 1, 2)
.setMaxShardsPerNode(4)
.process(solrClient);
cluster.waitForActiveCollection(COLLNAME, 1, 2);
DocCollection collection = solrClient.getClusterStateProvider().getCollection(COLLNAME);
@ -863,8 +861,7 @@ public class AutoScalingHandlerTest extends SolrCloudTestCase {
assertEquals(response.get("result").toString(), "success");
// lets create a collection which violates the rule replicas < 2
CollectionAdminRequest.Create create = CollectionAdminRequest.Create.createCollection("readApiTestViolations", CONFIGSET_NAME, 1, 6)
.setMaxShardsPerNode(3);
CollectionAdminRequest.Create create = CollectionAdminRequest.Create.createCollection("readApiTestViolations", CONFIGSET_NAME, 1, 6);
CollectionAdminResponse adminResponse = create.process(solrClient);
cluster.waitForActiveCollection("readApiTestViolations", 1, 6);
assertTrue(adminResponse.isSuccess());

View File

@ -287,7 +287,6 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("testNodeWithMultipleReplicasLost",
"conf", 2, 3);
create.setMaxShardsPerNode(2);
create.process(solrClient);
cluster.waitForActiveCollection("testNodeWithMultipleReplicasLost", 2, 6);
@ -372,7 +371,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
assertEquals(response.get("result").toString(), "success");
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("testNodeAdded",
"conf",1, 2).setMaxShardsPerNode(2);
"conf",1, 2);
create.process(solrClient);
waitForState("Timed out waiting for replicas of new collection to be active",
@ -666,7 +665,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionNamePrefix + "_0",
"conf", numShards, nNrtReplicas, nTlogReplicas, nPullReplicas).setMaxShardsPerNode(2);
"conf", numShards, nNrtReplicas, nTlogReplicas, nPullReplicas);
create.process(solrClient);
waitForState("Timed out waiting for replicas of new collection to be active",
@ -695,7 +694,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
for (int i = 1; i < numCollections; i++) {
create = CollectionAdminRequest.createCollection(collectionNamePrefix + "_" + i,
"conf", numShards, 2).setMaxShardsPerNode(numShards * 2);
"conf", numShards, 2);
create.process(solrClient);
waitForState("Timed out waiting for replicas of new collection to be active",
@ -767,7 +766,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
String newNodeName = newNode.getNodeName();
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionNamePrefix + "_0",
"conf", numShards, 2).setMaxShardsPerNode(numShards * 2);
"conf", numShards, 2);
create.process(solrClient);
waitForState("Timed out waiting for replicas of new collection to be active",

View File

@ -128,7 +128,6 @@ public class ExecutePlanActionTest extends SolrCloudTestCase {
String collectionName = "testExecute";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 1, 2);
create.setMaxShardsPerNode(1);
create.process(solrClient);
cluster.waitForActiveCollection(collectionName, 1, 2);
@ -225,7 +224,6 @@ public class ExecutePlanActionTest extends SolrCloudTestCase {
String collectionName = "testIntegration";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 1, 2);
create.setMaxShardsPerNode(1);
create.process(solrClient);
cluster.waitForActiveCollection(collectionName, 1, 2);
@ -292,7 +290,6 @@ public class ExecutePlanActionTest extends SolrCloudTestCase {
String collectionName = "testTaskTimeout";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 1, 2);
create.setMaxShardsPerNode(1);
create.process(solrClient);
cluster.waitForActiveCollection(collectionName, 1, 2);
@ -351,7 +348,6 @@ public class ExecutePlanActionTest extends SolrCloudTestCase {
String collectionName = "testTaskFail";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 1, 2);
create.setMaxShardsPerNode(1);
create.process(solrClient);
cluster.waitForActiveCollection(collectionName, 1, 2);

View File

@ -124,7 +124,7 @@ public class IndexSizeTriggerMixedBoundsTest extends SolrCloudTestCase {
public void testMixedBounds() throws Exception {
String collectionName = "testMixedBounds_collection";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 2, 2).setMaxShardsPerNode(2);
"conf", 2, 2);
create.process(solrClient);
CloudUtil.waitForState(cloudManager, "failed to create " + collectionName, collectionName,
CloudUtil.clusterShape(2, 2, false, true));

View File

@ -136,7 +136,7 @@ public class IndexSizeTriggerSizeEstimationTest extends SolrCloudTestCase {
public void testEstimatedIndexSize() throws Exception {
String collectionName = "testEstimatedIndexSize_collection";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 2, 2).setMaxShardsPerNode(2);
"conf", 2, 2);
create.process(solrClient);
CloudUtil.waitForState(cloudManager, "failed to create " + collectionName, collectionName,

View File

@ -149,7 +149,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
public void testTrigger() throws Exception {
String collectionName = "testTrigger_collection";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 2, 2).setMaxShardsPerNode(2);
"conf", 2, 2);
create.process(solrClient);
if (SPEED == 1) {
@ -261,7 +261,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
public void testSplitIntegration() throws Exception {
String collectionName = "testSplitIntegration_collection";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 2, 2).setMaxShardsPerNode(2);
"conf", 2, 2);
create.process(solrClient);
if (SPEED == 1) {
@ -387,7 +387,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
public void testMergeIntegration() throws Exception {
String collectionName = "testMergeIntegration_collection";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 2, 2).setMaxShardsPerNode(2);
"conf", 2, 2);
create.process(solrClient);
if (SPEED == 1) {
@ -506,7 +506,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
public void testMaxOps() throws Exception {
String collectionName = "testMaxOps_collection";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 5, 2).setMaxShardsPerNode(10);
"conf", 5, 2);
create.process(solrClient);
CloudUtil.waitForState(cloudManager, "failed to create " + collectionName, collectionName,
@ -650,7 +650,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
public void testSplitConfig() throws Exception {
String collectionName = "testSplitConfig_collection";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 2, 2).setMaxShardsPerNode(2);
"conf", 2, 2);
create.process(solrClient);
CloudUtil.waitForState(cloudManager, "failed to create " + collectionName, collectionName,
CloudUtil.clusterShape(2, 2, false, true));

View File

@ -84,7 +84,7 @@ public class MetricTriggerIntegrationTest extends SolrCloudTestCase {
String collectionName = "testMetricTrigger";
CloudSolrClient solrClient = cluster.getSolrClient();
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 2, 2).setMaxShardsPerNode(2);
"conf", 2, 2);
create.process(solrClient);
solrClient.setDefaultCollection(collectionName);

View File

@ -51,7 +51,6 @@ public class MetricTriggerTest extends SolrCloudTestCase {
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(DEFAULT_TEST_COLLECTION_NAME,
"conf", 1, 1);
CloudSolrClient solrClient = cluster.getSolrClient();
create.setMaxShardsPerNode(1);
create.process(solrClient);
cluster.waitForActiveCollection(DEFAULT_TEST_COLLECTION_NAME, 1, 1);
}

View File

@ -84,7 +84,7 @@ public class ScheduledTriggerIntegrationTest extends SolrCloudTestCase {
// this collection will place 2 cores on 1st node and 1 core on 2nd node
String collectionName = "testScheduledTrigger";
CollectionAdminRequest.createCollection(collectionName, 1, 3)
.setMaxShardsPerNode(5).process(solrClient);
.process(solrClient);
cluster.waitForActiveCollection(collectionName, 1, 3);

View File

@ -100,11 +100,9 @@ public class SearchRateTriggerTest extends SolrCloudTestCase {
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(COLL1,
"conf", 2, 2);
CloudSolrClient solrClient = cluster.getSolrClient();
create.setMaxShardsPerNode(1);
create.process(solrClient);
create = CollectionAdminRequest.createCollection(COLL2,
"conf", 2, 2);
create.setMaxShardsPerNode(1);
create.process(solrClient);
CloudUtil.waitForState(cloudManager, COLL1, 60, TimeUnit.SECONDS, clusterShape(2, 2));
@ -238,7 +236,6 @@ public class SearchRateTriggerTest extends SolrCloudTestCase {
TimeSource timeSource = cloudManager.getTimeSource();
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(COLL1,
"conf", 2, 2);
create.setMaxShardsPerNode(1);
create.process(solrClient);
CloudUtil.waitForState(cloudManager, COLL1, 60, TimeUnit.SECONDS, clusterShape(2, 4));

View File

@ -129,7 +129,6 @@ public class SystemLogListenerTest extends SolrCloudTestCase {
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("test",
"conf",3, 2);
create.setMaxShardsPerNode(3);
create.process(solrClient);
waitForState("Timed out waiting for replicas of new collection to be active",

View File

@ -424,7 +424,6 @@ public class TestPolicyCloud extends SolrCloudTestCase {
final String collectionName = "addshard_with_reptype_using_policy";
CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "s1", 1, 1, 1)
.setMaxShardsPerNode(-1)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(collectionName, 1, 3);

View File

@ -224,7 +224,6 @@ public class TestSimComputePlanAction extends SimSolrCloudTestCase {
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("testNodeWithMultipleReplicasLost",
"conf",2, 3);
create.setMaxShardsPerNode(2);
create.process(solrClient);
CloudUtil.waitForState(cluster, "Timed out waiting for replicas of new collection to be active",
@ -314,7 +313,7 @@ public class TestSimComputePlanAction extends SimSolrCloudTestCase {
assertAutoscalingUpdateComplete();
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("testNodeAdded",
"conf",1, 4).setMaxShardsPerNode(-1);
"conf",1, 4);
create.process(solrClient);
CloudUtil.waitForState(cluster, "Timed out waiting for replicas of new collection to be active",

View File

@ -94,7 +94,6 @@ public class TestSimExecutePlanAction extends SimSolrCloudTestCase {
String collectionName = "testExecute";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 1, 2);
create.setMaxShardsPerNode(1);
create.process(solrClient);
if (log.isInfoEnabled()) {
@ -192,7 +191,6 @@ public class TestSimExecutePlanAction extends SimSolrCloudTestCase {
String collectionName = "testIntegration";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 1, 2);
create.setMaxShardsPerNode(1);
create.process(solrClient);
CloudUtil.waitForState(cluster, "Timed out waiting for replicas of new collection to be active",

View File

@ -96,7 +96,7 @@ public class TestSimExtremeIndexing extends SimSolrCloudTestCase {
public void testScaleUp() throws Exception {
String collectionName = "testScaleUp_collection";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 2, 2).setMaxShardsPerNode(10);
"conf", 2, 2);
create.process(solrClient);
CloudUtil.waitForState(cluster, collectionName, 90, TimeUnit.SECONDS,

View File

@ -204,7 +204,6 @@ public class TestSimLargeCluster extends SimSolrCloudTestCase {
String collectionName = "testBasic";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", 5, 5, 5, 5);
create.setMaxShardsPerNode(1);
create.setAutoAddReplicas(false);
create.setCreateNodeSet(String.join(",", nodes));
create.process(solrClient);
@ -283,12 +282,10 @@ public class TestSimLargeCluster extends SimSolrCloudTestCase {
final int pReps = TestUtil.nextInt(random(), 2, 25 - numShards);
final int repsPerShard = (nReps + tReps + pReps);
final int totalCores = repsPerShard * numShards;
final int maxShardsPerNode = atLeast(2) + (totalCores / NUM_NODES);
final String name = "large_sim_collection" + i;
final CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection
(name, "conf", numShards, nReps, tReps, pReps);
create.setMaxShardsPerNode(maxShardsPerNode);
create.setAutoAddReplicas(false);
log.info("CREATE: {}", create);
@ -332,7 +329,6 @@ public class TestSimLargeCluster extends SimSolrCloudTestCase {
String collectionName = "testNodeAdded";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", NUM_NODES / 10, NUM_NODES / 8, NUM_NODES / 8, NUM_NODES / 8);
create.setMaxShardsPerNode(5);
create.setAutoAddReplicas(false);
create.process(solrClient);
@ -549,7 +545,6 @@ public class TestSimLargeCluster extends SimSolrCloudTestCase {
String collectionName = "testNodeLost";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
"conf", NUM_NODES / 5, NUM_NODES / 10);
create.setMaxShardsPerNode(5);
create.setAutoAddReplicas(false);
create.process(solrClient);

Some files were not shown because too many files have changed in this diff Show More