This commit is contained in:
Cao Manh Dat 2017-06-01 08:51:17 +07:00
commit 82f2a35396
47 changed files with 693 additions and 1270 deletions

View File

@ -49,6 +49,34 @@ public class GeoPolygonFactory {
return makeGeoPolygon(planetModel, pointList, null);
}
/** Create a GeoConcavePolygon using the specified points. The polygon must have
 * a maximum extent larger than PI. The siding of the polygon is chosen so that any
 * adjacent point to a segment provides an exterior measurement and therefore,
 * the polygon is a truly concave polygon. Note that this method should only be used when there is certainty
 * that we are dealing with a concave polygon, e.g. the polygon has been serialized.
 * If there is not such certainty, please refer to {@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List)}.
 * @param planetModel is the planet model.
 * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of.
 * @return a GeoPolygon corresponding to what was specified.
 */
public static GeoPolygon makeGeoConcavePolygon(final PlanetModel planetModel,
final List<GeoPoint> pointList) {
return new GeoConcavePolygon(planetModel, pointList);
}
/** Create a GeoConvexPolygon using the specified points. The polygon must have
 * a maximum extent no larger than PI. The siding of the polygon is chosen so that any adjacent
 * point to a segment provides an interior measurement and therefore
 * the polygon is a truly convex polygon. Note that this method should only be used when
 * there is certainty that we are dealing with a convex polygon, e.g. the polygon has been serialized.
 * If there is not such certainty, please refer to {@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List)}.
 * @param planetModel is the planet model.
 * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of.
 * @return a GeoPolygon corresponding to what was specified.
 */
public static GeoPolygon makeGeoConvexPolygon(final PlanetModel planetModel,
final List<GeoPoint> pointList) {
return new GeoConvexPolygon(planetModel, pointList);
}
/** Create a GeoPolygon using the specified points and holes, using order to determine
* siding of the polygon. Much like ESRI, this method uses clockwise to indicate the space
* on the same side of the shape as being inside, and counter-clockwise to indicate the
@ -66,6 +94,41 @@ public class GeoPolygonFactory {
final List<GeoPolygon> holes) {
return makeGeoPolygon(planetModel, pointList, holes, 0.0);
}
/** Build a GeoConcavePolygon from the supplied corner points plus an optional list of holes.
 * The polygon must have a maximum extent larger than PI; siding is picked so that every point
 * adjacent to a segment yields an exterior measurement, making the result a truly concave polygon.
 * Call this only when the polygon is known to be concave (e.g. it was serialized); when in doubt,
 * use {@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List, List)} instead.
 * @param planetModel is the planet model.
 * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of.
 * @param holes is a list of polygons representing "holes" in the outside polygon. Holes describe
 *  the area outside each hole as being "in set". Null == none.
 * @return a GeoPolygon corresponding to what was specified.
 */
public static GeoPolygon makeGeoConcavePolygon(final PlanetModel planetModel,
    final List<GeoPoint> pointList,
    final List<GeoPolygon> holes) {
  return new GeoConcavePolygon(planetModel, pointList, holes);
}
/** Build a GeoConvexPolygon from the supplied corner points plus an optional list of holes.
 * The polygon must have a maximum extent no larger than PI; siding is picked so that every point
 * adjacent to a segment yields an interior measurement, making the result a truly convex polygon.
 * Call this only when the polygon is known to be convex (e.g. it was serialized); when in doubt,
 * use {@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List, List)} instead.
 * @param planetModel is the planet model.
 * @param pointList is a list of the GeoPoints to build an arbitrary polygon out of.
 * @param holes is a list of polygons representing "holes" in the outside polygon. Holes describe
 *  the area outside each hole as being "in set". Null == none.
 * @return a GeoPolygon corresponding to what was specified.
 */
public static GeoPolygon makeGeoConvexPolygon(final PlanetModel planetModel,
    final List<GeoPoint> pointList,
    final List<GeoPolygon> holes) {
  return new GeoConvexPolygon(planetModel, pointList, holes);
}
/** Create a GeoPolygon using the specified points and holes, using order to determine
* siding of the polygon. Much like ESRI, this method uses clockwise to indicate the space

View File

@ -19,6 +19,7 @@ package org.apache.lucene.spatial3d.geom;
import java.util.ArrayList;
import java.util.List;
import java.util.BitSet;
import java.util.Collections;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
@ -966,5 +967,67 @@ shape:
assertTrue(solid.isWithin(point));
}
@Test
public void testConcavePolygon() {
  // Corner points of the polygon under test.
  final List<GeoPoint> corners = new ArrayList<>();
  corners.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
  corners.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6));
  corners.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5));
  corners.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4));
  // The general factory wraps its result in a composite; unwrap the single member shape.
  final GeoPolygon fromFactory =
      (GeoPolygon) ((GeoCompositePolygon) GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, corners)).shapes.get(0);
  // Building the same point list directly as concave must produce an equal polygon.
  final GeoPolygon direct = GeoPolygonFactory.makeGeoConcavePolygon(PlanetModel.SPHERE, corners);
  assertEquals(fromFactory, direct);
}
@Test
public void testConcavePolygonWithHole() {
  // Outer ring of the polygon under test.
  final List<GeoPoint> outer = new ArrayList<>();
  outer.add(new GeoPoint(PlanetModel.SPHERE, -1.1, -1.5));
  outer.add(new GeoPoint(PlanetModel.SPHERE, 1.0, -1.6));
  outer.add(new GeoPoint(PlanetModel.SPHERE, 1.1, -1.5));
  outer.add(new GeoPoint(PlanetModel.SPHERE, 1.0, -1.4));
  // A smaller ring that becomes a hole inside the outer polygon.
  final List<GeoPoint> holeRing = new ArrayList<>();
  holeRing.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
  holeRing.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6));
  holeRing.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5));
  holeRing.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4));
  final GeoPolygon hole = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, holeRing);
  final List<GeoPolygon> holes = Collections.singletonList(hole);
  // The general factory wraps its result in a composite; unwrap the single member shape.
  final GeoPolygon fromFactory =
      (GeoPolygon) ((GeoCompositePolygon) GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, outer, holes)).shapes.get(0);
  // Building directly as concave with the same hole must produce an equal polygon.
  final GeoPolygon direct = GeoPolygonFactory.makeGeoConcavePolygon(PlanetModel.SPHERE, outer, holes);
  assertEquals(fromFactory, direct);
}
@Test
public void testConvexPolygon() {
  // Corner points of the polygon under test.
  final List<GeoPoint> corners = new ArrayList<>();
  corners.add(new GeoPoint(PlanetModel.SPHERE, 0, 0));
  corners.add(new GeoPoint(PlanetModel.SPHERE, 0.0, 0.5));
  corners.add(new GeoPoint(PlanetModel.SPHERE, 0.5, 0.5));
  corners.add(new GeoPoint(PlanetModel.SPHERE, 0.5, 0));
  // The general factory wraps its result in a composite; unwrap the single member shape.
  final GeoPolygon fromFactory =
      (GeoPolygon) ((GeoCompositePolygon) GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, corners)).shapes.get(0);
  // Building the same point list directly as convex must produce an equal polygon.
  final GeoPolygon direct = GeoPolygonFactory.makeGeoConvexPolygon(PlanetModel.SPHERE, corners);
  assertEquals(fromFactory, direct);
}
@Test
public void testConvexPolygonWithHole() {
  // Outer ring of the polygon under test.
  final List<GeoPoint> outer = new ArrayList<>();
  outer.add(new GeoPoint(PlanetModel.SPHERE, -1, -1));
  outer.add(new GeoPoint(PlanetModel.SPHERE, -1, 1));
  outer.add(new GeoPoint(PlanetModel.SPHERE, 1, 1));
  outer.add(new GeoPoint(PlanetModel.SPHERE, 1, -1));
  // A smaller ring that becomes a hole inside the outer polygon.
  final List<GeoPoint> holeRing = new ArrayList<>();
  holeRing.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
  holeRing.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6));
  holeRing.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5));
  holeRing.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4));
  final GeoPolygon hole = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, holeRing);
  final List<GeoPolygon> holes = Collections.singletonList(hole);
  // The general factory wraps its result in a composite; unwrap the single member shape.
  final GeoPolygon fromFactory =
      (GeoPolygon) ((GeoCompositePolygon) GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, outer, holes)).shapes.get(0);
  // Building directly as convex with the same hole must produce an equal polygon.
  final GeoPolygon direct = GeoPolygonFactory.makeGeoConvexPolygon(PlanetModel.SPHERE, outer, holes);
  assertEquals(fromFactory, direct);
}
}

View File

@ -201,6 +201,8 @@ Other Changes
* SOLR-10752: replicationFactor (nrtReplicas) default is 0 if tlogReplicas is specified when creating a collection
(Tomás Fernández Löbbe)
* SOLR-10757: delete/refactor/cleanup CollectionAdminRequest deprecations (hossman)
================== 6.7.0 ==================
Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
@ -248,6 +250,11 @@ Bug Fixes
* SOLR-10719: Creating a core.properties fails if the parent of core.properties is a symlinked directory
(Erick Erickson)
* SOLR-10360: Solr HDFS snapshot export fails due to FileNotFoundException error when using MR1 instead of
yarn. (Hrishikesh via Mark Miller)
* SOLR-10137: Ensure that ConfigSets created via API are mutable. (Hrishikesh via Mark Miller)
Optimizations
----------------------
* SOLR-10634: JSON Facet API: When a field/terms facet will retrieve all buckets (i.e. limit:-1)

View File

@ -22,6 +22,7 @@ import java.lang.invoke.MethodHandles;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
@ -72,7 +73,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
String node = message.getStr(CoreAdminParams.NODE);
String shard = message.getStr(SHARD_ID_PROP);
String coreName = message.getStr(CoreAdminParams.NAME);
Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()));
Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
boolean parallel = message.getBool("parallel", false);
if (StringUtils.isBlank(coreName)) {
coreName = message.getStr(CoreAdminParams.PROPERTY_PREFIX + CoreAdminParams.NAME);

View File

@ -301,11 +301,8 @@ public class SolrSnapshotsTool implements Closeable {
if (backupRepo.isPresent()) {
backup.setRepositoryName(backupRepo.get());
}
if (asyncReqId.isPresent()) {
backup.setAsyncId(asyncReqId.get());
}
CollectionAdminResponse resp = backup.process(solrClient);
Preconditions.checkState(resp.getStatus() == 0, "The request failed. The status code is " + resp.getStatus());
// if asyncId is null, processAsync will block and throw an Exception with any error
backup.processAsync(asyncReqId.orElse(null), solrClient);
} catch (Exception e) {
log.error("Failed to backup collection meta-data for collection " + collectionName, e);
System.out.println("Failed to backup collection meta-data for collection " + collectionName

View File

@ -238,6 +238,10 @@ public class ConfigSetsHandler extends RequestHandlerBase implements PermissionN
props.put(param, params.get(param));
}
}
// The configset created via an API should be mutable.
props.put("immutable", "false");
return props;
}

View File

@ -51,7 +51,19 @@
},
"replicationFactor": {
"type": "integer",
"description": "The number of replicas to be created for each shard. Replicas are physical copies of each shard, acting as failover for the shard."
"description": "The number of NRT replicas to be created for each shard. Replicas are physical copies of each shard, acting as failover for the shard."
},
"nrtReplicas": {
"type": "integer",
"description": "The number of NRT replicas to be created for each shard. Replicas are physical copies of each shard, acting as failover for the shard. Replicas of type NRT will be updated with each document that is added to the cluster, and can use \"softCommits\" to get a new view of the index in Near Real Time. This parameter works in the same way as 'replicationFactor'"
},
"tlogReplicas": {
"type": "integer",
"description": "The number of TLOG replicas to be created for each shard. TLOG replicas update their transaction log for every update to the cluster, but only the shard leader updates the local index, other TLOG replicas will use segment replication and copy the latest index files from the leader."
},
"pullReplicas": {
"type": "integer",
"description": "The number of PULL replicas to be created for each shard. PULL replicas don't receive copies of the documents on update requests, they just replicate the latest segments periodically from the shard leader. PULL replicas can't become shard leaders, and need at least one active TLOG(recommended) or NRT replicas in the shard to replicate from."
},
"nodeSet": {
"type": "array",

View File

@ -101,6 +101,11 @@
"async": {
"type": "string",
"description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously when this is defined."
},
"type": {
"type": "string",
"enum":["NRT", "TLOG", "PULL"],
"description": "The type of replica to add. NRT (default), TLOG or PULL"
}
},
"required":["shard"]

View File

@ -87,8 +87,12 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
int replFactor = TestUtil.nextInt(random(), 1, 2);
int numTlogReplicas = TestUtil.nextInt(random(), 0, 1);
int numPullReplicas = TestUtil.nextInt(random(), 0, 1);
CollectionAdminRequest.Create create =
CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor, numTlogReplicas, numPullReplicas);
CollectionAdminRequest.Create create = isImplicit ?
// NOTE: use shard list with same # of shards as NUM_SHARDS; we assume this later
CollectionAdminRequest.createCollectionWithImplicitRouter(getCollectionName(), "conf1", "shard1,shard2", replFactor, numTlogReplicas, numPullReplicas) :
CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor, numTlogReplicas, numPullReplicas);
if (NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) > cluster.getJettySolrRunners().size() || random().nextBoolean()) {
create.setMaxShardsPerNode((int)Math.ceil(NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) / cluster.getJettySolrRunners().size()));//just to assert it survives the restoration
if (doSplitShardOperation) {
@ -102,9 +106,6 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
coreProps.put("customKey", "customValue");//just to assert it survives the restoration
create.setProperties(coreProps);
if (isImplicit) { //implicit router
create.setRouterName(ImplicitDocRouter.NAME);
create.setNumShards(null);//erase it. TODO suggest a new createCollectionWithImplicitRouter method
create.setShards("shard1,shard2"); // however still same number as NUM_SHARDS; we assume this later
create.setRouterField("shard_s");
} else {//composite id router
if (random().nextBoolean()) {

View File

@ -24,8 +24,6 @@ import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.SplitShard;
import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
@ -54,29 +52,19 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
final CloudSolrClient client = cluster.getSolrClient();
RequestStatusState state = new Create()
.setCollectionName("testasynccollectioncreation")
.setNumShards(1)
.setReplicationFactor(1)
.setConfigName("conf1")
RequestStatusState state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
.processAndWait(client, MAX_TIMEOUT_SECONDS);
assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
state = new Create()
.setCollectionName("testasynccollectioncreation")
.setNumShards(1)
.setConfigName("conf1")
state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
.processAndWait(client, MAX_TIMEOUT_SECONDS);
assertSame("Recreating a collection with the same should have failed.", RequestStatusState.FAILED, state);
state = new CollectionAdminRequest.AddReplica()
.setCollectionName("testasynccollectioncreation")
.setShardName("shard1")
.processAndWait(client, MAX_TIMEOUT_SECONDS);
state = CollectionAdminRequest.addReplicaToShard("testasynccollectioncreation", "shard1")
.processAndWait(client, MAX_TIMEOUT_SECONDS);
assertSame("Add replica did not complete", RequestStatusState.COMPLETED, state);
state = new SplitShard()
.setCollectionName("testasynccollectioncreation")
state = CollectionAdminRequest.splitShard("testasynccollectioncreation")
.setShardName("shard1")
.processAndWait(client, MAX_TIMEOUT_SECONDS * 2);
assertEquals("Shard split did not complete. Last recorded state: " + state, RequestStatusState.COMPLETED, state);
@ -89,12 +77,9 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
final String collection = "testAsyncOperations";
final CloudSolrClient client = cluster.getSolrClient();
RequestStatusState state = new Create()
.setCollectionName(collection)
.setNumShards(1)
RequestStatusState state = CollectionAdminRequest.createCollection(collection,"conf1",1,1)
.setRouterName("implicit")
.setShards("shard1")
.setConfigName("conf1")
.processAndWait(client, MAX_TIMEOUT_SECONDS);
assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
@ -114,14 +99,11 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
query.set("shards", "shard1");
assertEquals(numDocs, client.query(collection, query).getResults().getNumFound());
state = new CollectionAdminRequest.Reload()
.setCollectionName(collection)
state = CollectionAdminRequest.reloadCollection(collection)
.processAndWait(client, MAX_TIMEOUT_SECONDS);
assertSame("ReloadCollection did not complete", RequestStatusState.COMPLETED, state);
state = new CollectionAdminRequest.CreateShard()
.setCollectionName(collection)
.setShardName("shard2")
state = CollectionAdminRequest.createShard(collection,"shard2")
.processAndWait(client, MAX_TIMEOUT_SECONDS);
assertSame("CreateShard did not complete", RequestStatusState.COMPLETED, state);
@ -135,16 +117,11 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
query.set("shards", "shard2");
assertEquals(1, client.query(collection, query).getResults().getNumFound());
state = new CollectionAdminRequest.DeleteShard()
.setCollectionName(collection)
.setShardName("shard2")
.processAndWait(client, MAX_TIMEOUT_SECONDS);
state = CollectionAdminRequest.deleteShard(collection,"shard2").processAndWait(client, MAX_TIMEOUT_SECONDS);
assertSame("DeleteShard did not complete", RequestStatusState.COMPLETED, state);
state = new CollectionAdminRequest.AddReplica()
.setCollectionName(collection)
.setShardName("shard1")
.processAndWait(client, MAX_TIMEOUT_SECONDS);
state = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
.processAndWait(client, MAX_TIMEOUT_SECONDS);
assertSame("AddReplica did not complete", RequestStatusState.COMPLETED, state);
//cloudClient watch might take a couple of seconds to reflect it
@ -157,9 +134,7 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
Thread.sleep(100);
}
state = new CollectionAdminRequest.CreateAlias()
.setAliasName("myalias")
.setAliasedCollections(collection)
state = CollectionAdminRequest.createAlias("myalias",collection)
.processAndWait(client, MAX_TIMEOUT_SECONDS);
assertSame("CreateAlias did not complete", RequestStatusState.COMPLETED, state);
@ -167,8 +142,7 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
query.set("shards", "shard1");
assertEquals(numDocs, client.query("myalias", query).getResults().getNumFound());
state = new CollectionAdminRequest.DeleteAlias()
.setAliasName("myalias")
state = CollectionAdminRequest.deleteAlias("myalias")
.processAndWait(client, MAX_TIMEOUT_SECONDS);
assertSame("DeleteAlias did not complete", RequestStatusState.COMPLETED, state);
@ -191,15 +165,11 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
shard1 = client.getZkStateReader().getClusterState().getSlice(collection, "shard1");
String replicaName = shard1.getReplicas().iterator().next().getName();
state = new CollectionAdminRequest.DeleteReplica()
.setCollectionName(collection)
.setShardName("shard1")
.setReplica(replicaName)
.processAndWait(client, MAX_TIMEOUT_SECONDS);
state = CollectionAdminRequest.deleteReplica(collection, "shard1", replicaName)
.processAndWait(client, MAX_TIMEOUT_SECONDS);
assertSame("DeleteReplica did not complete", RequestStatusState.COMPLETED, state);
state = new CollectionAdminRequest.Delete()
.setCollectionName(collection)
state = CollectionAdminRequest.deleteCollection(collection)
.processAndWait(client, MAX_TIMEOUT_SECONDS);
assertSame("DeleteCollection did not complete", RequestStatusState.COMPLETED, state);
}

View File

@ -267,17 +267,21 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
@Test
public void testClusterProp() throws InterruptedException, IOException, SolrServerException {
// sanity check our expected default
final ClusterProperties props = new ClusterProperties(zkClient());
assertEquals("Expecting prop to default to unset, test needs upated",
props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), null);
CollectionAdminResponse response = CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
.process(cluster.getSolrClient());
.process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
ClusterProperties props = new ClusterProperties(zkClient());
assertEquals("Cluster property was not set", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "true"), "false");
assertEquals("Cluster property was not set", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), "false");
// Unset ClusterProp that we set.
CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, null).process(cluster.getSolrClient());
assertEquals("Cluster property was not unset", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "true"), "true");
assertEquals("Cluster property was not unset", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), null);
}

View File

@ -164,11 +164,8 @@ public class ConcurrentDeleteAndCreateCollectionTest extends SolrTestCaseJ4 {
private void createCollection() {
try {
final CollectionAdminResponse response = new CollectionAdminRequest.Create()
.setCollectionName(collectionName)
.setNumShards(1)
.setReplicationFactor(1)
.setConfigName(configName).process(solrClient);
final CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName,configName,1,1)
.process(solrClient);
if (response.getStatus() != 0) {
addFailure(new RuntimeException("failed to create collection " + collectionName));
}
@ -180,9 +177,8 @@ public class ConcurrentDeleteAndCreateCollectionTest extends SolrTestCaseJ4 {
private void deleteCollection() {
try {
final CollectionAdminRequest.Delete deleteCollectionRequest = new CollectionAdminRequest.Delete()
.setCollectionName(collectionName);
final CollectionAdminRequest.Delete deleteCollectionRequest
= CollectionAdminRequest.deleteCollection(collectionName);
final CollectionAdminResponse response = deleteCollectionRequest.process(solrClient);
if (response.getStatus() != 0) {
addFailure(new RuntimeException("failed to delete collection " + collectionName));

View File

@ -120,27 +120,4 @@ public class DeleteStatusTest extends SolrCloudTestCase {
CollectionAdminRequest.requestStatus(id2).process(client).getRequestStatus());
}
@Test
@SuppressWarnings("deprecation")
public void testDeprecatedConstructorValidation() throws Exception {
final CloudSolrClient client = cluster.getSolrClient();
try {
new CollectionAdminRequest.DeleteStatus().process(client);
fail("delete status should have failed");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("Either requestid or flush parameter must be specified."));
}
try {
new CollectionAdminRequest.DeleteStatus().setFlush(true)
.setRequestId("foo")
.process(client);
fail("delete status should have failed");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("Both requestid and flush parameters can not be specified together."));
}
}
}

View File

@ -152,14 +152,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
} finally {
log.info("Cleaning up after the test.");
// try to clean up
try {
CollectionAdminRequest.Delete req = new CollectionAdminRequest.Delete();
req.setCollectionName(testCollectionName);
req.process(cloudClient);
} catch (Exception e) {
// don't fail the test
log.warn("Could not delete collection {} after test completed", testCollectionName);
}
attemptCollectionDelete(cloudClient, testCollectionName);
}
}
@ -206,15 +199,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
}
} finally {
log.info("Cleaning up after the test.");
// try to clean up
try {
CollectionAdminRequest.Delete req = new CollectionAdminRequest.Delete();
req.setCollectionName(testCollectionName);
req.process(cloudClient);
} catch (Exception e) {
// don't fail the test
log.warn("Could not delete collection {} after test completed", testCollectionName);
}
attemptCollectionDelete(cloudClient, testCollectionName);
}
}
@ -431,10 +416,8 @@ public class ForceLeaderTest extends HttpPartitionTest {
}
private void doForceLeader(SolrClient client, String collectionName, String shard) throws IOException, SolrServerException {
CollectionAdminRequest.ForceLeader forceLeader = new CollectionAdminRequest.ForceLeader();
forceLeader.setCollectionName(collectionName);
forceLeader.setShardName(shard);
client.request(forceLeader);
CollectionAdminRequest.ForceLeader forceLeader = CollectionAdminRequest.forceLeaderElection(collectionName, shard);
client.request(forceLeader);
}
protected int getNumberOfActiveReplicas(ClusterState clusterState, String collection, String sliceId) {

View File

@ -157,13 +157,9 @@ public class FullSolrCloudDistribCmdsTest extends AbstractFullDistribZkTestBase
CollectionAdminResponse response;
Map<String, NamedList<Integer>> coresStatus;
CollectionAdminRequest.Create createCollectionRequest = new CollectionAdminRequest.Create()
.setCollectionName("implicit_collection_without_routerfield")
.setRouterName("implicit")
.setNumShards(2)
.setShards("shard1,shard2")
.setReplicationFactor(2)
.setConfigName("conf1");
CollectionAdminRequest.Create createCollectionRequest
= CollectionAdminRequest.createCollectionWithImplicitRouter("implicit_collection_without_routerfield",
"conf1","shard1,shard2",2);
response = createCollectionRequest.process(server);
assertEquals(0, response.getStatus());
@ -285,14 +281,10 @@ public class FullSolrCloudDistribCmdsTest extends AbstractFullDistribZkTestBase
CollectionAdminResponse response;
Map<String, NamedList<Integer>> coresStatus;
response = new CollectionAdminRequest.Create()
.setCollectionName("compositeid_collection_with_routerfield")
response = CollectionAdminRequest.createCollection("compositeid_collection_with_routerfield","conf1",2,2)
.setRouterName("compositeId")
.setRouterField("routefield_s")
.setNumShards(2)
.setShards("shard1,shard2")
.setReplicationFactor(2)
.setConfigName("conf1")
.process(server);
assertEquals(0, response.getStatus());

View File

@ -37,7 +37,6 @@ import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrException;
@ -199,13 +198,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
zkClient.delete(znodePath, -1, false);
// try to clean up
try {
new CollectionAdminRequest.Delete()
.setCollectionName(testCollectionName).process(cloudClient);
} catch (Exception e) {
// don't fail the test
log.warn("Could not delete collection {} after test completed", testCollectionName);
}
attemptCollectionDelete(cloudClient, testCollectionName);
}
protected void testMinRf() throws Exception {
@ -387,13 +380,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
log.info("testRf2 succeeded ... deleting the "+testCollectionName+" collection");
// try to clean up
try {
new CollectionAdminRequest.Delete()
.setCollectionName(testCollectionName).process(cloudClient);
} catch (Exception e) {
// don't fail the test
log.warn("Could not delete collection {} after test completed", testCollectionName);
}
attemptCollectionDelete(cloudClient, testCollectionName);
}
protected void testRf3() throws Exception {
@ -443,14 +430,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
log.info("testRf3 succeeded ... deleting the "+testCollectionName+" collection");
// try to clean up
try {
CollectionAdminRequest.Delete req = new CollectionAdminRequest.Delete();
req.setCollectionName(testCollectionName);
req.process(cloudClient);
} catch (Exception e) {
// don't fail the test
log.warn("Could not delete collection {} after test completed", testCollectionName);
}
attemptCollectionDelete(cloudClient, testCollectionName);
}
// test inspired by SOLR-6511
@ -534,14 +514,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
log.info("testLeaderZkSessionLoss succeeded ... deleting the "+testCollectionName+" collection");
// try to clean up
try {
CollectionAdminRequest.Delete req = new CollectionAdminRequest.Delete();
req.setCollectionName(testCollectionName);
req.process(cloudClient);
} catch (Exception e) {
// don't fail the test
log.warn("Could not delete collection {} after test completed", testCollectionName);
}
attemptCollectionDelete(cloudClient, testCollectionName);
}
protected List<Replica> getActiveOrRecoveringReplicas(String testCollectionName, String shardId) throws Exception {
@ -693,4 +666,5 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
log.info("Took {} ms to see replicas [{}] become active.", timer.getTime(), replicasToCheck);
}
}

View File

@ -20,7 +20,6 @@ import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.Replica;
import org.junit.Test;
@ -184,13 +183,6 @@ public class LeaderFailoverAfterPartitionTest extends HttpPartitionTest {
assertDocsExistInAllReplicas(participatingReplicas, testCollectionName, 1, 6);
// try to clean up
try {
CollectionAdminRequest.Delete req = new CollectionAdminRequest.Delete();
req.setCollectionName(testCollectionName);
req.process(cloudClient);
} catch (Exception e) {
// don't fail the test
log.warn("Could not delete collection {} after test completed", testCollectionName);
}
attemptCollectionDelete(cloudClient, testCollectionName);
}
}

View File

@ -19,7 +19,6 @@ package org.apache.solr.cloud;
import org.apache.http.NoHttpResponseException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.util.RTimer;
@ -96,14 +95,7 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest
Thread.sleep(sleepMsBeforeHealPartition);
// try to clean up
try {
CollectionAdminRequest.Delete req = new CollectionAdminRequest.Delete();
req.setCollectionName(testCollectionName);
req.process(cloudClient);
} catch (Exception e) {
// don't fail the test
log.warn("Could not delete collection {} after test completed", testCollectionName);
}
attemptCollectionDelete(cloudClient, testCollectionName);
log.info("multiShardTest completed OK");
}
@ -144,14 +136,7 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest
Thread.sleep(sleepMsBeforeHealPartition);
// try to clean up
try {
CollectionAdminRequest.Delete req = new CollectionAdminRequest.Delete();
req.setCollectionName(testCollectionName);
req.process(cloudClient);
} catch (Exception e) {
// don't fail the test
log.warn("Could not delete collection {} after test completed", testCollectionName);
}
attemptCollectionDelete(cloudClient, testCollectionName);
log.info("oneShardTest completed OK");
}

View File

@ -24,6 +24,7 @@ import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.SplitShard;
import org.apache.solr.client.solrj.request.QueryRequest;
@ -31,7 +32,6 @@ import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.Utils;
import org.junit.Test;
import org.slf4j.Logger;
@ -116,12 +116,7 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
private void testParallelCollectionAPICalls() throws IOException, SolrServerException {
try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
for(int i = 1 ; i <= NUM_COLLECTIONS ; i++) {
new Create()
.setCollectionName("ocptest" + i)
.setNumShards(4)
.setConfigName("conf1")
.setAsyncId(String.valueOf(i))
.process(client);
CollectionAdminRequest.createCollection("ocptest" + i,"conf1",4,1).processAsync(String.valueOf(i), client);
}
boolean pass = false;
@ -158,12 +153,8 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
"/overseer/collection-queue-work", new Overseer.Stats());
try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
Create createCollectionRequest = new Create()
.setCollectionName("ocptest_shardsplit")
.setNumShards(4)
.setConfigName("conf1")
.setAsyncId("1000");
createCollectionRequest.process(client);
Create createCollectionRequest = CollectionAdminRequest.createCollection("ocptest_shardsplit","conf1",4,1);
createCollectionRequest.processAsync("1000",client);
distributedQueue.offer(Utils.toJSON(Utils.makeMap(
"collection", "ocptest_shardsplit",
@ -215,36 +206,21 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
private void testDeduplicationOfSubmittedTasks() throws IOException, SolrServerException {
try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
new Create()
.setCollectionName("ocptest_shardsplit2")
.setNumShards(4)
.setConfigName("conf1")
.setAsyncId("3000")
.process(client);
CollectionAdminRequest.createCollection("ocptest_shardsplit2","conf1",4,1).processAsync("3000",client);
SplitShard splitShardRequest = new SplitShard()
.setCollectionName("ocptest_shardsplit2")
.setShardName(SHARD1)
.setAsyncId("3001");
splitShardRequest.process(client);
splitShardRequest = new SplitShard()
.setCollectionName("ocptest_shardsplit2")
.setShardName(SHARD2)
.setAsyncId("3002");
splitShardRequest.process(client);
SplitShard splitShardRequest = CollectionAdminRequest.splitShard("ocptest_shardsplit2").setShardName(SHARD1);
splitShardRequest.processAsync("3001",client);
splitShardRequest = CollectionAdminRequest.splitShard("ocptest_shardsplit2").setShardName(SHARD2);
splitShardRequest.processAsync("3002",client);
// Now submit another task with the same id. At this time, hopefully the previous 3002 should still be in the queue.
splitShardRequest = new SplitShard()
.setCollectionName("ocptest_shardsplit2")
.setShardName(SHARD1)
.setAsyncId("3002");
CollectionAdminResponse response = splitShardRequest.process(client);
NamedList r = response.getResponse();
assertEquals("Duplicate request was supposed to exist but wasn't found. De-duplication of submitted task failed.",
"Task with the same requestid already exists.", r.get("error"));
expectThrows(SolrServerException.class, () -> {
CollectionAdminRequest.splitShard("ocptest_shardsplit2").setShardName(SHARD1).processAsync("3002",client);
// more helpful assertion failure
fail("Duplicate request was supposed to exist but wasn't found. De-duplication of submitted task failed.");
});
for (int i = 3001; i <= 3002; i++) {
final RequestStatusState state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, client);
assertSame("Task " + i + " did not complete, final state: " + state, RequestStatusState.COMPLETED, state);
@ -271,11 +247,8 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
indexThread.start();
try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
SplitShard splitShardRequest = new SplitShard()
.setCollectionName("collection1")
.setShardName(SHARD1)
.setAsyncId("2000");
splitShardRequest.process(client);
SplitShard splitShardRequest = CollectionAdminRequest.splitShard("collection1").setShardName(SHARD1);
splitShardRequest.processAsync("2000",client);
RequestStatusState state = getRequestState("2000", client);
while (state == RequestStatusState.SUBMITTED) {

View File

@ -65,7 +65,9 @@ public class ReplaceNodeTest extends SolrCloudTestCase {
String emptyNode = l.remove(0);
String node2bdecommissioned = l.get(0);
CollectionAdminRequest.Create create;
create = pickRandom(CollectionAdminRequest.createCollection(coll, "conf1", 5, 2),
// NOTE: always using the createCollection that takes in 'int' for all types of replicas, so we never
// have to worry about null checking when comparing the Create command with the final Slices
create = pickRandom(CollectionAdminRequest.createCollection(coll, "conf1", 5, 2,0,0),
CollectionAdminRequest.createCollection(coll, "conf1", 5, 1,1,0),
CollectionAdminRequest.createCollection(coll, "conf1", 5, 0,1,1),
CollectionAdminRequest.createCollection(coll, "conf1", 5, 1,0,1),

View File

@ -101,9 +101,7 @@ public class ReplicationFactorTest extends AbstractFullDistribZkTestBase {
CollectionAdminResponse resp = createCollection(testCollectionName, numShards, replicationFactor, maxShardsPerNode);
if (resp.getResponse().get("failure") != null) {
CollectionAdminRequest.Delete req = new CollectionAdminRequest.Delete();
req.setCollectionName(testCollectionName);
req.process(cloudClient);
CollectionAdminRequest.deleteCollection(testCollectionName).process(cloudClient);
resp = createCollection(testCollectionName, numShards, replicationFactor, maxShardsPerNode);

View File

@ -70,7 +70,7 @@ public class RollingRestartTest extends AbstractFullDistribZkTestBase {
int n = random().nextInt(getShardCount());
String nodeName = cloudJettys.get(n).nodeName;
log.info("Chose {} as overseer designate", nodeName);
new CollectionAdminRequest.AddRole().setRole("overseer").setNode(nodeName).process(cloudClient);
CollectionAdminRequest.addRole(nodeName,"overseer").process(cloudClient);
designates.add(nodeName);
designateJettys.add(cloudJettys.get(n));
}

View File

@ -523,16 +523,12 @@ public class ShardSplitTest extends BasicDistributedZkTest {
log.info("Starting testSplitShardWithRule");
String collectionName = "shardSplitWithRule";
CollectionAdminRequest.Create createRequest = new CollectionAdminRequest.Create()
.setCollectionName(collectionName)
.setNumShards(1)
.setReplicationFactor(2)
CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName,1,2)
.setRule("shard:*,replica:<2,node:*");
CollectionAdminResponse response = createRequest.process(cloudClient);
assertEquals(0, response.getStatus());
CollectionAdminRequest.SplitShard splitShardRequest = new CollectionAdminRequest.SplitShard()
.setCollectionName(collectionName)
CollectionAdminRequest.SplitShard splitShardRequest = CollectionAdminRequest.splitShard(collectionName)
.setShardName("shard1");
response = splitShardRequest.process(cloudClient);
assertEquals(String.valueOf(response.getErrorMessages()), 0, response.getStatus());

View File

@ -146,12 +146,8 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBa
// serially
private void testBasics() throws Exception {
String collection1 = "solrj_collection";
Create createCollectionRequest = new Create()
.setCollectionName(collection1)
.setNumShards(2)
.setReplicationFactor(2)
Create createCollectionRequest = CollectionAdminRequest.createCollection(collection1,"conf1",2,2)
.setMaxShardsPerNode(2)
.setConfigName("conf1")
.setRouterField("myOwnField")
.setAutoAddReplicas(true);
CollectionAdminResponse response = createCollectionRequest.process(cloudClient);
@ -161,12 +157,8 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBa
waitForRecoveriesToFinish(collection1, false);
String collection2 = "solrj_collection2";
createCollectionRequest = new Create()
.setCollectionName(collection2)
.setNumShards(2)
.setReplicationFactor(2)
createCollectionRequest = CollectionAdminRequest.createCollection(collection2,"conf1",2,2)
.setMaxShardsPerNode(2)
.setConfigName("conf1")
.setRouterField("myOwnField")
.setAutoAddReplicas(false);
CollectionAdminResponse response2 = createCollectionRequest.process(getCommonCloudSolrClient());
@ -177,12 +169,8 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBa
waitForRecoveriesToFinish(collection2, false);
String collection3 = "solrj_collection3";
createCollectionRequest = new Create()
.setCollectionName(collection3)
.setNumShards(5)
.setReplicationFactor(1)
createCollectionRequest = CollectionAdminRequest.createCollection(collection3,"conf1",5,1)
.setMaxShardsPerNode(1)
.setConfigName("conf1")
.setRouterField("myOwnField")
.setAutoAddReplicas(true);
CollectionAdminResponse response3 = createCollectionRequest.process(getCommonCloudSolrClient());
@ -194,12 +182,8 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBa
// a collection has only 1 replica per a shard
String collection4 = "solrj_collection4";
createCollectionRequest = new Create()
.setCollectionName(collection4)
.setNumShards(5)
.setReplicationFactor(1)
createCollectionRequest = CollectionAdminRequest.createCollection(collection4,"conf1",5,1)
.setMaxShardsPerNode(5)
.setConfigName("conf1")
.setRouterField("text")
.setAutoAddReplicas(true);
CollectionAdminResponse response4 = createCollectionRequest.process(getCommonCloudSolrClient());

View File

@ -39,10 +39,7 @@ public class SimpleCollectionCreateDeleteTest extends AbstractFullDistribZkTestB
}
}
String collectionName = "SimpleCollectionCreateDeleteTest";
CollectionAdminRequest.Create create = new CollectionAdminRequest.Create()
.setCollectionName(collectionName)
.setNumShards(1)
.setReplicationFactor(1)
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,1,1)
.setCreateNodeSet(overseerNode)
.setStateFormat(2);
@ -51,17 +48,13 @@ public class SimpleCollectionCreateDeleteTest extends AbstractFullDistribZkTestB
if (request.get("success") != null) {
assertTrue(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, false));
CollectionAdminRequest.Delete delete = new CollectionAdminRequest.Delete();
delete.setCollectionName(collectionName);
CollectionAdminRequest delete = CollectionAdminRequest.deleteCollection(collectionName);
cloudClient.request(delete);
assertFalse(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, false));
// create collection again on a node other than the overseer leader
create = new CollectionAdminRequest.Create()
.setCollectionName(collectionName)
.setNumShards(1)
.setReplicationFactor(1)
create = CollectionAdminRequest.createCollection(collectionName,1,1)
.setCreateNodeSet(notOverseerNode)
.setStateFormat(2);
request = create.process(cloudClient).getResponse();

View File

@ -185,13 +185,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
String cname = "clusterStatusZNodeVersion";
try (CloudSolrClient client = createCloudClient(null)) {
CollectionAdminRequest.Create create = new CollectionAdminRequest.Create()
.setCollectionName(cname)
.setMaxShardsPerNode(1)
.setNumShards(1)
.setReplicationFactor(1)
.setConfigName("conf1");
create.process(client);
CollectionAdminRequest.createCollection(cname,"conf1",1,1).setMaxShardsPerNode(1).process(client);
waitForRecoveriesToFinish(cname, true);
@ -213,9 +207,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
Integer znodeVersion = (Integer) collection.get("znodeVersion");
assertNotNull(znodeVersion);
CollectionAdminRequest.AddReplica addReplica = new CollectionAdminRequest.AddReplica()
.setCollectionName(cname)
.setShardName("shard1");
CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(cname, "shard1");
addReplica.process(client);
waitForRecoveriesToFinish(cname, true);
@ -628,13 +620,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
try (CloudSolrClient client = createCloudClient(null)) {
client.connect();
new CollectionAdminRequest.Create()
.setCollectionName("testClusterStateMigration")
.setNumShards(1)
.setReplicationFactor(1)
.setConfigName("conf1")
.setStateFormat(1)
.process(client);
CollectionAdminRequest.createCollection("testClusterStateMigration","conf1",1,1).setStateFormat(1).process(client);
waitForRecoveriesToFinish("testClusterStateMigration", true);
@ -647,9 +633,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
}
client.commit("testClusterStateMigration");
new CollectionAdminRequest.MigrateClusterState()
.setCollectionName("testClusterStateMigration")
.process(client);
CollectionAdminRequest.migrateCollectionFormat("testClusterStateMigration").process(client);
client.getZkStateReader().forceUpdateCollection("testClusterStateMigration");

View File

@ -28,8 +28,12 @@ import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
@ -117,21 +121,40 @@ public class TestPullReplica extends SolrCloudTestCase {
@Repeat(iterations=2) // 2 times to make sure cleanup is complete and we can create the same collection
public void testCreateDelete() throws Exception {
try {
if (random().nextBoolean()) {
CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1, 0, 3)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
} else {
// Sometimes don't use SolrJ.
String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&numShards=%s&pullReplicas=%s&maxShardsPerNode=%s",
cluster.getRandomJetty(random()).getBaseUrl(),
collectionName,
2, // numShards
3, // pullReplicas
100); // maxShardsPerNode
url = url + pickRandom("", "&nrtReplicas=1", "&replicationFactor=1"); // These options should all mean the same
HttpGet createCollectionRequest = new HttpGet(url);
cluster.getSolrClient().getHttpClient().execute(createCollectionRequest);
switch (random().nextInt(3)) {
case 0:
// Sometimes use SolrJ
CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1, 0, 3)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
break;
case 1:
// Sometimes use v1 API
String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&numShards=%s&pullReplicas=%s&maxShardsPerNode=%s",
cluster.getRandomJetty(random()).getBaseUrl(),
collectionName,
2, // numShards
3, // pullReplicas
100); // maxShardsPerNode
url = url + pickRandom("", "&nrtReplicas=1", "&replicationFactor=1"); // These options should all mean the same
HttpGet createCollectionGet = new HttpGet(url);
cluster.getSolrClient().getHttpClient().execute(createCollectionGet);
break;
case 2:
// Sometimes use V2 API
url = cluster.getRandomJetty(random()).getBaseUrl().toString() + "/____v2/c";
String requestBody = String.format(Locale.ROOT, "{create:{name:%s, numShards:%s, pullReplicas:%s, maxShardsPerNode:%s %s}}",
collectionName,
2, // numShards
3, // pullReplicas
100, // maxShardsPerNode
pickRandom("", ", nrtReplicas:1", ", replicationFactor:1")); // These options should all mean the same
HttpPost createCollectionPost = new HttpPost(url);
createCollectionPost.setHeader("Content-type", "application/json");
createCollectionPost.setEntity(new StringEntity(requestBody));
HttpResponse httpResponse = cluster.getSolrClient().getHttpClient().execute(createCollectionPost);
assertEquals(200, httpResponse.getStatusLine().getStatusCode());
break;
}
boolean reloaded = false;
while (true) {
@ -243,9 +266,9 @@ public class TestPullReplica extends SolrCloudTestCase {
DocCollection docCollection = assertNumberOfReplicas(2, 0, 0, false, true);
assertEquals(2, docCollection.getSlices().size());
CollectionAdminRequest.addReplicaToShard(collectionName, "shard1", Replica.Type.PULL).process(cluster.getSolrClient());
addReplicaToShard("shard1", Replica.Type.PULL);
docCollection = assertNumberOfReplicas(2, 0, 1, true, false);
CollectionAdminRequest.addReplicaToShard(collectionName, "shard2", Replica.Type.PULL).process(cluster.getSolrClient());
addReplicaToShard("shard2", Replica.Type.PULL);
docCollection = assertNumberOfReplicas(2, 0, 2, true, false);
waitForState("Expecting collection to have 2 shards and 2 replica each", collectionName, clusterShape(2, 2));
@ -587,4 +610,36 @@ public class TestPullReplica extends SolrCloudTestCase {
cluster.getSolrClient().add(collectionName, docs);
cluster.getSolrClient().commit(collectionName);
}
private void addReplicaToShard(String shardName, Replica.Type type) throws ClientProtocolException, IOException, SolrServerException {
switch (random().nextInt(3)) {
case 0: // Add replica with SolrJ
CollectionAdminResponse response = CollectionAdminRequest.addReplicaToShard(collectionName, shardName, type).process(cluster.getSolrClient());
assertEquals("Unexpected response status: " + response.getStatus(), 0, response.getStatus());
break;
case 1: // Add replica with V1 API
String url = String.format(Locale.ROOT, "%s/admin/collections?action=ADDREPLICA&collection=%s&shard=%s&type=%s",
cluster.getRandomJetty(random()).getBaseUrl(),
collectionName,
shardName,
type);
HttpGet addReplicaGet = new HttpGet(url);
HttpResponse httpResponse = cluster.getSolrClient().getHttpClient().execute(addReplicaGet);
assertEquals(200, httpResponse.getStatusLine().getStatusCode());
break;
case 2:// Add replica with V2 API
url = String.format(Locale.ROOT, "%s/____v2/c/%s/shards",
cluster.getRandomJetty(random()).getBaseUrl(),
collectionName);
String requestBody = String.format(Locale.ROOT, "{add-replica:{shard:%s, type:%s}}",
shardName,
type);
HttpPost addReplicaPost = new HttpPost(url);
addReplicaPost.setHeader("Content-type", "application/json");
addReplicaPost.setEntity(new StringEntity(requestBody));
httpResponse = cluster.getSolrClient().getHttpClient().execute(addReplicaPost);
assertEquals(200, httpResponse.getStatusLine().getStatusCode());
break;
}
}
}

View File

@ -73,17 +73,11 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
*/
private void testRequestTracking() throws Exception {
new CollectionAdminRequest.Create()
.setCollectionName("a1x2")
.setNumShards(1)
.setReplicationFactor(2)
CollectionAdminRequest.createCollection("a1x2",1,2)
.setCreateNodeSet(nodeNames.get(0) + ',' + nodeNames.get(1))
.process(cloudClient);
new CollectionAdminRequest.Create()
.setCollectionName("b1x1")
.setNumShards(1)
.setReplicationFactor(1)
CollectionAdminRequest.createCollection("b1x1",1,1)
.setCreateNodeSet(nodeNames.get(2))
.process(cloudClient);
@ -134,10 +128,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
private void testQueryAgainstDownReplica() throws Exception {
log.info("Creating collection 'football' with 1 shard and 2 replicas");
new CollectionAdminRequest.Create()
.setCollectionName("football")
.setNumShards(1)
.setReplicationFactor(2)
CollectionAdminRequest.createCollection("football",1,2)
.setCreateNodeSet(nodeNames.get(0) + ',' + nodeNames.get(1))
.process(cloudClient);

View File

@ -72,11 +72,7 @@ public class TestRequestForwarding extends SolrTestCaseJ4 {
private void createCollection(String name, String config) throws Exception {
CollectionAdminResponse response;
CollectionAdminRequest.Create create = new CollectionAdminRequest.Create();
create.setConfigName(config);
create.setCollectionName(name);
create.setNumShards(2);
create.setReplicationFactor(1);
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(name,config,2,1);
create.setMaxShardsPerNode(1);
response = create.process(solrCluster.getSolrClient());

View File

@ -174,10 +174,7 @@ public class TestSolrCloudWithKerberosAlt extends LuceneTestCase {
String configName = "solrCloudCollectionConfig";
miniCluster.uploadConfigSet(SolrTestCaseJ4.TEST_PATH().resolve("collection1/conf"), configName);
CollectionAdminRequest.Create createRequest = new CollectionAdminRequest.Create();
createRequest.setCollectionName(collectionName);
createRequest.setNumShards(NUM_SHARDS);
createRequest.setReplicationFactor(REPLICATION_FACTOR);
CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName,NUM_SHARDS,REPLICATION_FACTOR);
Properties properties = new Properties();
properties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
properties.put("solr.tests.maxBufferedDocs", "100000");
@ -216,9 +213,7 @@ public class TestSolrCloudWithKerberosAlt extends LuceneTestCase {
assertEquals(1, rsp.getResults().getNumFound());
// delete the collection we created earlier
CollectionAdminRequest.Delete deleteRequest = new CollectionAdminRequest.Delete();
deleteRequest.setCollectionName(collectionName);
deleteRequest.process(cloudSolrClient);
CollectionAdminRequest.deleteCollection(collectionName).process(cloudSolrClient);
AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, zkStateReader, true, true, 330);
}

View File

@ -173,7 +173,7 @@ public class TestSolrCloudWithSecureImpersonation extends SolrTestCaseJ4 {
private void create1ShardCollection(String name, String config, MiniSolrCloudCluster solrCluster) throws Exception {
CollectionAdminResponse response;
CollectionAdminRequest.Create create = new CollectionAdminRequest.Create() {
CollectionAdminRequest.Create create = new CollectionAdminRequest.Create(name,config,1,1,0,0) {
@Override
public SolrParams getParams() {
ModifiableSolrParams msp = new ModifiableSolrParams(super.getParams());
@ -181,10 +181,6 @@ public class TestSolrCloudWithSecureImpersonation extends SolrTestCaseJ4 {
return msp;
}
};
create.setConfigName(config);
create.setCollectionName(name);
create.setNumShards(1);
create.setReplicationFactor(1);
create.setMaxShardsPerNode(1);
response = create.process(solrCluster.getSolrClient());

View File

@ -29,8 +29,12 @@ import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrClient;
@ -145,20 +149,38 @@ public class TestTlogReplica extends SolrCloudTestCase {
@Repeat(iterations=2) // 2 times to make sure cleanup is complete and we can create the same collection
public void testCreateDelete() throws Exception {
try {
if (random().nextBoolean()) {
CollectionAdminRequest.createCollection(collectionName, "conf", 2, 0, 4, 0)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
} else {
// Sometimes don't use SolrJ
String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&numShards=%s&tlogReplicas=%s&maxShardsPerNode=%s",
cluster.getRandomJetty(random()).getBaseUrl(),
collectionName,
2, // numShards
4, // tlogReplicas
100); // maxShardsPerNode
HttpGet createCollectionRequest = new HttpGet(url);
cluster.getSolrClient().getHttpClient().execute(createCollectionRequest);
switch (random().nextInt(3)) {
case 0:
CollectionAdminRequest.createCollection(collectionName, "conf", 2, 0, 4, 0)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
break;
case 1:
// Sometimes don't use SolrJ
String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&numShards=%s&tlogReplicas=%s&maxShardsPerNode=%s",
cluster.getRandomJetty(random()).getBaseUrl(),
collectionName,
2, // numShards
4, // tlogReplicas
100); // maxShardsPerNode
HttpGet createCollectionGet = new HttpGet(url);
HttpResponse httpResponse = cluster.getSolrClient().getHttpClient().execute(createCollectionGet);
assertEquals(200, httpResponse.getStatusLine().getStatusCode());
break;
case 2:
// Sometimes use V2 API
url = cluster.getRandomJetty(random()).getBaseUrl().toString() + "/____v2/c";
String requestBody = String.format(Locale.ROOT, "{create:{name:%s, numShards:%s, tlogReplicas:%s, maxShardsPerNode:%s}}",
collectionName,
2, // numShards
4, // tlogReplicas
100); // maxShardsPerNode
HttpPost createCollectionPost = new HttpPost(url);
createCollectionPost.setHeader("Content-type", "application/json");
createCollectionPost.setEntity(new StringEntity(requestBody));
httpResponse = cluster.getSolrClient().getHttpClient().execute(createCollectionPost);
assertEquals(200, httpResponse.getStatusLine().getStatusCode());
break;
}
boolean reloaded = false;
@ -244,9 +266,9 @@ public class TestTlogReplica extends SolrCloudTestCase {
DocCollection docCollection = createAndWaitForCollection(2, 0, 1, 0);
assertEquals(2, docCollection.getSlices().size());
CollectionAdminRequest.addReplicaToShard(collectionName, "shard1", Replica.Type.TLOG).process(cluster.getSolrClient());
addReplicaToShard("shard1", Replica.Type.TLOG);
docCollection = assertNumberOfReplicas(0, 3, 0, true, false);
CollectionAdminRequest.addReplicaToShard(collectionName, "shard2", Replica.Type.TLOG).process(cluster.getSolrClient());
addReplicaToShard("shard2", Replica.Type.TLOG);
docCollection = assertNumberOfReplicas(0, 4, 0, true, false);
waitForState("Expecting collection to have 2 shards and 2 replica each", collectionName, clusterShape(2, 2));
@ -260,6 +282,38 @@ public class TestTlogReplica extends SolrCloudTestCase {
assertNumberOfReplicas(0, 3, 0, true, true);
}
private void addReplicaToShard(String shardName, Replica.Type type) throws ClientProtocolException, IOException, SolrServerException {
switch (random().nextInt(3)) {
case 0: // Add replica with SolrJ
CollectionAdminResponse response = CollectionAdminRequest.addReplicaToShard(collectionName, shardName, type).process(cluster.getSolrClient());
assertEquals("Unexpected response status: " + response.getStatus(), 0, response.getStatus());
break;
case 1: // Add replica with V1 API
String url = String.format(Locale.ROOT, "%s/admin/collections?action=ADDREPLICA&collection=%s&shard=%s&type=%s",
cluster.getRandomJetty(random()).getBaseUrl(),
collectionName,
shardName,
type);
HttpGet addReplicaGet = new HttpGet(url);
HttpResponse httpResponse = cluster.getSolrClient().getHttpClient().execute(addReplicaGet);
assertEquals(200, httpResponse.getStatusLine().getStatusCode());
break;
case 2:// Add replica with V2 API
url = String.format(Locale.ROOT, "%s/____v2/c/%s/shards",
cluster.getRandomJetty(random()).getBaseUrl(),
collectionName);
String requestBody = String.format(Locale.ROOT, "{add-replica:{shard:%s, type:%s}}",
shardName,
type);
HttpPost addReplicaPost = new HttpPost(url);
addReplicaPost.setHeader("Content-type", "application/json");
addReplicaPost.setEntity(new StringEntity(requestBody));
httpResponse = cluster.getSolrClient().getHttpClient().execute(addReplicaPost);
assertEquals(200, httpResponse.getStatusLine().getStatusCode());
break;
}
}
public void testRemoveLeader() throws Exception {
doReplaceLeader(true);
}

View File

@ -62,10 +62,7 @@ public class TestBlobHandler extends AbstractFullDistribZkTestBase {
try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
CollectionAdminResponse response1;
CollectionAdminRequest.Create createCollectionRequest = new CollectionAdminRequest.Create()
.setCollectionName(".system")
.setNumShards(1)
.setReplicationFactor(2);
CollectionAdminRequest.Create createCollectionRequest = CollectionAdminRequest.createCollection(".system",1,2);
response1 = createCollectionRequest.process(client);
assertEquals(0, response1.getStatus());
assertTrue(response1.isSuccess());
@ -119,10 +116,7 @@ public class TestBlobHandler extends AbstractFullDistribZkTestBase {
public static void createSystemCollection(SolrClient client) throws SolrServerException, IOException {
CollectionAdminResponse response1;
CollectionAdminRequest.Create createCollectionRequest = new CollectionAdminRequest.Create()
.setCollectionName(".system")
.setNumShards(1)
.setReplicationFactor(2);
CollectionAdminRequest.Create createCollectionRequest = CollectionAdminRequest.createCollection(".system",1,2);
response1 = createCollectionRequest.process(client);
assertEquals(0, response1.getStatus());
assertTrue(response1.isSuccess());

View File

@ -58,11 +58,19 @@ public class TestCollectionAPIs extends SolrTestCaseJ4 {
MockCollectionsHandler collectionsHandler = new MockCollectionsHandler();
ApiBag apiBag = new ApiBag(false);
Collection<Api> apis = collectionsHandler.getApis();
for (Api api : apis) apiBag.register(api, Collections.EMPTY_MAP);
for (Api api : apis) apiBag.register(api, Collections.emptyMap());
//test a simple create collection call
compareOutput(apiBag, "/collections", POST,
"{create:{name:'newcoll', config:'schemaless', numShards:2, replicationFactor:2 }}", null,
"{name:newcoll, fromApi:'true', replicationFactor:'2', collection.configName:schemaless, numShards:'2', stateFormat:'2', operation:create}");
compareOutput(apiBag, "/collections", POST,
"{create:{name:'newcoll', config:'schemaless', numShards:2, nrtReplicas:2 }}", null,
"{name:newcoll, fromApi:'true', nrtReplicas:'2', collection.configName:schemaless, numShards:'2', stateFormat:'2', operation:create}");
compareOutput(apiBag, "/collections", POST,
"{create:{name:'newcoll', config:'schemaless', numShards:2, nrtReplicas:2, tlogReplicas:2, pullReplicas:2 }}", null,
"{name:newcoll, fromApi:'true', nrtReplicas:'2', tlogReplicas:'2', pullReplicas:'2', collection.configName:schemaless, numShards:'2', stateFormat:'2', operation:create}");
//test a create collection with custom properties
compareOutput(apiBag, "/collections", POST,
@ -106,6 +114,21 @@ public class TestCollectionAPIs extends SolrTestCaseJ4 {
"{split:{ splitKey:id12345, coreProperties : {prop1:prop1Val, prop2:prop2Val} }}", null,
"{collection: collName , split.key : id12345 , operation : splitshard, property.prop1:prop1Val, property.prop2: prop2Val}"
);
compareOutput(apiBag, "/collections/collName/shards", POST,
"{add-replica:{shard: shard1, node: 'localhost_8978' , type:'TLOG' }}", null,
"{collection: collName , shard : shard1, node :'localhost_8978', operation : addreplica, type: TLOG}"
);
compareOutput(apiBag, "/collections/collName/shards", POST,
"{add-replica:{shard: shard1, node: 'localhost_8978' , type:'PULL' }}", null,
"{collection: collName , shard : shard1, node :'localhost_8978', operation : addreplica, type: PULL}"
);
assertErrorContains(apiBag, "/collections/collName/shards", POST,
"{add-replica:{shard: shard1, node: 'localhost_8978' , type:'foo' }}", null,
"Value of enum must be one of"
);
compareOutput(apiBag, "/collections/collName", POST,
"{add-replica-property : {name:propA , value: VALA, shard: shard1, replica:replica1}}", null,
@ -150,13 +173,22 @@ public class TestCollectionAPIs extends SolrTestCaseJ4 {
Map expected = (Map) fromJSONString(expectedOutputMapJson);
assertMapEqual(expected, output);
return output;
}
/**
 * Issues the given API call and asserts that it fails with an exception whose
 * message contains {@code expectedErrorMsg}. Fails the test if the call
 * completes without throwing.
 *
 * NOTE(review): only {@link RuntimeException} is caught here; a checked
 * exception from makeCall would propagate instead of being matched — confirm
 * that is the intended behavior.
 */
static void assertErrorContains(final ApiBag apiBag, final String path, final SolrRequest.METHOD method,
                                final String payload, final CoreContainer cc, String expectedErrorMsg) throws Exception {
  try {
    makeCall(apiBag, path, method, payload, cc);
    fail("Expected exception");
  } catch (RuntimeException e) {
    // Substring match keeps the assertion robust to incidental message-prefix changes.
    assertTrue("Expected exception with error message '" + expectedErrorMsg + "' but got: " + e.getMessage(), e.getMessage().contains(expectedErrorMsg));
  }
}
public static Pair<SolrQueryRequest, SolrQueryResponse> makeCall(final ApiBag apiBag, String path,
final SolrRequest.METHOD method,
final String payload, final CoreContainer cc) throws Exception {
SolrParams queryParams = new MultiMapSolrParams(Collections.EMPTY_MAP);
SolrParams queryParams = new MultiMapSolrParams(Collections.emptyMap());
if (path.indexOf('?') > 0) {
String queryStr = path.substring(path.indexOf('?') + 1);
path = path.substring(0, path.indexOf('?'));

View File

@ -54,6 +54,6 @@ public class TestConfigsApi extends SolrTestCaseJ4 {
"{name :sample, operation:delete}");
compareOutput(apiBag, "/cluster/configs", POST, "{create:{name : newconf, baseConfigSet: sample }}", null,
"{operation:create, name :newconf, baseConfigSet: sample }");
"{operation:create, name :newconf, baseConfigSet: sample, immutable: false }");
}
}

View File

@ -196,20 +196,11 @@ public class TestDistribIDF extends SolrTestCaseJ4 {
private void createCollection(String name, String config, String router) throws Exception {
CollectionAdminResponse response;
if (router.equals(ImplicitDocRouter.NAME)) {
CollectionAdminRequest.Create create = new CollectionAdminRequest.Create();
create.setConfigName(config);
create.setCollectionName(name);
create.setReplicationFactor(1);
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollectionWithImplicitRouter(name,config,"a,b,c",1);
create.setMaxShardsPerNode(1);
create.setRouterName(router);
create.setShards("a,b,c");
response = create.process(solrCluster.getSolrClient());
} else {
CollectionAdminRequest.Create create = new CollectionAdminRequest.Create();
create.setConfigName(config);
create.setCollectionName(name);
create.setNumShards(2);
create.setReplicationFactor(1);
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(name,config,2,1);
create.setMaxShardsPerNode(1);
response = create.process(solrCluster.getSolrClient());
}

View File

@ -86,7 +86,7 @@ copy_snapshot_files() {
for shardId in $(hdfs dfs -stat "%n" "${copylisting_dir_path}/*"); do
oPath="${destPath}/${snapshotName}/snapshot.${shardId}"
echo "Copying the index files for ${shardId} to ${oPath}"
${distCpCmd} -f " ${copylisting_dir_path}/${shardId}" "${oPath}" > /dev/null
${distCpCmd} -f "${copylisting_dir_path}/${shardId}" "${oPath}" > /dev/null
done
else
echo "Directory ${copylisting_dir_path} does not exist."

View File

@ -20,7 +20,16 @@
A `codecFactory` can be specified in `solrconfig.xml` to determine which Lucene {lucene-javadocs}/core/org/apache/lucene/codecs/Codec.html[`Codec`] is used when writing the index to disk.
If not specified, Lucene's default codec is implicitly used, but a {solr-javadocs}/solr-core/org/apache/solr/core/SchemaCodecFactory.html[`solr.SchemaCodecFactory`] is also available which supports 2 key features:
If not specified, Lucene's default codec is implicitly used.
There are two alternatives to Lucene's default codec:
. {solr-javadocs}/solr-core/org/apache/solr/core/SchemaCodecFactory.html[`solr.SchemaCodecFactory`]
. {solr-javadocs}/solr-core/org/apache/solr/core/SimpleTextCodecFactory.html[`solr.SimpleTextCodecFactory`]
=== solr.SchemaCodecFactory
`solr.SchemaCodecFactory` supports 2 key features:
* Schema based per-fieldtype configuration for `docValuesFormat` and `postingsFormat` - see the <<field-type-definitions-and-properties.adoc#field-type-properties,Field Type Properties>> section for more details.
* A `compressionMode` option:
@ -35,3 +44,16 @@ Example:
<str name="compressionMode">BEST_COMPRESSION</str>
</codecFactory>
----
=== solr.SimpleTextCodecFactory
This factory for Lucene's `SimpleTextCodec` produces a plain text human-readable index format.
CAUTION: *FOR RECREATIONAL USE ONLY*. This codec should never be used in production. `SimpleTextCodec` is relatively slow and takes up a large amount of disk space. Its use should be limited to educational and debugging purposes.
Example:
[source,xml]
----
<codecFactory class="solr.SimpleTextCodecFactory"/>
----

View File

@ -119,7 +119,7 @@ The RELOAD action loads a new core from the configuration of an existing, regist
`admin/cores?action=RELOAD&core=_core-name_`
This is useful when you've made changes to a Solr core's configuration on disk, such as adding new field definitions. Calling the RELOAD action lets you apply the new configuration without having to restart the Web container.
This is useful when you've made changes to a Solr core's configuration on disk, such as adding new field definitions. Calling the RELOAD action lets you apply the new configuration without having to restart Solr.
[IMPORTANT]
====

View File

@ -55,7 +55,7 @@ a[data-toggle="tooltip"] {
/* Used for nav buttons */
.btn-primary {
color: #ffffff;
background-color: #0A7C38;
background-color: #F35B38;
border-color: #E6E7E8;
}

View File

@ -20,7 +20,7 @@
Solr can encrypt communications to and from clients, and between nodes in SolrCloud mode, with SSL.
This section describes enabling SSL with the example Jetty server using a self-signed certificate.
This section describes enabling SSL using a self-signed certificate.
For background on SSL certificates and keys, see http://www.tldp.org/HOWTO/SSL-Certificates-HOWTO/.

View File

@ -25,7 +25,7 @@ This section will describe the default `solr.xml` file included with Solr and ho
[[Formatofsolr.xml-Definingsolr.xml]]
== Defining solr.xml
You can find `solr.xml` in your Solr Home directory or in Zookeeper. The default `solr.xml` file looks like this:
You can find `solr.xml` in your `$SOLR_HOME` directory (usually `server/solr`) in standalone mode or in Zookeeper when using SolrCloud. The default `solr.xml` file looks like this:
[source,xml]
----
@ -66,17 +66,17 @@ There are no attributes that you can specify in the `<solr>` tag, which is the r
|`collectionsHandler` |As above, for custom CollectionsHandler implementations.
| `infoHandler` |As above, for custom InfoHandler implementations.
|`coreLoadThreads` |Specifies the number of threads that will be assigned to load cores in parallel.
|`coreRootDirectory` |The root of the core discovery tree, defaults to SOLR_HOME.
|`coreRootDirectory` |The root of the core discovery tree, defaults to `$SOLR_HOME`.
|`managementPath` |Currently non-operational.
|`sharedLib` |Specifies the path to a common library directory that will be shared across all cores. Any JAR files in this directory will be added to the search path for Solr plugins. This path is relative to the top-level container's Solr Home. Custom handlers may be placed in this directory
|`sharedLib` |Specifies the path to a common library directory that will be shared across all cores. Any JAR files in this directory will be added to the search path for Solr plugins. This path is relative to `$SOLR_HOME`. Custom handlers may be placed in this directory.
|`shareSchema` |This attribute, when set to true, ensures that the multiple cores pointing to the same Schema resource file will be referring to the same IndexSchema Object. Sharing the IndexSchema Object makes loading the core faster. If you use this feature, make sure that no core-specific property is used in your Schema file.
|`transientCacheSize` |Defines how many cores with transient=true that can be loaded before swapping the least recently used core for a new core.
|`configSetBaseDir` |The directory under which configsets for solr cores can be found. Defaults to SOLR_HOME/configsets
|`configSetBaseDir` |The directory under which configsets for Solr cores can be found. Defaults to `$SOLR_HOME/configsets`.
|===
==== The `<solrcloud>` element
==== The `<solrcloud>` Element
This element defines several parameters that relate so SolrCloud. This section is ignored unless the solr instance is started with either `-DzkRun` or `-DzkHost`
This element defines several parameters that relate to SolrCloud. This section is ignored unless the Solr instance is started with either `-DzkRun` or `-DzkHost`.
// TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
@ -96,18 +96,18 @@ This element defines several parameters that relate so SolrCloud. This section i
|`zkCredentialsProvider` & ` zkACLProvider` |Optional parameters that can be specified if you are using <<zookeeper-access-control.adoc#zookeeper-access-control,ZooKeeper Access Control>>.
|===
==== The `<logging>` element
==== The `<logging>` Element
// TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
[cols="30,70",options="header"]
|===
|Node |Description
|`class` |The class to use for logging. The corresponding JAR file must be available to solr, perhaps through a `<lib>` directive in solrconfig.xml.
|`class` |The class to use for logging. The corresponding JAR file must be available to Solr, perhaps through a `<lib>` directive in `solrconfig.xml`.
|`enabled` |true/false - whether to enable logging or not.
|===
===== The `<logging><watcher>` element
===== The `<logging><watcher>` Element
// TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
@ -118,7 +118,7 @@ This element defines several parameters that relate so SolrCloud. This section i
|`threshold` |The logging level above which your particular logging implementation will record. For example when using log4j one might specify DEBUG, WARN, INFO, etc.
|===
==== The `<shardHandlerFactory>` element
==== The `<shardHandlerFactory>` Element
Custom shard handlers can be defined in `solr.xml` if you wish to create a custom shard handler.
@ -134,8 +134,8 @@ Since this is a custom shard handler, sub-elements are specific to the implement
[cols="30,70",options="header"]
|===
|Node |Description
|`socketTimeout` |The read timeout for intra-cluster query and administrative requests. The default is the same as the distribUpdateSoTimeout specified in the solrcloud section.
|`connTimeout` |The connection timeout for intra-cluster query and administrative requests. Defaults to the distribUpdateConnTimeout specified in the solrcloud section
|`socketTimeout` |The read timeout for intra-cluster query and administrative requests. The default is the same as the `distribUpdateSoTimeout` specified in the `<solrcloud>` section.
|`connTimeout` |The connection timeout for intra-cluster query and administrative requests. Defaults to the `distribUpdateConnTimeout` specified in the `<solrcloud>` section.
|`urlScheme` |URL scheme to be used in distributed search
|`maxConnectionsPerHost` |Maximum connections allowed per host. Defaults to 20
|`maxConnections` |Maximum total connections allowed. Defaults to 10000

View File

@ -425,7 +425,7 @@ This option is useful if you are running multiple standalone Solr instances on t
The `bin/solr` script allows enabling or disabling Basic Authentication, allowing you to configure authentication from the command line.
Currently, this script only enables Basic Authentication.
Currently, this script only enables Basic Authentication, and is only available when using SolrCloud mode.
=== Enabling Basic Authentication
@ -435,7 +435,7 @@ TIP: For more information about Solr's authentication plugins, see the section <
The `bin/solr auth enable` command makes several changes to enable Basic Authentication:
* Creates a `security.json` file and, if using SolrCloud, uploads it to ZooKeeper. The `security.json` file will look similar to:
* Creates a `security.json` file and uploads it to ZooKeeper. The `security.json` file will look similar to:
+
[source,json]
----
@ -484,15 +484,14 @@ When *true*, blocks all unauthenticated users from accessing Solr. This defaults
`-updateIncludeFileOnly`::
When *true*, only the settings in `bin/solr.in.sh` or `bin\solr.in.cmd` will be updated, and `security.json` will not be created.
// TODO 6.6 clarify when this is required
`-z`::
Defines the ZooKeeper connect string.
Defines the ZooKeeper connect string. This is useful if you want to enable authentication before all your Solr nodes have come up.
`-d`::
Defines the Solr server directory, by default `$SOLR_HOME/server`. It is not common to need to override the default.
Defines the Solr server directory, by default `$SOLR_HOME/server`. It is not common to need to override the default, and is only needed if you have customized the `$SOLR_HOME` directory path.
`-s`::
Defines the location of `solr.solr.home`, which by default is `server/solr`. If you have multiple instances of Solr on the same host, you likely need to define this.
Defines the location of `solr.solr.home`, which by default is `server/solr`. If you have multiple instances of Solr on the same host, or if you have customized the `$SOLR_HOME` directory path, you likely need to define this.
=== Disabling Basic Authentication

View File

@ -17,6 +17,7 @@
package org.apache.solr.client.solrj;
import static org.apache.solr.common.params.CoreAdminParams.*;
import static org.apache.solr.common.params.CollectionAdminParams.FLUSH;
import java.util.Iterator;
import java.util.Set;
@ -37,135 +38,131 @@ import com.google.common.collect.Sets;
public class CollectionAdminRequestRequiredParamsTest extends LuceneTestCase {
public void testBalanceShardUnique() {
final CollectionAdminRequest.BalanceShardUnique request = new CollectionAdminRequest.BalanceShardUnique()
.setCollection("foo")
.setPropertyName("prop");
CollectionAdminRequest.BalanceShardUnique request = CollectionAdminRequest.balanceReplicaProperty("foo","prop");
assertContainsParams(request.getParams(), ACTION, COLLECTION, "property");
request.setShardUnique(true);
assertContainsParams(request.getParams(), ACTION, COLLECTION, "property","shardUnique");
request.setOnlyActiveNodes(false);
assertContainsParams(request.getParams(), ACTION, COLLECTION, "property","shardUnique","onlyactivenodes");
request.setShardUnique(null);
assertContainsParams(request.getParams(), ACTION, COLLECTION, "property","onlyactivenodes");
}
public void testClusterProp() {
final CollectionAdminRequest.ClusterProp request = new CollectionAdminRequest.ClusterProp()
.setPropertyName("foo")
.setPropertyValue("bar");
CollectionAdminRequest.ClusterProp request = CollectionAdminRequest.setClusterProperty("foo","bar");
assertContainsParams(request.getParams(), ACTION, NAME, "val");
}
public void testAddRole() {
final CollectionAdminRequest.AddRole request = new CollectionAdminRequest.AddRole()
.setNode("node")
.setRole("role");
CollectionAdminRequest.AddRole request = CollectionAdminRequest.addRole("node","role");
assertContainsParams(request.getParams(), ACTION, "node", "role");
}
public void testRemoveRole() {
final CollectionAdminRequest.RemoveRole request = new CollectionAdminRequest.RemoveRole()
.setNode("node")
.setRole("role");
CollectionAdminRequest.RemoveRole request = CollectionAdminRequest.removeRole("node","role");
assertContainsParams(request.getParams(), ACTION, "node", "role");
}
public void testAddReplica() {
// with shard parameter
CollectionAdminRequest.AddReplica request = new CollectionAdminRequest.AddReplica()
.setShardName("shard")
.setCollectionName("collection");
// with shard parameter and "client side" implicit type param
CollectionAdminRequest.AddReplica request = CollectionAdminRequest.addReplicaToShard("collection", "shard");
assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD, ZkStateReader.REPLICA_TYPE);
// with only shard parameter and "server side" implicit type, so no param
request = CollectionAdminRequest.addReplicaToShard("collection", "shard", null);
assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD);
// with route parameter
request = new CollectionAdminRequest.AddReplica()
.setRouteKey("route")
.setCollectionName("collection");
request = CollectionAdminRequest.addReplicaByRouteKey("collection","route");
assertContainsParams(request.getParams(), ACTION, COLLECTION, ShardParams._ROUTE_);
// with type parameter
request = new CollectionAdminRequest.AddReplica()
.setShardName("shard")
.setCollectionName("collection")
.setType(Replica.Type.NRT);
// with explicit type parameter
request = CollectionAdminRequest.addReplicaToShard("collection", "shard", Replica.Type.NRT);
assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD, ZkStateReader.REPLICA_TYPE);
}
public void testAddReplicaProp() {
final CollectionAdminRequest.AddReplicaProp request = new CollectionAdminRequest.AddReplicaProp()
.setShardName("shard")
.setCollectionName("collection")
.setReplica("replica")
.setPropertyName("prop")
.setPropertyValue("value");
final CollectionAdminRequest.AddReplicaProp request = CollectionAdminRequest.addReplicaProperty
("collection", "shard", "replica", "prop", "value");
assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD, REPLICA, "property", "property.value");
}
public void testClusterStatus() {
final CollectionAdminRequest.ClusterStatus request = new CollectionAdminRequest.ClusterStatus();
final CollectionAdminRequest.ClusterStatus request = CollectionAdminRequest.getClusterStatus();
assertContainsParams(request.getParams(), ACTION);
request.setCollectionName("foo");
assertContainsParams(request.getParams(), ACTION, COLLECTION);
request.setShardName("foo");
assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD);
request.setRouteKey("foo");
request.setShardName(null);
assertContainsParams(request.getParams(), ACTION, COLLECTION, ShardParams._ROUTE_);
}
public void testCreateShard() {
final CollectionAdminRequest.CreateShard request = new CollectionAdminRequest.CreateShard()
.setCollectionName("collection")
.setShardName("shard");
final CollectionAdminRequest.CreateShard request = CollectionAdminRequest.createShard("collection","shard");
assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD);
}
public void testDeleteReplica() {
final CollectionAdminRequest.DeleteReplica request = new CollectionAdminRequest.DeleteReplica()
.setCollectionName("collection")
.setShardName("shard")
.setReplica("replica");
final CollectionAdminRequest.DeleteReplica request = CollectionAdminRequest.deleteReplica("collection","shard","replica");
assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD, REPLICA);
}
public void testDeleteReplicaProp() {
final CollectionAdminRequest.DeleteReplicaProp request = new CollectionAdminRequest.DeleteReplicaProp()
.setCollectionName("collection")
.setShardName("shard")
.setReplica("replica")
.setPropertyName("foo");
final CollectionAdminRequest.DeleteReplicaProp request = CollectionAdminRequest.deleteReplicaProperty
("collection", "shard", "replica", "foo");
assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD, REPLICA, "property");
}
public void testDeleteShard() {
final CollectionAdminRequest.DeleteShard request = new CollectionAdminRequest.DeleteShard()
.setCollectionName("collection")
.setShardName("shard");
final CollectionAdminRequest.DeleteShard request = CollectionAdminRequest.deleteShard("collection","shard");
assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD);
}
public void testSplitShard() {
final CollectionAdminRequest.SplitShard request = new CollectionAdminRequest.SplitShard()
.setCollectionName("collection")
final CollectionAdminRequest.SplitShard request = CollectionAdminRequest.splitShard("collection")
.setShardName("shard");
assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD);
}
public void testCreateCollection() {
final CollectionAdminRequest.Create request = new CollectionAdminRequest.Create()
.setCollectionName("collection");
assertContainsParams(request.getParams(), ACTION, NAME);
// shortest form
assertContainsParams(CollectionAdminRequest.createCollection("foo", null, 1, 1).getParams(),
ACTION, NAME, ZkStateReader.NUM_SHARDS_PROP,
"replicationFactor", ZkStateReader.NRT_REPLICAS);
// shortest form w/ "explicitly" choosing "implicit" router
assertContainsParams(CollectionAdminRequest.createCollectionWithImplicitRouter("foo", null, "bar", 1).getParams(),
ACTION, NAME, "shards", "router.name",
"replicationFactor", ZkStateReader.NRT_REPLICAS);
}
public void testReloadCollection() {
final CollectionAdminRequest.Reload request = new CollectionAdminRequest.Reload()
.setCollectionName("collection");
final CollectionAdminRequest.Reload request = CollectionAdminRequest.reloadCollection("collection");
assertContainsParams(request.getParams(), ACTION, NAME);
}
public void testDeleteCollection() {
final CollectionAdminRequest.Delete request = new CollectionAdminRequest.Delete()
.setCollectionName("collection");
final CollectionAdminRequest.Delete request = CollectionAdminRequest.deleteCollection("collection");
assertContainsParams(request.getParams(), ACTION, NAME);
}
public void testCreateAlias() {
final CollectionAdminRequest.CreateAlias request = new CollectionAdminRequest.CreateAlias()
.setAliasName("name")
.setAliasedCollections("collections");
final CollectionAdminRequest.CreateAlias request = CollectionAdminRequest.createAlias("name","collections");
assertContainsParams(request.getParams(), ACTION, NAME, "collections");
}
public void testDeleteAlias() {
final CollectionAdminRequest.DeleteAlias request = new CollectionAdminRequest.DeleteAlias()
.setAliasName("name");
final CollectionAdminRequest.DeleteAlias request = CollectionAdminRequest.deleteAlias("name");
assertContainsParams(request.getParams(), ACTION, NAME);
}
@ -175,10 +172,7 @@ public class CollectionAdminRequestRequiredParamsTest extends LuceneTestCase {
}
public void testMigrate() {
final CollectionAdminRequest.Migrate request = new CollectionAdminRequest.Migrate()
.setCollectionName("collection")
.setTargetCollection("target")
.setSplitKey("splitKey");
final CollectionAdminRequest.Migrate request = CollectionAdminRequest.migrateData("collection","targer","splitKey");
assertContainsParams(request.getParams(), ACTION, COLLECTION, "target.collection", "split.key");
}
@ -188,10 +182,21 @@ public class CollectionAdminRequestRequiredParamsTest extends LuceneTestCase {
}
public void testRequestStatus() {
final CollectionAdminRequest.RequestStatus request = new CollectionAdminRequest.RequestStatus()
.setRequestId("request");
final CollectionAdminRequest.RequestStatus request = CollectionAdminRequest.requestStatus("request");
assertContainsParams(request.getParams(), ACTION, REQUESTID);
}
public void testDeleteStatus() {
assertContainsParams(CollectionAdminRequest.deleteAsyncId("foo").getParams(),
ACTION, REQUESTID);
assertContainsParams(CollectionAdminRequest.deleteAllAsyncIds().getParams(),
ACTION, FLUSH);
}
public void testForceLeader() {
assertContainsParams(CollectionAdminRequest.forceLeaderElection("foo","bar").getParams(),
ACTION, COLLECTION, SHARD);
}
private void assertContainsParams(SolrParams solrParams, String... requiredParams) {
final Set<String> requiredParamsSet = Sets.newHashSet(requiredParams);

View File

@ -17,7 +17,7 @@
package org.apache.solr.client.solrj.request;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.CreateAlias;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.CreateShard;
import org.apache.solr.common.SolrException;
@ -30,57 +30,57 @@ public class TestCollectionAdminRequest extends LuceneTestCase {
@Test
public void testInvalidCollectionNameRejectedWhenCreatingCollection() {
final Create createRequest = new Create();
try {
createRequest.setCollectionName("invalid$collection@name");
fail();
} catch (SolrException e) {
final String exceptionMessage = e.getMessage();
assertTrue(exceptionMessage.contains("Invalid collection"));
assertTrue(exceptionMessage.contains("invalid$collection@name"));
assertTrue(exceptionMessage.contains("must consist entirely of periods, underscores, hyphens, and alphanumerics"));
}
final SolrException e = expectThrows(SolrException.class, () -> {
CollectionAdminRequest.createCollection("invalid$collection@name", null, 1, 1);
});
final String exceptionMessage = e.getMessage();
assertTrue(exceptionMessage.contains("Invalid collection"));
assertTrue(exceptionMessage.contains("invalid$collection@name"));
assertTrue(exceptionMessage.contains("must consist entirely of periods, underscores, hyphens, and alphanumerics"));
}
@Test
public void testInvalidShardNamesRejectedWhenCreatingCollection() {
final Create createRequest = new Create();
try {
createRequest.setShards("invalid$shard@name");
fail();
} catch (SolrException e) {
final String exceptionMessage = e.getMessage();
assertTrue(exceptionMessage.contains("Invalid shard"));
assertTrue(exceptionMessage.contains("invalid$shard@name"));
assertTrue(exceptionMessage.contains("must consist entirely of periods, underscores, hyphens, and alphanumerics"));
}
public void testInvalidShardNamesRejectedWhenCreatingImplicitCollection() {
final SolrException e = expectThrows(SolrException.class, () -> {
CollectionAdminRequest.createCollectionWithImplicitRouter("fine", "fine", "invalid$shard@name",1,0,0);
});
final String exceptionMessage = e.getMessage();
assertTrue(exceptionMessage.contains("Invalid shard"));
assertTrue(exceptionMessage.contains("invalid$shard@name"));
assertTrue(exceptionMessage.contains("must consist entirely of periods, underscores, hyphens, and alphanumerics"));
}
@Test
public void testInvalidShardNamesRejectedWhenCallingSetShards() {
CollectionAdminRequest.Create request = CollectionAdminRequest.createCollectionWithImplicitRouter("fine",null,"fine",1);
final SolrException e = expectThrows(SolrException.class, () -> {
request.setShards("invalid$shard@name");
});
final String exceptionMessage = e.getMessage();
assertTrue(exceptionMessage.contains("Invalid shard"));
assertTrue(exceptionMessage.contains("invalid$shard@name"));
assertTrue(exceptionMessage.contains("must consist entirely of periods, underscores, hyphens, and alphanumerics"));
}
@Test
public void testInvalidAliasNameRejectedWhenCreatingAlias() {
final CreateAlias createAliasRequest = new CreateAlias();
try {
createAliasRequest.setAliasName("invalid$alias@name");
fail();
} catch (SolrException e) {
final String exceptionMessage = e.getMessage();
assertTrue(exceptionMessage.contains("Invalid alias"));
assertTrue(exceptionMessage.contains("invalid$alias@name"));
assertTrue(exceptionMessage.contains("must consist entirely of periods, underscores, hyphens, and alphanumerics"));
}
final SolrException e = expectThrows(SolrException.class, () -> {
CreateAlias createAliasRequest = CollectionAdminRequest.createAlias("invalid$alias@name","ignored");
});
final String exceptionMessage = e.getMessage();
assertTrue(exceptionMessage.contains("Invalid alias"));
assertTrue(exceptionMessage.contains("invalid$alias@name"));
assertTrue(exceptionMessage.contains("must consist entirely of periods, underscores, hyphens, and alphanumerics"));
}
@Test
public void testInvalidShardNameRejectedWhenCreatingShard() {
final CreateShard createShardRequest = new CreateShard();
try {
createShardRequest.setShardName("invalid$shard@name");
fail();
} catch (SolrException e) {
final String exceptionMessage = e.getMessage();
assertTrue(exceptionMessage.contains("Invalid shard"));
assertTrue(exceptionMessage.contains("invalid$shard@name"));
assertTrue(exceptionMessage.contains("must consist entirely of periods, underscores, hyphens, and alphanumerics"));
}
final SolrException e = expectThrows(SolrException.class, () -> {
CreateShard createShardRequest = CollectionAdminRequest.createShard("ignored","invalid$shard@name");
});
final String exceptionMessage = e.getMessage();
assertTrue(exceptionMessage.contains("Invalid shard"));
assertTrue(exceptionMessage.contains("invalid$shard@name"));
assertTrue(exceptionMessage.contains("must consist entirely of periods, underscores, hyphens, and alphanumerics"));
}
}

View File

@ -1892,8 +1892,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
throws SolrServerException, IOException {
CollectionAdminResponse resp = createCollection(testCollectionName, numShards, replicationFactor, maxShardsPerNode);
if (resp.getResponse().get("failure") != null) {
CollectionAdminRequest.Delete req = new CollectionAdminRequest.Delete();
req.setCollectionName(testCollectionName);
CollectionAdminRequest.Delete req = CollectionAdminRequest.deleteCollection(testCollectionName);
req.process(cloudClient);
resp = createCollection(testCollectionName, numShards, replicationFactor, maxShardsPerNode);
@ -2158,6 +2157,22 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
}
}
/**
* Logs a WARN if collection can't be deleted, but does not fail or throw an exception
* @return true if success, else false
*/
// Best-effort cleanup helper: deletes the named collection via the
// Collections API, returning true on success. Any failure is logged at WARN
// and swallowed so test teardown never fails because a collection was
// already gone (or the cluster is shutting down).
protected static boolean attemptCollectionDelete(CloudSolrClient client, String collectionName) {
  // try to clean up
  try {
    CollectionAdminRequest.deleteCollection(collectionName).process(client);
    return true;
  } catch (Exception e) {
    // don't fail the test
    // NOTE(review): the exception itself is intentionally not included in the
    // log call, so no stack trace is recorded — confirm that is deliberate.
    log.warn("Could not delete collection {} - ignoring", collectionName);
  }
  return false;
}
protected void logReplicationDetails(Replica replica, StringBuilder builder) throws IOException {
try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getCoreUrl()).build()) {
ModifiableSolrParams params = new ModifiableSolrParams();
@ -2203,9 +2218,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
}
static CollectionAdminResponse getStatusResponse(String requestId, SolrClient client) throws SolrServerException, IOException {
CollectionAdminRequest.RequestStatus requestStatusRequest = new CollectionAdminRequest.RequestStatus();
requestStatusRequest.setRequestId(requestId);
return requestStatusRequest.process(client);
return CollectionAdminRequest.requestStatus(requestId).process(client);
}
}