commit 467c692d40
Author: Shalin Shekhar Mangar
Date:   2017-07-05 05:39:44 +05:30

    Merge branch 'master' into feature/autoscaling

    # Conflicts:
    #	solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java

74 changed files with 1011 additions and 421 deletions

@@ -100,6 +100,10 @@ API Changes
   DoubleValuesSource extensions, or with the new ShapeValuesSource and
   ShapeValuesPredicate classes (Alan Woodward, David Smiley)

+* LUCENE-7892: Doc-values query factory methods have been renamed so that their
+  name contains "slow" in order to clearly indicate that they would usually be a
+  bad choice. (Adrien Grand)
+
 Bug Fixes

 * LUCENE-7626: IndexWriter will no longer accept broken token offsets
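Note: the renamed "slow" factories are meant to be used as the doc-values side of an IndexOrDocValuesQuery, as the TestIndexOrDocValuesQuery changes below exercise. A minimal sketch of the intended usage, assuming a hypothetical "price" field indexed both as a LongPoint and as numeric doc values:

import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.Query;

class SlowQueryExample {
  // "price" is a hypothetical field indexed both ways.
  static Query priceRange(long lower, long upper) {
    // The point query is the fast side; the renamed "slow" doc-values query is
    // the fallback. IndexOrDocValuesQuery picks the cheaper one per segment.
    return new IndexOrDocValuesQuery(
        LongPoint.newRangeQuery("price", lower, upper),
        NumericDocValuesField.newSlowRangeQuery("price", lower, upper));
  }
}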

@@ -78,7 +78,7 @@ public class NumericDocValuesField extends Field {
    * alongside a range query that executes on points, such as
    * {@link LongPoint#newRangeQuery}.
    */
-  public static Query newRangeQuery(String field, long lowerValue, long upperValue) {
+  public static Query newSlowRangeQuery(String field, long lowerValue, long upperValue) {
     return new SortedNumericDocValuesRangeQuery(field, lowerValue, upperValue) {
       @Override
       SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException {
@@ -99,7 +99,7 @@ public class NumericDocValuesField extends Field {
    * alongside a range query that executes on points, such as
    * {@link LongPoint#newExactQuery}.
    */
-  public static Query newExactQuery(String field, long value) {
-    return newRangeQuery(field, value, value);
+  public static Query newSlowExactQuery(String field, long value) {
+    return newSlowRangeQuery(field, value, value);
   }
 }

@@ -89,7 +89,7 @@ public class SortedNumericDocValuesField extends Field {
    * alongside a range query that executes on points, such as
    * {@link LongPoint#newRangeQuery}.
    */
-  public static Query newRangeQuery(String field, long lowerValue, long upperValue) {
+  public static Query newSlowRangeQuery(String field, long lowerValue, long upperValue) {
     return new SortedNumericDocValuesRangeQuery(field, lowerValue, upperValue) {
       @Override
       SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException {
@@ -114,7 +114,7 @@ public class SortedNumericDocValuesField extends Field {
    * alongside a range query that executes on points, such as
    * {@link LongPoint#newExactQuery}.
    */
-  public static Query newExactQuery(String field, long value) {
-    return newRangeQuery(field, value, value);
+  public static Query newSlowExactQuery(String field, long value) {
+    return newSlowRangeQuery(field, value, value);
   }
 }

@@ -80,9 +80,9 @@ public class TestDocValuesQueries extends LuceneTestCase {
       final Query q1 = LongPoint.newRangeQuery("idx", min, max);
       final Query q2;
       if (sortedNumeric) {
-        q2 = SortedNumericDocValuesField.newRangeQuery("dv", min, max);
+        q2 = SortedNumericDocValuesField.newSlowRangeQuery("dv", min, max);
       } else {
-        q2 = NumericDocValuesField.newRangeQuery("dv", min, max);
+        q2 = NumericDocValuesField.newSlowRangeQuery("dv", min, max);
       }
       assertSameMatches(searcher, q1, q2, false);
     }
@@ -185,11 +185,11 @@ public class TestDocValuesQueries extends LuceneTestCase {
   }

   public void testEquals() {
-    Query q1 = SortedNumericDocValuesField.newRangeQuery("foo", 3, 5);
-    QueryUtils.checkEqual(q1, SortedNumericDocValuesField.newRangeQuery("foo", 3, 5));
-    QueryUtils.checkUnequal(q1, SortedNumericDocValuesField.newRangeQuery("foo", 3, 6));
-    QueryUtils.checkUnequal(q1, SortedNumericDocValuesField.newRangeQuery("foo", 4, 5));
-    QueryUtils.checkUnequal(q1, SortedNumericDocValuesField.newRangeQuery("bar", 3, 5));
+    Query q1 = SortedNumericDocValuesField.newSlowRangeQuery("foo", 3, 5);
+    QueryUtils.checkEqual(q1, SortedNumericDocValuesField.newSlowRangeQuery("foo", 3, 5));
+    QueryUtils.checkUnequal(q1, SortedNumericDocValuesField.newSlowRangeQuery("foo", 3, 6));
+    QueryUtils.checkUnequal(q1, SortedNumericDocValuesField.newSlowRangeQuery("foo", 4, 5));
+    QueryUtils.checkUnequal(q1, SortedNumericDocValuesField.newSlowRangeQuery("bar", 3, 5));

     Query q2 = SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), true, true);
     QueryUtils.checkEqual(q2, SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), true, true));
@@ -199,7 +199,7 @@ public class TestDocValuesQueries extends LuceneTestCase {
   }

   public void testToString() {
-    Query q1 = SortedNumericDocValuesField.newRangeQuery("foo", 3, 5);
+    Query q1 = SortedNumericDocValuesField.newSlowRangeQuery("foo", 3, 5);
     assertEquals("foo:[3 TO 5]", q1.toString());
     assertEquals("[3 TO 5]", q1.toString("foo"));
     assertEquals("foo:[3 TO 5]", q1.toString("bar"));
@@ -226,8 +226,8 @@ public class TestDocValuesQueries extends LuceneTestCase {
     iw.close();
     IndexSearcher searcher = newSearcher(reader);
     for (Query query : Arrays.asList(
-        NumericDocValuesField.newRangeQuery("foo", 2, 4),
-        SortedNumericDocValuesField.newRangeQuery("foo", 2, 4),
+        NumericDocValuesField.newSlowRangeQuery("foo", 2, 4),
+        SortedNumericDocValuesField.newSlowRangeQuery("foo", 2, 4),
         SortedDocValuesField.newRangeQuery("foo", new BytesRef("abc"), new BytesRef("bcd"), random().nextBoolean(), random().nextBoolean()),
         SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("abc"), new BytesRef("bcd"), random().nextBoolean(), random().nextBoolean()))) {
       Weight w = searcher.createNormalizedWeight(query, random().nextBoolean());
@@ -255,12 +255,12 @@ public class TestDocValuesQueries extends LuceneTestCase {
     final long lo = NumericUtils.doubleToSortableLong(8.701032080293731E-226);
     final long hi = NumericUtils.doubleToSortableLong(2.0801416404385346E-41);
-    Query query = SortedNumericDocValuesField.newRangeQuery("dv", lo, hi);
+    Query query = SortedNumericDocValuesField.newSlowRangeQuery("dv", lo, hi);
     // TODO: assert expected matches
     searcher.search(query, searcher.reader.maxDoc(), Sort.INDEXORDER);
     // swap order, should still work
-    query = SortedNumericDocValuesField.newRangeQuery("dv", hi, lo);
+    query = SortedNumericDocValuesField.newSlowRangeQuery("dv", hi, lo);
     // TODO: assert expected matches
     searcher.search(query, searcher.reader.maxDoc(), Sort.INDEXORDER);

@@ -64,7 +64,7 @@ public class TestIndexOrDocValuesQuery extends LuceneTestCase {
     // The term query is more selective, so the IndexOrDocValuesQuery should use doc values
     final Query q1 = new BooleanQuery.Builder()
         .add(new TermQuery(new Term("f1", "foo")), Occur.MUST)
-        .add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 2), NumericDocValuesField.newRangeQuery("f2", 2L, 2L)), Occur.MUST)
+        .add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 2), NumericDocValuesField.newSlowRangeQuery("f2", 2L, 2L)), Occur.MUST)
         .build();

     final Weight w1 = searcher.createNormalizedWeight(q1, random().nextBoolean());
@@ -74,7 +74,7 @@ public class TestIndexOrDocValuesQuery extends LuceneTestCase {
     // The term query is less selective, so the IndexOrDocValuesQuery should use points
     final Query q2 = new BooleanQuery.Builder()
         .add(new TermQuery(new Term("f1", "bar")), Occur.MUST)
-        .add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 42), NumericDocValuesField.newRangeQuery("f2", 42L, 42L)), Occur.MUST)
+        .add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 42), NumericDocValuesField.newSlowRangeQuery("f2", 42L, 42L)), Occur.MUST)
         .build();

     final Weight w2 = searcher.createNormalizedWeight(q2, random().nextBoolean());

@@ -143,7 +143,7 @@ public class LatLonDocValuesField extends Field {
    * best used wrapped in an {@link IndexOrDocValuesQuery} alongside a
    * {@link LatLonPoint#newBoxQuery}.
    */
-  public static Query newBoxQuery(String field, double minLatitude, double maxLatitude, double minLongitude, double maxLongitude) {
+  public static Query newSlowBoxQuery(String field, double minLatitude, double maxLatitude, double minLongitude, double maxLongitude) {
     // exact double values of lat=90.0D and lon=180.0D must be treated special as they are not represented in the encoding
     // and should not drag in extra bogus junk! TODO: should encodeCeil just throw ArithmeticException to be less trappy here?
     if (minLatitude == 90.0) {
@@ -175,7 +175,7 @@ public class LatLonDocValuesField extends Field {
    * @return query matching points within this distance
    * @throws IllegalArgumentException if {@code field} is null, location has invalid coordinates, or radius is invalid.
    */
-  public static Query newDistanceQuery(String field, double latitude, double longitude, double radiusMeters) {
+  public static Query newSlowDistanceQuery(String field, double latitude, double longitude, double radiusMeters) {
     return new LatLonDocValuesDistanceQuery(field, latitude, longitude, radiusMeters);
   }
 }

@@ -36,12 +36,12 @@ public class TestLatLonDocValuesQueries extends BaseGeoPointTestCase {
   @Override
   protected Query newRectQuery(String field, double minLat, double maxLat, double minLon, double maxLon) {
-    return LatLonDocValuesField.newBoxQuery(field, minLat, maxLat, minLon, maxLon);
+    return LatLonDocValuesField.newSlowBoxQuery(field, minLat, maxLat, minLon, maxLon);
   }

   @Override
   protected Query newDistanceQuery(String field, double centerLat, double centerLon, double radiusMeters) {
-    return LatLonDocValuesField.newDistanceQuery(field, centerLat, centerLon, radiusMeters);
+    return LatLonDocValuesField.newSlowDistanceQuery(field, centerLat, centerLon, radiusMeters);
   }

   @Override

@@ -33,6 +33,50 @@ Jetty 9.3.14.v20161028
 (No Changes)

+==================  7.1.0 ==================
+
+Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
+
+Versions of Major Components
+---------------------
+Apache Tika 1.13
+Carrot2 3.15.0
+Velocity 1.7 and Velocity Tools 2.0
+Apache UIMA 2.3.1
+Apache ZooKeeper 3.4.10
+Jetty 9.3.14.v20161028
+
+Detailed Change List
+----------------------
+
+Upgrade Notes
+----------------------
+
+(No Notes)
+
+New Features
+----------------------
+
+(No Changes)
+
+Bug Fixes
+----------------------
+
+(No Changes)
+
+Optimizations
+----------------------
+
+(No Changes)
+
+Other Changes
+----------------------
+
+* SOLR-10827: Factor out abstract FilteringSolrMetricReporter class. (Christine Poerschke)
+
+* SOLR-10957: Changed SolrCoreParser.init to use the resource loader from getSchema()
+  instead of the resource loader from getCore(). (Christine Poerschke)
+
 ==================  7.0.0 ==================

 Versions of Major Components
@@ -281,6 +325,11 @@ Bug Fixes
 * SOLR-6807: CloudSolrClient's ZK state version check with the server was ignored when handleSelect=false
   (David Smiley)

+* SOLR-10878: MOVEREPLICA command may lose data when replicationFactor is 1. (ab, shalin)
+
+* SOLR-10879: DELETEREPLICA and DELETENODE commands should prevent data loss when
+  replicationFactor is 1. (ab)
+
 Optimizations
 ----------------------
@@ -422,6 +471,9 @@ Other Changes
   (added leading '/' in request handlers). Switch all tests referring to "standard" request handler to
   instead refer to "/select" with SearchHandler. Deprecated the old StandardRequestHandler. (David Smiley)

+* SOLR-10456: Deprecate timeout related setters from SolrClients, and replace with Builder based implementation
+  (Jason Gerlowski, Anshum Gupta)
+
 ==================  6.7.0 ==================

 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
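Note: the SOLR-10879 guard surfaces to clients as a FAILED async request status rather than silent data loss. A minimal SolrJ sketch of that call path, mirroring the DeleteNodeTest changes at the end of this commit (the node name and request id are placeholders):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.RequestStatusState;

class DeleteNodeSketch {
  // Asks the Overseer to decommission a node, then polls the async status.
  // With SOLR-10879 the request fails up front if it would remove the last
  // non-PULL replica of any shard on that node.
  static RequestStatusState deleteNode(CloudSolrClient client, String nodeName) throws Exception {
    new CollectionAdminRequest.DeleteNode(nodeName).processAsync("del-node-1", client);
    CollectionAdminRequest.RequestStatus status = CollectionAdminRequest.requestStatus("del-node-1");
    RequestStatusState state;
    do {
      Thread.sleep(50);
      state = status.process(client).getRequestStatus();
    } while (state != RequestStatusState.COMPLETED && state != RequestStatusState.FAILED);
    return state;
  }
}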

@@ -305,9 +305,7 @@ public class TestSolrEntityProcessorEndToEnd extends AbstractDataImportHandlerTe
       sidl.add(sd);
     }

-    try (HttpSolrClient solrServer = getHttpSolrClient(getSourceUrl())) {
-      solrServer.setConnectionTimeout(15000);
-      solrServer.setSoTimeout(30000);
+    try (HttpSolrClient solrServer = getHttpSolrClient(getSourceUrl(), 15000, 30000)) {
       solrServer.add(sidl);
       solrServer.commit(true, true);
     }

@@ -509,6 +509,13 @@ public class JettySolrRunner {
     return new HttpSolrClient.Builder(getBaseUrl().toString()).build();
   }

+  public SolrClient newClient(int connectionTimeoutMillis, int socketTimeoutMillis) {
+    return new HttpSolrClient.Builder(getBaseUrl().toString())
+        .withConnectionTimeout(connectionTimeoutMillis)
+        .withSocketTimeout(socketTimeoutMillis)
+        .build();
+  }
+
   public DebugFilter getDebugFilter() {
     return (DebugFilter)debugFilter.getFilter();
   }

@@ -68,7 +68,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
   ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
       throws KeeperException, InterruptedException {
-    log.info("addReplica() : {}", Utils.toJSONString(message));
+    log.debug("addReplica() : {}", Utils.toJSONString(message));
     String collection = message.getStr(COLLECTION_PROP);
     String node = message.getStr(CoreAdminParams.NODE);
     String shard = message.getStr(SHARD_ID_PROP);

@@ -19,6 +19,7 @@ package org.apache.solr.cloud;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Locale;
 import java.util.concurrent.CountDownLatch;
@@ -26,7 +27,11 @@ import java.util.concurrent.TimeUnit;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.NamedList;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -54,8 +59,41 @@ public class DeleteNodeCmd implements OverseerCollectionMessageHandler.Cmd {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + node + " is not live");
     }
     List<ZkNodeProps> sourceReplicas = ReplaceNodeCmd.getReplicasOfNode(node, state);
+    List<String> singleReplicas = verifyReplicaAvailability(sourceReplicas, state);
+    if (!singleReplicas.isEmpty()) {
+      results.add("failure", "Can't delete the only existing non-PULL replica(s) on node " + node + ": " + singleReplicas.toString());
+    } else {
       cleanupReplicas(results, state, sourceReplicas, ocmh, node, message.getStr(ASYNC));
+    }
   }

+  // collect names of replicas that cannot be deleted
+  static List<String> verifyReplicaAvailability(List<ZkNodeProps> sourceReplicas, ClusterState state) {
+    List<String> res = new ArrayList<>();
+    for (ZkNodeProps sourceReplica : sourceReplicas) {
+      String coll = sourceReplica.getStr(COLLECTION_PROP);
+      String shard = sourceReplica.getStr(SHARD_ID_PROP);
+      String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
+      DocCollection collection = state.getCollection(coll);
+      Slice slice = collection.getSlice(shard);
+      if (slice.getReplicas().size() < 2) {
+        // can't delete the only replica in existence
+        res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
+      } else { // check replica types
+        int otherNonPullReplicas = 0;
+        for (Replica r : slice.getReplicas()) {
+          if (!r.getName().equals(replicaName) && !r.getType().equals(Replica.Type.PULL)) {
+            otherNonPullReplicas++;
+          }
+        }
+        // can't delete - there are no other non-pull replicas
+        if (otherNonPullReplicas == 0) {
+          res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
+        }
+      }
+    }
+    return res;
+  }

   static void cleanupReplicas(NamedList results,
                               ClusterState clusterState,
@@ -67,7 +105,8 @@ public class DeleteNodeCmd implements OverseerCollectionMessageHandler.Cmd {
     for (ZkNodeProps sourceReplica : sourceReplicas) {
       String coll = sourceReplica.getStr(COLLECTION_PROP);
       String shard = sourceReplica.getStr(SHARD_ID_PROP);
-      log.info("Deleting replica for collection={} shard={} on node={}", coll, shard, node);
+      String type = sourceReplica.getStr(ZkStateReader.REPLICA_TYPE);
+      log.info("Deleting replica type={} for collection={} shard={} on node={}", type, coll, shard, node);
       NamedList deleteResult = new NamedList();
       try {
         if (async != null) sourceReplica = sourceReplica.plus(ASYNC, async);

@@ -201,9 +201,10 @@ public class LeaderInitiatedRecoveryThread extends Thread {
       log.info("Asking core={} coreNodeName={} on " + recoveryUrl + " to recover", coreNeedingRecovery, replicaCoreNodeName);
     }

-    try (HttpSolrClient client = new HttpSolrClient.Builder(recoveryUrl).build()) {
-      client.setSoTimeout(60000);
-      client.setConnectionTimeout(15000);
+    try (HttpSolrClient client = new HttpSolrClient.Builder(recoveryUrl)
+        .withConnectionTimeout(15000)
+        .withSocketTimeout(60000)
+        .build()) {
       try {
         client.request(recoverRequestCmd);

@@ -22,6 +22,8 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;

 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
@@ -56,10 +58,11 @@ public class MoveReplicaCmd implements Cmd{
   }

   private void moveReplica(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    log.info("moveReplica() : {}", Utils.toJSONString(message));
+    log.debug("moveReplica() : {}", Utils.toJSONString(message));
     ocmh.checkRequired(message, COLLECTION_PROP, "targetNode");
     String collection = message.getStr(COLLECTION_PROP);
     String targetNode = message.getStr("targetNode");
+    int timeout = message.getInt("timeout", 10 * 60); // 10 minutes
     String async = message.getStr(ASYNC);
@@ -104,14 +107,14 @@ public class MoveReplicaCmd implements Cmd{
     Object dataDir = replica.get("dataDir");
     final String ulogDir = replica.getStr("ulogDir");
     if (dataDir != null && dataDir.toString().startsWith("hdfs:/")) {
-      moveHdfsReplica(clusterState, results, dataDir.toString(), ulogDir, targetNode, async, coll, replica, slice);
+      moveHdfsReplica(clusterState, results, dataDir.toString(), ulogDir, targetNode, async, coll, replica, slice, timeout);
     } else {
-      moveNormalReplica(clusterState, results, targetNode, async, coll, replica, slice);
+      moveNormalReplica(clusterState, results, targetNode, async, coll, replica, slice, timeout);
     }
   }

   private void moveHdfsReplica(ClusterState clusterState, NamedList results, String dataDir, String ulogDir, String targetNode, String async,
-                               DocCollection coll, Replica replica, Slice slice) throws Exception {
+                               DocCollection coll, Replica replica, Slice slice, int timeout) throws Exception {
     String newCoreName = Assign.buildCoreName(coll, slice.getName(), replica.getType());

     ZkNodeProps removeReplicasProps = new ZkNodeProps(
@@ -156,7 +159,7 @@ public class MoveReplicaCmd implements Cmd{
   }

   private void moveNormalReplica(ClusterState clusterState, NamedList results, String targetNode, String async,
-                                 DocCollection coll, Replica replica, Slice slice) throws Exception {
+                                 DocCollection coll, Replica replica, Slice slice, int timeout) throws Exception {
     String newCoreName = Assign.buildCoreName(coll, slice.getName(), replica.getType());
     ZkNodeProps addReplicasProps = new ZkNodeProps(
         COLLECTION_PROP, coll.getName(),
@@ -165,14 +168,41 @@ public class MoveReplicaCmd implements Cmd{
         CoreAdminParams.NAME, newCoreName);
     if(async!=null) addReplicasProps.getProperties().put(ASYNC, async);
     NamedList addResult = new NamedList();
+    CountDownLatch countDownLatch = new CountDownLatch(1);
+    ReplaceNodeCmd.RecoveryWatcher watcher = null;
+    if (replica.equals(slice.getLeader())) {
+      watcher = new ReplaceNodeCmd.RecoveryWatcher(coll.getName(), slice.getName(),
+          replica.getName(), null, countDownLatch);
+      ocmh.zkStateReader.registerCollectionStateWatcher(coll.getName(), watcher);
+    }
     ocmh.addReplica(clusterState, addReplicasProps, addResult, null);
     if (addResult.get("failure") != null) {
       String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
           " on node=%s", coll.getName(), slice.getName(), targetNode);
       log.warn(errorString);
       results.add("failure", errorString);
+      if (watcher != null) { // unregister
+        ocmh.zkStateReader.removeCollectionStateWatcher(coll.getName(), watcher);
+      }
       return;
     }
+    // wait for the other replica to be active if the source replica was a leader
+    if (watcher != null) {
+      try {
+        log.debug("Waiting for leader's replica to recover.");
+        if (!countDownLatch.await(timeout, TimeUnit.SECONDS)) {
+          String errorString = String.format(Locale.ROOT, "Timed out waiting for leader's replica to recover, collection=%s shard=%s" +
+              " on node=%s", coll.getName(), slice.getName(), targetNode);
+          log.warn(errorString);
+          results.add("failure", errorString);
+          return;
+        } else {
+          log.debug("Replica " + watcher.getRecoveredReplica() + " is active - deleting the source...");
+        }
+      } finally {
+        ocmh.zkStateReader.removeCollectionStateWatcher(coll.getName(), watcher);
+      }
+    }

     ZkNodeProps removeReplicasProps = new ZkNodeProps(
         COLLECTION_PROP, coll.getName(),
@@ -443,10 +443,11 @@ public class OverseerAutoReplicaFailoverThread implements Runnable, Closeable {
       final String createUrl, final String dataDir, final String ulogDir,
       final String coreNodeName, final String coreName, final String shardId) {

-    try (HttpSolrClient client = new HttpSolrClient.Builder(createUrl).build()) {
+    try (HttpSolrClient client = new HttpSolrClient.Builder(createUrl)
+        .withConnectionTimeout(30000)
+        .withSocketTimeout(60000)
+        .build()) {
       log.debug("create url={}", createUrl);
-      client.setConnectionTimeout(30000);
-      client.setSoTimeout(60000);
       Create createCmd = new Create();
       createCmd.setCollection(collection);
       createCmd.setCoreNodeName(coreNodeName);

@@ -428,20 +428,25 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler
   boolean waitForCoreNodeGone(String collectionName, String shard, String replicaName, int timeoutms) throws InterruptedException {
     TimeOut timeout = new TimeOut(timeoutms, TimeUnit.MILLISECONDS);
-    boolean deleted = false;
+    // TODO: remove this workaround for SOLR-9440
+    zkStateReader.registerCore(collectionName);
+    try {
       while (! timeout.hasTimedOut()) {
         Thread.sleep(100);
         DocCollection docCollection = zkStateReader.getClusterState().getCollection(collectionName);
-        if(docCollection != null) {
+        if (docCollection == null) { // someone already deleted the collection
+          return true;
+        }
         Slice slice = docCollection.getSlice(shard);
         if(slice == null || slice.getReplica(replicaName) == null) {
-          deleted = true;
+          return true;
         }
       }
-      // Return true if either someone already deleted the collection/slice/replica.
-      if (docCollection == null || deleted) break;
+      // replica still exists after the timeout
+      return false;
+    } finally {
+      zkStateReader.unregisterCore(collectionName);
+    }
-    return deleted;
   }

   void deleteCoreNode(String collectionName, String replicaName, Replica replica, String core) throws KeeperException, InterruptedException {
@@ -512,9 +517,10 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler
   static UpdateResponse softCommit(String url) throws SolrServerException, IOException {

-    try (HttpSolrClient client = new HttpSolrClient.Builder(url).build()) {
-      client.setConnectionTimeout(30000);
-      client.setSoTimeout(120000);
+    try (HttpSolrClient client = new HttpSolrClient.Builder(url)
+        .withConnectionTimeout(30000)
+        .withSocketTimeout(120000)
+        .build()) {
       UpdateRequest ureq = new UpdateRequest();
       ureq.setParams(new ModifiableSolrParams());
       ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true, true);

@@ -255,8 +255,9 @@ public class RecoveryStrategy implements Runnable, Closeable {
   final private void commitOnLeader(String leaderUrl) throws SolrServerException,
       IOException {
-    try (HttpSolrClient client = new HttpSolrClient.Builder(leaderUrl).build()) {
-      client.setConnectionTimeout(30000);
+    try (HttpSolrClient client = new HttpSolrClient.Builder(leaderUrl)
+        .withConnectionTimeout(30000)
+        .build()) {
       UpdateRequest ureq = new UpdateRequest();
       ureq.setParams(new ModifiableSolrParams());
       ureq.getParams().set(DistributedUpdateProcessor.COMMIT_END_POINT, true);

@@ -93,15 +93,6 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
     CountDownLatch replicasToRecover = new CountDownLatch(numLeaders);

     for (ZkNodeProps sourceReplica : sourceReplicas) {
-      if (sourceReplica.getBool(ZkStateReader.LEADER_PROP, false)) {
-        String shardName = sourceReplica.getStr(SHARD_ID_PROP);
-        String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
-        String collectionName = sourceReplica.getStr(COLLECTION_PROP);
-        String key = collectionName + "_" + replicaName;
-        RecoveryWatcher watcher = new RecoveryWatcher(collectionName, shardName, replicaName, replicasToRecover);
-        watchers.put(key, watcher);
-        zkStateReader.registerCollectionStateWatcher(collectionName, watcher);
-      }
       NamedList nl = new NamedList();
       log.info("Going to create replica for collection={} shard={} on node={}", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
       ZkNodeProps msg = sourceReplica.plus("parallel", String.valueOf(parallel)).plus(CoreAdminParams.NODE, target);
@@ -128,6 +119,16 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
       if (addedReplica != null) {
         createdReplicas.add(addedReplica);
+        if (sourceReplica.getBool(ZkStateReader.LEADER_PROP, false)) {
+          String shardName = sourceReplica.getStr(SHARD_ID_PROP);
+          String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
+          String collectionName = sourceReplica.getStr(COLLECTION_PROP);
+          String key = collectionName + "_" + replicaName;
+          RecoveryWatcher watcher = new RecoveryWatcher(collectionName, shardName, replicaName,
+              addedReplica.getStr(ZkStateReader.CORE_NAME_PROP), replicasToRecover);
+          watchers.put(key, watcher);
+          zkStateReader.registerCollectionStateWatcher(collectionName, watcher);
+        }
       }
     }
@@ -208,16 +209,27 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
   }

   // we use this watcher to wait for replicas to recover
-  private static class RecoveryWatcher implements CollectionStateWatcher {
+  static class RecoveryWatcher implements CollectionStateWatcher {
     String collectionId;
     String shardId;
     String replicaId;
+    String targetCore;
     CountDownLatch countDownLatch;
+    Replica recovered;

-    RecoveryWatcher(String collectionId, String shardId, String replicaId, CountDownLatch countDownLatch) {
+    /**
+     * Watch for recovery of a replica
+     * @param collectionId collection name
+     * @param shardId shard id
+     * @param replicaId source replica name (coreNodeName)
+     * @param targetCore specific target core name - if null then any active replica will do
+     * @param countDownLatch countdown when recovered
+     */
+    RecoveryWatcher(String collectionId, String shardId, String replicaId, String targetCore, CountDownLatch countDownLatch) {
       this.collectionId = collectionId;
       this.shardId = shardId;
       this.replicaId = replicaId;
+      this.targetCore = targetCore;
       this.countDownLatch = countDownLatch;
     }
@@ -241,7 +253,12 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
             continue;
           }
           // check its state
+          String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+          if (targetCore != null && !targetCore.equals(coreName)) {
+            continue;
+          }
           if (replica.isActive(liveNodes)) { // recovered - stop waiting
+            recovered = replica;
             countDownLatch.countDown();
             return true;
           }
@@ -250,5 +267,9 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
       // set the watch again to wait for the new replica to recover
       return false;
     }
+
+    public Replica getRecoveredReplica() {
+      return recovered;
+    }
   }
 }
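The RecoveryWatcher/latch pattern above is what MoveReplicaCmd now reuses when the source replica is a leader: register the watcher, create the new replica, then block on the latch for the request timeout. A condensed, hypothetical sketch of the waiting side, using only the CollectionStateWatcher API seen in this commit (collection/shard names and the timeout are placeholders):

import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.solr.common.cloud.CollectionStateWatcher;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkStateReader;

class WaitForActiveSketch {
  // Counts down once any replica of "shard1" is ACTIVE on a live node.
  // Returning true from the watcher removes the watch; false keeps it set.
  static boolean await(ZkStateReader zkStateReader) throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(1);
    CollectionStateWatcher watcher = (Set<String> liveNodes, DocCollection state) -> {
      if (state == null) return true; // collection deleted, stop watching
      for (Replica r : state.getSlice("shard1").getReplicas()) {
        if (r.isActive(liveNodes)) {
          latch.countDown();
          return true;
        }
      }
      return false; // not recovered yet, keep watching
    };
    zkStateReader.registerCollectionStateWatcher("collection1", watcher);
    try {
      return latch.await(600, TimeUnit.SECONDS);
    } finally {
      zkStateReader.removeCollectionStateWatcher("collection1", watcher);
    }
  }
}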

@@ -292,9 +292,11 @@ public class SyncStrategy {
     recoverRequestCmd.setAction(CoreAdminAction.REQUESTRECOVERY);
     recoverRequestCmd.setCoreName(coreName);

-    try (HttpSolrClient client = new HttpSolrClient.Builder(baseUrl).withHttpClient(SyncStrategy.this.client).build()) {
-      client.setConnectionTimeout(30000);
-      client.setSoTimeout(120000);
+    try (HttpSolrClient client = new HttpSolrClient.Builder(baseUrl)
+        .withHttpClient(SyncStrategy.this.client)
+        .withConnectionTimeout(30000)
+        .withSocketTimeout(120000)
+        .build()) {
       client.request(recoverRequestCmd);
     } catch (Throwable t) {
       SolrException.log(log, ZkCoreNodeProps.getCoreUrl(leaderProps) + ": Could not tell a replica to recover", t);

@@ -1671,9 +1671,10 @@ public class ZkController {
       log.info("Replica " + myCoreNodeName +
           " NOT in leader-initiated recovery, need to wait for leader to see down state.");

-      try (HttpSolrClient client = new Builder(leaderBaseUrl).build()) {
-        client.setConnectionTimeout(15000);
-        client.setSoTimeout(120000);
+      try (HttpSolrClient client = new Builder(leaderBaseUrl)
+          .withConnectionTimeout(15000)
+          .withSocketTimeout(120000)
+          .build()) {
         WaitForState prepCmd = new WaitForState();
         prepCmd.setCoreName(leaderCoreName);
         prepCmd.setNodeName(getNodeName());

@@ -789,8 +789,9 @@ public class CdcrRequestHandler extends RequestHandlerBase implements SolrCoreAw
   private void commitOnLeader(String leaderUrl) throws SolrServerException,
       IOException {
-    try (HttpSolrClient client = new HttpSolrClient.Builder(leaderUrl).build()) {
-      client.setConnectionTimeout(30000);
+    try (HttpSolrClient client = new HttpSolrClient.Builder(leaderUrl)
+        .withConnectionTimeout(30000)
+        .build()) {
       UpdateRequest ureq = new UpdateRequest();
       ureq.setParams(new ModifiableSolrParams());
       ureq.getParams().set(DistributedUpdateProcessor.COMMIT_END_POINT, true);
@@ -827,9 +828,10 @@ public class CdcrRequestHandler extends RequestHandlerBase implements SolrCoreAw
     @Override
     public Long call() throws Exception {
-      try (HttpSolrClient server = new HttpSolrClient.Builder(baseUrl).build()) {
-        server.setConnectionTimeout(15000);
-        server.setSoTimeout(60000);
+      try (HttpSolrClient server = new HttpSolrClient.Builder(baseUrl)
+          .withConnectionTimeout(15000)
+          .withSocketTimeout(60000)
+          .build()) {
         ModifiableSolrParams params = new ModifiableSolrParams();
         params.set(CommonParams.ACTION, CdcrParams.CdcrAction.SHARDCHECKPOINT.toString());

@@ -131,9 +131,10 @@ class CdcrUpdateLogSynchronizer implements CdcrStateManager.CdcrStateObserver {
         return;
       }

-      HttpSolrClient server = new HttpSolrClient.Builder(leaderUrl).build();
-      server.setConnectionTimeout(15000);
-      server.setSoTimeout(60000);
+      HttpSolrClient server = new HttpSolrClient.Builder(leaderUrl)
+          .withConnectionTimeout(15000)
+          .withSocketTimeout(60000)
+          .build();

       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set(CommonParams.ACTION, CdcrParams.CdcrAction.LASTPROCESSEDVERSION.toString());

@@ -276,9 +276,11 @@ public class IndexFetcher {
     QueryRequest req = new QueryRequest(params);

     // TODO modify to use shardhandler
-    try (HttpSolrClient client = new Builder(masterUrl).withHttpClient(myHttpClient).build()) {
-      client.setSoTimeout(soTimeout);
-      client.setConnectionTimeout(connTimeout);
+    try (HttpSolrClient client = new Builder(masterUrl)
+        .withHttpClient(myHttpClient)
+        .withConnectionTimeout(connTimeout)
+        .withSocketTimeout(soTimeout)
+        .build()) {
       return client.request(req);
     } catch (SolrServerException e) {
@@ -298,9 +300,11 @@ public class IndexFetcher {
     QueryRequest req = new QueryRequest(params);

     // TODO modify to use shardhandler
-    try (HttpSolrClient client = new HttpSolrClient.Builder(masterUrl).withHttpClient(myHttpClient).build()) {
-      client.setSoTimeout(soTimeout);
-      client.setConnectionTimeout(connTimeout);
+    try (HttpSolrClient client = new HttpSolrClient.Builder(masterUrl)
+        .withHttpClient(myHttpClient)
+        .withConnectionTimeout(connTimeout)
+        .withSocketTimeout(soTimeout)
+        .build()) {
       NamedList response = client.request(req);

       List<Map<String, Object>> files = (List<Map<String,Object>>) response.get(CMD_GET_FILE_LIST);
@@ -1691,9 +1695,9 @@ public class IndexFetcher {
       try (HttpSolrClient client = new Builder(masterUrl)
           .withHttpClient(myHttpClient)
           .withResponseParser(null)
+          .withConnectionTimeout(connTimeout)
+          .withSocketTimeout(soTimeout)
           .build()) {
-        client.setSoTimeout(soTimeout);
-        client.setConnectionTimeout(connTimeout);
         QueryRequest req = new QueryRequest(params);
         response = client.request(req);
         is = (InputStream) response.get("stream");
@@ -1800,9 +1804,11 @@ public class IndexFetcher {
     params.set(CommonParams.QT, ReplicationHandler.PATH);

     // TODO use shardhandler
-    try (HttpSolrClient client = new HttpSolrClient.Builder(masterUrl).withHttpClient(myHttpClient).build()) {
-      client.setSoTimeout(soTimeout);
-      client.setConnectionTimeout(connTimeout);
+    try (HttpSolrClient client = new HttpSolrClient.Builder(masterUrl)
+        .withHttpClient(myHttpClient)
+        .withConnectionTimeout(connTimeout)
+        .withSocketTimeout(soTimeout)
+        .build()) {
       QueryRequest request = new QueryRequest(params);
       return client.request(request);
     }

@@ -442,9 +442,10 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
       ZkNodeProps leaderProps = docCollection.getLeader(shard);
       ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);

-      try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl()).build()) {
-        client.setConnectionTimeout(15000);
-        client.setSoTimeout(60000);
+      try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl())
+          .withConnectionTimeout(15000)
+          .withSocketTimeout(60000)
+          .build()) {
         RequestSyncShard reqSyncShard = new RequestSyncShard();
         reqSyncShard.setCollection(collection);
         reqSyncShard.setShard(shard);

@@ -224,9 +224,9 @@ public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.
   protected LBHttpSolrClient createLoadbalancer(HttpClient httpClient){
     LBHttpSolrClient client = new Builder()
         .withHttpClient(httpClient)
+        .withConnectionTimeout(connectionTimeout)
+        .withSocketTimeout(soTimeout)
         .build();
-    client.setConnectionTimeout(connectionTimeout);
-    client.setSoTimeout(soTimeout);
     return client;
   }

@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.metrics;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.codahale.metrics.MetricFilter;
+
+public abstract class FilteringSolrMetricReporter extends SolrMetricReporter {
+
+  protected List<String> filters = new ArrayList<>();
+
+  public FilteringSolrMetricReporter(SolrMetricManager metricManager, String registryName) {
+    super(metricManager, registryName);
+  }
+
+  public void setFilter(List<String> filters) {
+    if (filters == null || filters.isEmpty()) {
+      return;
+    }
+    this.filters.addAll(filters);
+  }
+
+  public void setFilter(String filter) {
+    if (filter != null && !filter.isEmpty()) {
+      this.filters.add(filter);
+    }
+  }
+
+  /**
+   * Report only metrics with names matching any of the prefix filters.
+   * If the filters list is empty then all names will match.
+   */
+  protected MetricFilter newMetricFilter() {
+    final MetricFilter filter;
+    if (!filters.isEmpty()) {
+      filter = new SolrMetricManager.PrefixFilter(filters);
+    } else {
+      filter = MetricFilter.ALL;
+    }
+    return filter;
+  }
+}
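The factored-out newMetricFilter() boils down to a prefix match over metric names, with "match everything" as the default. A standalone sketch of the same idea against the plain Dropwizard API (the prefix list is illustrative):

import java.util.List;
import com.codahale.metrics.MetricFilter;

class PrefixFilterSketch {
  // Equivalent behaviour to newMetricFilter(): match any configured prefix,
  // or everything when no prefixes are configured.
  static MetricFilter forPrefixes(List<String> prefixes) {
    if (prefixes.isEmpty()) {
      return MetricFilter.ALL;
    }
    return (name, metric) -> prefixes.stream().anyMatch(name::startsWith);
  }
}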

@@ -17,26 +17,24 @@
 package org.apache.solr.metrics.reporters;

 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.TimeUnit;

 import com.codahale.metrics.MetricFilter;
 import com.codahale.metrics.ganglia.GangliaReporter;
 import info.ganglia.gmetric4j.gmetric.GMetric;
+import org.apache.solr.metrics.FilteringSolrMetricReporter;
 import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.metrics.SolrMetricReporter;

 /**
  *
  */
-public class SolrGangliaReporter extends SolrMetricReporter {
+public class SolrGangliaReporter extends FilteringSolrMetricReporter {

   private String host = null;
   private int port = -1;
   private boolean multicast;
   private String instancePrefix = null;
-  private List<String> filters = new ArrayList<>();
   private boolean testing;
   private GangliaReporter reporter;
@@ -68,25 +66,6 @@ public class SolrGangliaReporter extends SolrMetricReporter {
     this.instancePrefix = prefix;
   }

-  /**
-   * Report only metrics with names matching any of the prefix filters.
-   * @param filters list of 0 or more prefixes. If the list is empty then
-   *                all names will match.
-   */
-  public void setFilter(List<String> filters) {
-    if (filters == null || filters.isEmpty()) {
-      return;
-    }
-    this.filters.addAll(filters);
-  }
-
-  // due to vagaries of SolrPluginUtils.invokeSetters we need this too
-  public void setFilter(String filter) {
-    if (filter != null && !filter.isEmpty()) {
-      this.filters.add(filter);
-    }
-  }
-
   public void setMulticast(boolean multicast) {
     this.multicast = multicast;
   }
@@ -141,12 +120,7 @@ public class SolrGangliaReporter extends SolrMetricReporter {
         .convertRatesTo(TimeUnit.SECONDS)
         .convertDurationsTo(TimeUnit.MILLISECONDS)
         .prefixedWith(instancePrefix);
-    MetricFilter filter;
-    if (!filters.isEmpty()) {
-      filter = new SolrMetricManager.PrefixFilter(filters);
-    } else {
-      filter = MetricFilter.ALL;
-    }
+    final MetricFilter filter = newMetricFilter();
     builder = builder.filter(filter);
     reporter = builder.build(ganglia);
     reporter.start(period, TimeUnit.SECONDS);

@@ -17,8 +17,6 @@
 package org.apache.solr.metrics.reporters;

 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.TimeUnit;

 import com.codahale.metrics.MetricFilter;
@@ -26,19 +24,19 @@ import com.codahale.metrics.graphite.Graphite;
 import com.codahale.metrics.graphite.GraphiteReporter;
 import com.codahale.metrics.graphite.GraphiteSender;
 import com.codahale.metrics.graphite.PickledGraphite;
+import org.apache.solr.metrics.FilteringSolrMetricReporter;
 import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.metrics.SolrMetricReporter;

 /**
  * Metrics reporter that wraps {@link com.codahale.metrics.graphite.GraphiteReporter}.
  */
-public class SolrGraphiteReporter extends SolrMetricReporter {
+public class SolrGraphiteReporter extends FilteringSolrMetricReporter {

   private String host = null;
   private int port = -1;
   private boolean pickled = false;
   private String instancePrefix = null;
-  private List<String> filters = new ArrayList<>();
   private GraphiteReporter reporter = null;

   private static final ReporterClientCache<GraphiteSender> serviceRegistry = new ReporterClientCache<>();
@@ -66,25 +64,6 @@ public class SolrGraphiteReporter extends SolrMetricReporter {
     this.instancePrefix = prefix;
   }

-  /**
-   * Report only metrics with names matching any of the prefix filters.
-   * @param filters list of 0 or more prefixes. If the list is empty then
-   *                all names will match.
-   */
-  public void setFilter(List<String> filters) {
-    if (filters == null || filters.isEmpty()) {
-      return;
-    }
-    this.filters.addAll(filters);
-  }
-
-  public void setFilter(String filter) {
-    if (filter != null && !filter.isEmpty()) {
-      this.filters.add(filter);
-    }
-  }
-
   public void setPickled(boolean pickled) {
     this.pickled = pickled;
   }
@@ -113,12 +92,7 @@ public class SolrGraphiteReporter extends SolrMetricReporter {
         .prefixedWith(instancePrefix)
         .convertRatesTo(TimeUnit.SECONDS)
         .convertDurationsTo(TimeUnit.MILLISECONDS);
-    MetricFilter filter;
-    if (!filters.isEmpty()) {
-      filter = new SolrMetricManager.PrefixFilter(filters);
-    } else {
-      filter = MetricFilter.ALL;
-    }
+    final MetricFilter filter = newMetricFilter();
     builder = builder.filter(filter);
     reporter = builder.build(graphite);
     reporter.start(period, TimeUnit.SECONDS);

@@ -22,9 +22,7 @@ import javax.management.ObjectInstance;
 import javax.management.ObjectName;

 import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Locale;
 import java.util.Set;
@@ -33,6 +31,8 @@ import com.codahale.metrics.JmxReporter;
 import com.codahale.metrics.MetricFilter;
 import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.MetricRegistryListener;
+import org.apache.solr.metrics.FilteringSolrMetricReporter;
 import org.apache.solr.metrics.MetricsMap;
 import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.metrics.SolrMetricReporter;
@@ -46,7 +46,7 @@ import org.slf4j.LoggerFactory;
 * <p>NOTE: {@link JmxReporter} that this class uses exports only newly added metrics (it doesn't
 * process already existing metrics in a registry)</p>
 */
-public class SolrJmxReporter extends SolrMetricReporter {
+public class SolrJmxReporter extends FilteringSolrMetricReporter {

   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -56,7 +56,6 @@ public class SolrJmxReporter extends SolrMetricReporter {
   private String agentId;
   private String serviceUrl;
   private String rootName;
-  private List<String> filters = new ArrayList<>();

   private JmxReporter reporter;
   private MetricRegistry registry;
@@ -103,16 +102,7 @@ public class SolrJmxReporter extends SolrMetricReporter {
     }
     JmxObjectNameFactory jmxObjectNameFactory = new JmxObjectNameFactory(pluginInfo.name, fullDomain);
     registry = metricManager.registry(registryName);
-    // filter out MetricsMap gauges - we have a better way of handling them
-    MetricFilter mmFilter = (name, metric) -> !(metric instanceof MetricsMap);
-    MetricFilter filter;
-    if (filters.isEmpty()) {
-      filter = mmFilter;
-    } else {
-      // apply also prefix filters
-      SolrMetricManager.PrefixFilter prefixFilter = new SolrMetricManager.PrefixFilter(filters);
-      filter = new SolrMetricManager.AndFilter(prefixFilter, mmFilter);
-    }
+    final MetricFilter filter = newMetricFilter();

     reporter = JmxReporter.forRegistry(registry)
         .registerWith(mBeanServer)
@@ -128,6 +118,21 @@ public class SolrJmxReporter extends SolrMetricReporter {
     log.info("JMX monitoring for '" + fullDomain + "' (registry '" + registryName + "') enabled at server: " + mBeanServer);
   }

+  @Override
+  protected MetricFilter newMetricFilter() {
+    // filter out MetricsMap gauges - we have a better way of handling them
+    final MetricFilter mmFilter = (name, metric) -> !(metric instanceof MetricsMap);
+    final MetricFilter filter;
+    if (filters.isEmpty()) {
+      filter = mmFilter;
+    } else {
+      // apply also prefix filters
+      SolrMetricManager.PrefixFilter prefixFilter = new SolrMetricManager.PrefixFilter(filters);
+      filter = new SolrMetricManager.AndFilter(prefixFilter, mmFilter);
+    }
+    return filter;
+  }
+
   /**
    * Stops the reporter from publishing metrics.
    */
@@ -222,24 +227,6 @@ public class SolrJmxReporter extends SolrMetricReporter {
     return domain;
   }

-  /**
-   * Report only metrics with names matching any of the prefix filters.
-   * @param filters list of 0 or more prefixes. If the list is empty then
-   *                all names will match.
-   */
-  public void setFilter(List<String> filters) {
-    if (filters == null || filters.isEmpty()) {
-      return;
-    }
-    this.filters.addAll(filters);
-  }
-
-  public void setFilter(String filter) {
-    if (filter != null && !filter.isEmpty()) {
-      this.filters.add(filter);
-    }
-  }
-
   /**
    * Return the reporter's MBeanServer.
    *

@@ -18,14 +18,13 @@ package org.apache.solr.metrics.reporters;

 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.TimeUnit;

 import com.codahale.metrics.MetricFilter;
 import com.codahale.metrics.Slf4jReporter;
+import org.apache.solr.metrics.FilteringSolrMetricReporter;
 import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.metrics.SolrMetricReporter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -42,14 +41,13 @@ import org.slf4j.LoggerFactory;
 *       metrics group, eg. <code>solr.jvm</code></li>
 * </ul>
 */
-public class SolrSlf4jReporter extends SolrMetricReporter {
+public class SolrSlf4jReporter extends FilteringSolrMetricReporter {
   @SuppressWarnings("unused") // we need this to pass validate-source-patterns
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

   private String instancePrefix = null;
   private String logger = null;
-  private List<String> filters = new ArrayList<>();
   private Slf4jReporter reporter;

   /**
@@ -67,25 +65,6 @@ public class SolrSlf4jReporter extends SolrMetricReporter {
     this.instancePrefix = prefix;
   }

-  /**
-   * Report only metrics with names matching any of the prefix filters.
-   * @param filters list of 0 or more prefixes. If the list is empty then
-   *                all names will match.
-   */
-  public void setFilter(List<String> filters) {
-    if (filters == null || filters.isEmpty()) {
-      return;
-    }
-    this.filters.addAll(filters);
-  }
-
-  public void setFilter(String filter) {
-    if (filter != null && !filter.isEmpty()) {
-      this.filters.add(filter);
-    }
-  }
-
   public void setLogger(String logger) {
     this.logger = logger;
   }
@@ -102,12 +81,7 @@ public class SolrSlf4jReporter extends SolrMetricReporter {
         .convertRatesTo(TimeUnit.SECONDS)
         .convertDurationsTo(TimeUnit.MILLISECONDS);

-    MetricFilter filter;
-    if (!filters.isEmpty()) {
-      filter = new SolrMetricManager.PrefixFilter(filters);
-    } else {
-      filter = MetricFilter.ALL;
-    }
+    final MetricFilter filter = newMetricFilter();
     builder = builder.filter(filter);
     if (logger == null || logger.isEmpty()) {
       // construct logger name from Group

@@ -30,11 +30,13 @@ import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.handler.admin.MetricsCollectorHandler;
+import org.apache.solr.metrics.FilteringSolrMetricReporter;
 import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.metrics.SolrMetricReporter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import com.codahale.metrics.MetricFilter;

 /**
 * This class reports selected metrics from replicas to shard leader.
 * <p>The following configuration properties are supported:</p>
@@ -56,7 +58,7 @@ import org.slf4j.LoggerFactory;
 *   &lt;/reporter&gt;
 * </pre>
 */
-public class SolrShardReporter extends SolrMetricReporter {
+public class SolrShardReporter extends FilteringSolrMetricReporter {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

   public static final List<String> DEFAULT_FILTERS = new ArrayList(){{
@@ -70,7 +72,6 @@ public class SolrShardReporter extends SolrMetricReporter {
   }};

   private String handler = MetricsCollectorHandler.HANDLER_PATH;
-  private List<String> filters = new ArrayList<>();

   private SolrReporter reporter;
@@ -89,19 +90,6 @@ public class SolrShardReporter extends SolrMetricReporter {
     this.handler = handler;
   }

-  public void setFilter(List<String> filterConfig) {
-    if (filterConfig == null || filterConfig.isEmpty()) {
-      return;
-    }
-    filters.addAll(filterConfig);
-  }
-
-  public void setFilter(String filter) {
-    if (filter != null && !filter.isEmpty()) {
-      this.filters.add(filter);
-    }
-  }
-
   @Override
   protected void doInit() {
     if (filters.isEmpty()) {
@@ -110,6 +98,12 @@ public class SolrShardReporter extends SolrMetricReporter {
     // start in setCore(SolrCore) when core is available
   }

+  @Override
+  protected MetricFilter newMetricFilter() {
+    // unsupported here since setCore(SolrCore) uses this.filters directly
+    throw new UnsupportedOperationException(getClass().getCanonicalName()+".newMetricFilter() is not supported");
+  }
+
   @Override
   protected void validate() throws IllegalStateException {
     // (period < 1) means "don't start reporter" and so no (period > 0) validation needed

@@ -266,7 +266,7 @@ public class EnumField extends PrimitiveFieldType {
           --upperValue;
         }
       }
-      query = new ConstantScoreQuery(NumericDocValuesField.newRangeQuery(field.getName(), lowerValue, upperValue));
+      query = new ConstantScoreQuery(NumericDocValuesField.newSlowRangeQuery(field.getName(), lowerValue, upperValue));
     } else {
       query = LegacyNumericRangeQuery.newIntRange(field.getName(), DEFAULT_PRECISION_STEP,
           min == null ? null : minValue,

@@ -155,16 +155,16 @@ public class LatLonPointSpatialField extends AbstractSpatialFieldType implements
       if (shape instanceof Circle) {
         Circle circle = (Circle) shape;
         double radiusMeters = circle.getRadius() * DistanceUtils.DEG_TO_KM * 1000;
-        return LatLonDocValuesField.newDistanceQuery(getFieldName(),
+        return LatLonDocValuesField.newSlowDistanceQuery(getFieldName(),
             circle.getCenter().getY(), circle.getCenter().getX(),
             radiusMeters);
       } else if (shape instanceof Rectangle) {
         Rectangle rect = (Rectangle) shape;
-        return LatLonDocValuesField.newBoxQuery(getFieldName(),
+        return LatLonDocValuesField.newSlowBoxQuery(getFieldName(),
             rect.getMinY(), rect.getMaxY(), rect.getMinX(), rect.getMaxX());
       } else if (shape instanceof Point) {
         Point point = (Point) shape;
-        return LatLonDocValuesField.newDistanceQuery(getFieldName(),
+        return LatLonDocValuesField.newSlowDistanceQuery(getFieldName(),
             point.getY(), point.getX(), 0);
       } else {
         throw new UnsupportedOperationException("Shape " + shape.getClass() + " is not supported by " + getClass());

@@ -164,9 +164,9 @@ public abstract class NumericFieldType extends PrimitiveFieldType {
     }
     if (multiValued) {
       // In multiValued case use SortedNumericDocValuesField, this won't work for Trie*Fields since they use BinaryDV in the multiValued case
-      return SortedNumericDocValuesField.newRangeQuery(field, actualLowerValue, actualUpperValue);
+      return SortedNumericDocValuesField.newSlowRangeQuery(field, actualLowerValue, actualUpperValue);
     } else {
-      return NumericDocValuesField.newRangeQuery(field, actualLowerValue, actualUpperValue);
+      return NumericDocValuesField.newSlowRangeQuery(field, actualLowerValue, actualUpperValue);
     }
   }

@@ -57,7 +57,7 @@ public class SolrCoreParser extends CoreParser implements NamedListInitializedPl
     if (req == null) {
       loader = new SolrResourceLoader();
     } else {
-      loader = req.getCore().getResourceLoader();
+      loader = req.getSchema().getResourceLoader();
     }

     final Iterable<Map.Entry<String,Object>> args = initArgs;

@@ -76,11 +76,9 @@ public class ConnectionReuseTest extends SolrCloudTestCase {
       case 1:
         return getHttpSolrClient(url.toString() + "/" + COLLECTION, httpClient);
       case 2:
-        CloudSolrClient client = getCloudSolrClient(cluster.getZkServer().getZkAddress(), random().nextBoolean(), httpClient);
+        CloudSolrClient client = getCloudSolrClient(cluster.getZkServer().getZkAddress(), random().nextBoolean(), httpClient, 30000, 60000);
         client.setParallelUpdates(random().nextBoolean());
         client.setDefaultCollection(COLLECTION);
-        client.getLbClient().setConnectionTimeout(30000);
-        client.getLbClient().setSoTimeout(60000);
         return client;
     }
     throw new RuntimeException("impossible");

@@ -782,8 +782,7 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
   protected static SolrClient createNewSolrServer(String baseUrl) {
     try {
       // setup the server...
-      HttpSolrClient s = getHttpSolrClient(baseUrl);
-      s.setConnectionTimeout(DEFAULT_CONNECTION_TIMEOUT);
+      HttpSolrClient s = getHttpSolrClient(baseUrl, DEFAULT_CONNECTION_TIMEOUT);
       return s;
     } catch (Exception ex) {
       throw new RuntimeException(ex);

@@ -536,9 +536,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
   private void testStopAndStartCoresInOneInstance() throws Exception {
     JettySolrRunner jetty = jettys.get(0);
-    try (final HttpSolrClient httpSolrClient = (HttpSolrClient) jetty.newClient()) {
-      httpSolrClient.setConnectionTimeout(15000);
-      httpSolrClient.setSoTimeout(60000);
+    try (final HttpSolrClient httpSolrClient = (HttpSolrClient) jetty.newClient(15000, 60000)) {
       ThreadPoolExecutor executor = null;
       try {
         executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE,
@@ -772,9 +770,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
     String collection = elements[elements.length - 1];
     String urlString = url.toString();
     urlString = urlString.substring(0, urlString.length() - collection.length() - 1);
-    try (HttpSolrClient client = getHttpSolrClient(urlString)) {
-      client.setConnectionTimeout(15000);
-      client.setSoTimeout(60000);
+    try (HttpSolrClient client = getHttpSolrClient(urlString, 15000, 60000)) {
       ModifiableSolrParams params = new ModifiableSolrParams();
       //params.set("qt", "/admin/metrics?prefix=UPDATE.updateHandler&registry=solr.core." + collection);
       params.set("qt", "/admin/metrics");
@@ -860,9 +856,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
     ZkCoreNodeProps props = new ZkCoreNodeProps(getCommonCloudSolrClient().getZkStateReader().getClusterState().getLeader(oneInstanceCollection2, "shard1"));

     // now test that unloading a core gets us a new leader
-    try (HttpSolrClient unloadClient = getHttpSolrClient(jettys.get(0).getBaseUrl().toString())) {
-      unloadClient.setConnectionTimeout(15000);
-      unloadClient.setSoTimeout(60000);
+    try (HttpSolrClient unloadClient = getHttpSolrClient(jettys.get(0).getBaseUrl().toString(), 15000, 60000)) {
       Unload unloadCmd = new Unload(true);
       unloadCmd.setCoreName(props.getCoreName());
@@ -1134,6 +1128,18 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
     }
   }

+  protected SolrClient createNewSolrClient(String collection, String baseUrl, int connectionTimeoutMillis, int socketTimeoutMillis) {
+    try {
+      // setup the server...
+      HttpSolrClient client = getHttpSolrClient(baseUrl + "/" + collection, connectionTimeoutMillis, socketTimeoutMillis);
+      return client;
+    }
+    catch (Exception ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
   @Override
   protected QueryResponse queryServer(ModifiableSolrParams params) throws SolrServerException, IOException {

@@ -59,7 +59,7 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
   protected static final String[] fieldNames = new String[]{"f_i", "f_f", "f_d", "f_l", "f_dt"};
   protected static final RandVal[] randVals = new RandVal[]{rint, rfloat, rdouble, rlong, rdate};

-  private int clientSoTimeout;
+  private int clientSoTimeout = 60000;

   public String[] getFieldNames() {
     return fieldNames;
@@ -92,9 +92,7 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
     }
     fixShardCount(numShards);
-    // None of the operations used here are particularly costly, so this should work.
-    // Using this low timeout will also help us catch index stalling.
-    clientSoTimeout = 5000;
   }

   @Override
@@ -102,9 +100,20 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
     return onlyLeaderIndexes;
   }

+  @Override
+  protected CloudSolrClient createCloudClient(String defaultCollection) {
+    CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress(), random().nextBoolean(), 30000, clientSoTimeout);
+    client.setParallelUpdates(random().nextBoolean());
+    if (defaultCollection != null) client.setDefaultCollection(defaultCollection);
+    return client;
+  }
+
   @Test
   public void test() throws Exception {
-    cloudClient.setSoTimeout(clientSoTimeout);
+    // None of the operations used here are particularly costly, so this should work.
+    // Using this low timeout will also help us catch index stalling.
+    clientSoTimeout = 5000;
+    cloudClient = createCloudClient(DEFAULT_COLLECTION);
     boolean testSuccessful = false;
     try {
       handle.clear();

@@ -84,7 +84,7 @@ public class ChaosMonkeyNothingIsSafeWithPullReplicasTest extends AbstractFullDi
   protected static final String[] fieldNames = new String[]{"f_i", "f_f", "f_d", "f_l", "f_dt"};
   protected static final RandVal[] randVals = new RandVal[]{rint, rfloat, rdouble, rlong, rdate};

-  private int clientSoTimeout;
+  private int clientSoTimeout = 60000;

   public String[] getFieldNames() {
     return fieldNames;
@@ -115,9 +115,7 @@ public class ChaosMonkeyNothingIsSafeWithPullReplicasTest extends AbstractFullDi
     fixShardCount(numNodes);
     log.info("Starting ChaosMonkey test with {} shards and {} nodes", sliceCount, numNodes);
-    // None of the operations used here are particularly costly, so this should work.
-    // Using this low timeout will also help us catch index stalling.
-    clientSoTimeout = 5000;
   }

   @Override
@@ -125,9 +123,21 @@ public class ChaosMonkeyNothingIsSafeWithPullReplicasTest extends AbstractFullDi
     return useTlogReplicas;
   }

+  @Override
+  protected CloudSolrClient createCloudClient(String defaultCollection) {
+    CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress(), random().nextBoolean(), 30000, clientSoTimeout);
+    client.setParallelUpdates(random().nextBoolean());
+    if (defaultCollection != null) client.setDefaultCollection(defaultCollection);
+    return client;
+  }
+
   @Test
   public void test() throws Exception {
-    cloudClient.setSoTimeout(clientSoTimeout);
+    // None of the operations used here are particularly costly, so this should work.
+    // Using this low timeout will also help us catch index stalling.
+    clientSoTimeout = 5000;
+    cloudClient = createCloudClient(DEFAULT_COLLECTION);
     DocCollection docCollection = cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
     assertEquals(this.sliceCount, docCollection.getSlices().size());
     Slice s = docCollection.getSlice("shard1");

@@ -18,18 +18,28 @@
 package org.apache.solr.cloud;

+import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;

 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.StrUtils;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 public class DeleteNodeTest extends SolrCloudTestCase {

+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

   @BeforeClass
   public static void setupCluster() throws Exception {
@@ -47,28 +57,61 @@ public class DeleteNodeTest extends SolrCloudTestCase {
     cluster.waitForAllNodes(5000);
     CloudSolrClient cloudClient = cluster.getSolrClient();
     String coll = "deletenodetest_coll";
-    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+    ClusterState state = cloudClient.getZkStateReader().getClusterState();
+    Set<String> liveNodes = state.getLiveNodes();
     ArrayList<String> l = new ArrayList<>(liveNodes);
     Collections.shuffle(l, random());
     CollectionAdminRequest.Create create = pickRandom(
         CollectionAdminRequest.createCollection(coll, "conf1", 5, 2, 0, 0),
         CollectionAdminRequest.createCollection(coll, "conf1", 5, 1, 1, 0),
-        CollectionAdminRequest.createCollection(coll, "conf1", 5, 0, 1, 1));
+        CollectionAdminRequest.createCollection(coll, "conf1", 5, 0, 1, 1),
+        // check RF=1
+        CollectionAdminRequest.createCollection(coll, "conf1", 5, 1, 0, 0),
+        CollectionAdminRequest.createCollection(coll, "conf1", 5, 0, 1, 0)
+        );
     create.setCreateNodeSet(StrUtils.join(l, ',')).setMaxShardsPerNode(3);
     cloudClient.request(create);
+    state = cloudClient.getZkStateReader().getClusterState();
     String node2bdecommissioned = l.get(0);
-    new CollectionAdminRequest.DeleteNode(node2bdecommissioned).processAsync("003", cloudClient);
-    CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus("003");
-    boolean success = false;
-    for (int i = 0; i < 200; i++) {
-      CollectionAdminRequest.RequestStatusResponse rsp = requestStatus.process(cloudClient);
-      if (rsp.getRequestStatus() == RequestStatusState.COMPLETED) {
-        success = true;
-        break;
-      }
+    // check what replicas are on the node, and whether the call should fail
+    boolean shouldFail = false;
+    DocCollection docColl = state.getCollection(coll);
+    log.info("#### DocCollection: " + docColl);
+    List<Replica> replicas = docColl.getReplicas(node2bdecommissioned);
+    if (replicas != null) {
+      for (Replica replica : replicas) {
+        String shard = docColl.getShardId(node2bdecommissioned, replica.getStr(ZkStateReader.CORE_NAME_PROP));
+        Slice slice = docColl.getSlice(shard);
+        boolean hasOtherNonPullReplicas = false;
+        for (Replica r: slice.getReplicas()) {
+          if (!r.getName().equals(replica.getName()) &&
+              !r.getNodeName().equals(node2bdecommissioned) &&
+              r.getType() != Replica.Type.PULL) {
+            hasOtherNonPullReplicas = true;
+            break;
+          }
+        }
+        if (!hasOtherNonPullReplicas) {
+          shouldFail = true;
+          break;
+        }
+      }
+    }
+    new CollectionAdminRequest.DeleteNode(node2bdecommissioned).processAsync("003", cloudClient);
+    CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus("003");
+    CollectionAdminRequest.RequestStatusResponse rsp = null;
+    for (int i = 0; i < 200; i++) {
+      rsp = requestStatus.process(cloudClient);
+      if (rsp.getRequestStatus() == RequestStatusState.FAILED || rsp.getRequestStatus() == RequestStatusState.COMPLETED) {
+        break;
+      }
-      assertFalse(rsp.getRequestStatus() == RequestStatusState.FAILED);
       Thread.sleep(50);
     }
-    assertTrue(success);
+    log.info("####### DocCollection after: " + cloudClient.getZkStateReader().getClusterState().getCollection(coll));
     if (shouldFail) {
assertTrue(String.valueOf(rsp), rsp.getRequestStatus() == RequestStatusState.FAILED);
} else {
assertFalse(String.valueOf(rsp), rsp.getRequestStatus() == RequestStatusState.FAILED);
}
}
}

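The new expectations encode one rule: DELETENODE is expected to fail when the target node hosts the only non-PULL replica of some shard, since removing it would leave that shard without a leader-eligible replica. A hypothetical helper (not part of the patch) capturing the same predicate as the inline loop above:

  // Hypothetical refactoring of the inline check: returns true iff deleting
  // 'node' would strip some shard of its last non-PULL replica.
  private static boolean deleteShouldFail(DocCollection coll, String node) {
    List<Replica> replicas = coll.getReplicas(node);
    if (replicas == null) return false;
    for (Replica replica : replicas) {
      String shard = coll.getShardId(node, replica.getStr(ZkStateReader.CORE_NAME_PROP));
      Slice slice = coll.getSlice(shard);
      boolean hasOtherNonPullReplicas = false;
      for (Replica r : slice.getReplicas()) {
        if (!r.getName().equals(replica.getName())
            && !r.getNodeName().equals(node)
            && r.getType() != Replica.Type.PULL) {
          hasOtherNonPullReplicas = true;
          break;
        }
      }
      if (!hasOtherNonPullReplicas) return true;
    }
    return false;
  }
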
View File

@ -691,8 +691,7 @@ public class FullSolrCloudDistribCmdsTest extends AbstractFullDistribZkTestBase
long beforeCount = results.getResults().getNumFound();
int cnt = TEST_NIGHTLY ? 2933 : 313;
try (ConcurrentUpdateSolrClient concurrentClient = getConcurrentUpdateSolrClient(
((HttpSolrClient) clients.get(0)).getBaseURL(), 10, 2)) {
concurrentClient.setConnectionTimeout(120000);
((HttpSolrClient) clients.get(0)).getBaseURL(), 10, 2, 120000)) {
for (int i = 0; i < cnt; i++) {
index_specific(concurrentClient, id, docId++, "text_t", "some text so that it's not negligible work to parse this doc, even though it's still a pretty short doc");
}

View File

@ -58,9 +58,9 @@ class FullThrottleStoppableIndexingThread extends StoppableIndexingThread {
.withHttpClient(httpClient)
.withQueueSize(8)
.withThreadCount(2)
.withConnectionTimeout(10000)
.withSocketTimeout(clientSoTimeout)
.build();
cusc.setConnectionTimeout(10000);
cusc.setSoTimeout(clientSoTimeout);
}
@Override

View File

@ -96,11 +96,11 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
CloudSolrClient client = new CloudSolrClient.Builder()
.withZkHost(zkServer.getZkAddress())
.sendDirectUpdatesToAnyShardReplica()
.withConnectionTimeout(30000)
.withSocketTimeout(60000)
.build();
client.setParallelUpdates(random().nextBoolean());
if (defaultCollection != null) client.setDefaultCollection(defaultCollection);
client.getLbClient().setConnectionTimeout(30000);
client.getLbClient().setSoTimeout(60000);
return client;
}

View File

@ -80,14 +80,8 @@ public class MigrateRouteKeyTest extends SolrCloudTestCase {
}
protected void invokeCollectionMigration(CollectionAdminRequest.AsyncCollectionAdminRequest request) throws IOException, SolrServerException, InterruptedException {
if (random().nextBoolean()) {
cluster.getSolrClient().setSoTimeout(60000); // can take a while
request.process(cluster.getSolrClient());
}
else {
request.processAndWait(cluster.getSolrClient(), 60000);
}
}
@Test
public void multipleShardMigrateTest() throws Exception {

View File

@ -0,0 +1,69 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud;
import com.carrotsearch.randomizedtesting.ThreadFilter;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.solr.cloud.hdfs.HdfsTestUtil;
import org.apache.solr.common.cloud.ZkConfigManager;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
/** Runs {@link MoveReplicaTest} against indexes stored on HDFS (backed by a {@link MiniDFSCluster}). */
@ThreadLeakFilters(defaultFilters = true, filters = {
BadHdfsThreadsFilter.class, // hdfs currently leaks thread(s)
MoveReplicaHDFSTest.ForkJoinThreadsFilter.class
})
public class MoveReplicaHDFSTest extends MoveReplicaTest {
private static MiniDFSCluster dfsCluster;
@BeforeClass
public static void setupClass() throws Exception {
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
ZkConfigManager configManager = new ZkConfigManager(zkClient());
configManager.uploadConfigDir(configset("cloud-hdfs"), "conf1");
System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
}
@AfterClass
public static void teardownClass() throws Exception {
cluster.shutdown(); // need to close before the MiniDFSCluster
HdfsTestUtil.teardownClass(dfsCluster);
dfsCluster = null;
}
public static class ForkJoinThreadsFilter implements ThreadFilter {
@Override
public boolean reject(Thread t) {
String name = t.getName();
if (name.startsWith("ForkJoinPool.commonPool")) {
return true;
}
return false;
}
}
}

View File

@ -20,6 +20,7 @@ package org.apache.solr.cloud;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
@ -31,6 +32,7 @@ import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.response.CoreAdminResponse;
import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.junit.BeforeClass;
@ -40,6 +42,7 @@ import org.slf4j.LoggerFactory;
public class MoveReplicaTest extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@BeforeClass
public static void setupCluster() throws Exception {
configureCluster(4)
@ -56,10 +59,11 @@ public class MoveReplicaTest extends SolrCloudTestCase {
cluster.waitForAllNodes(5000);
String coll = "movereplicatest_coll";
log.info("total_jettys: " + cluster.getJettySolrRunners().size());
int REPLICATION = 2;
CloudSolrClient cloudClient = cluster.getSolrClient();
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf1", 2, 2);
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf1", 2, REPLICATION);
create.setMaxShardsPerNode(2);
cloudClient.request(create);
@ -94,16 +98,87 @@ public class MoveReplicaTest extends SolrCloudTestCase {
break;
}
assertFalse(rsp.getRequestStatus() == RequestStatusState.FAILED);
Thread.sleep(50);
Thread.sleep(500);
}
assertTrue(success);
checkNumOfCores(cloudClient, replica.getNodeName(), 0);
checkNumOfCores(cloudClient, targetNode, 2);
assertTrue("should be at least one core on target node!", getNumOfCores(cloudClient, targetNode) > 0);
// wait for recovery
boolean recovered = false;
for (int i = 0; i < 300; i++) {
DocCollection collState = getCollectionState(coll);
log.debug("###### " + collState);
Collection<Replica> replicas = collState.getSlice(shardId).getReplicas();
boolean allActive = true;
boolean hasLeaders = true;
if (replicas != null && !replicas.isEmpty()) {
for (Replica r : replicas) {
if (!r.getNodeName().equals(targetNode)) {
continue;
}
if (!r.isActive(Collections.singleton(targetNode))) {
log.info("Not active: " + r);
allActive = false;
}
}
} else {
allActive = false;
}
for (Slice slice : collState.getSlices()) {
if (slice.getLeader() == null) {
hasLeaders = false;
}
}
if (allActive && hasLeaders) {
// check the number of active replicas
assertEquals("total number of replicas", REPLICATION, replicas.size());
recovered = true;
break;
} else {
log.info("--- waiting, allActive=" + allActive + ", hasLeaders=" + hasLeaders);
Thread.sleep(1000);
}
}
assertTrue("replica never fully recovered", recovered);
moveReplica = new CollectionAdminRequest.MoveReplica(coll, shardId, targetNode, replica.getNodeName());
moveReplica.process(cloudClient);
checkNumOfCores(cloudClient, replica.getNodeName(), 1);
checkNumOfCores(cloudClient, targetNode, 1);
// wait for recovery
recovered = false;
for (int i = 0; i < 300; i++) {
DocCollection collState = getCollectionState(coll);
log.debug("###### " + collState);
Collection<Replica> replicas = collState.getSlice(shardId).getReplicas();
boolean allActive = true;
boolean hasLeaders = true;
if (replicas != null && !replicas.isEmpty()) {
for (Replica r : replicas) {
if (!r.getNodeName().equals(replica.getNodeName())) {
continue;
}
if (!r.isActive(Collections.singleton(replica.getNodeName()))) {
log.info("Not active yet: " + r);
allActive = false;
}
}
} else {
allActive = false;
}
for (Slice slice : collState.getSlices()) {
if (slice.getLeader() == null) {
hasLeaders = false;
}
}
if (allActive && hasLeaders) {
assertEquals("total number of replicas", REPLICATION, replicas.size());
recovered = true;
break;
} else {
Thread.sleep(1000);
}
}
assertTrue("replica never fully recovered", recovered);
}
private Replica getRandomReplica(String coll, CloudSolrClient cloudClient) {

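The two recovery-wait loops above are identical except for the node they inspect; a possible cleanup (hypothetical, not in the patch) is to hoist them into one helper:

  // Hypothetical extraction of the duplicated wait loops: polls until every
  // replica on 'node' is ACTIVE and every slice has a leader, or gives up.
  private boolean waitForActiveReplicas(String coll, String shardId, String node,
                                        int expectedReplicas) throws Exception {
    for (int i = 0; i < 300; i++) {
      DocCollection collState = getCollectionState(coll);
      Collection<Replica> replicas = collState.getSlice(shardId).getReplicas();
      boolean allActive = replicas != null && !replicas.isEmpty();
      if (allActive) {
        for (Replica r : replicas) {
          if (r.getNodeName().equals(node) && !r.isActive(Collections.singleton(node))) {
            allActive = false; // the moved replica has not come back up yet
          }
        }
      }
      boolean hasLeaders = true;
      for (Slice slice : collState.getSlices()) {
        if (slice.getLeader() == null) hasLeaders = false;
      }
      if (allActive && hasLeaders) {
        assertEquals("total number of replicas", expectedReplicas, replicas.size());
        return true;
      }
      Thread.sleep(1000);
    }
    return false; // caller asserts on this with "replica never fully recovered"
  }
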
View File

@ -932,9 +932,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
.getBaseURL();
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl)) {
baseServer.setConnectionTimeout(30000);
baseServer.setSoTimeout(60000 * 5);
try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 30000, 60000 * 5)) {
baseServer.request(request);
}
}
@ -1007,15 +1005,13 @@ public class ShardSplitTest extends BasicDistributedZkTest {
@Override
protected SolrClient createNewSolrClient(String collection, String baseUrl) {
HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(collection, baseUrl);
client.setSoTimeout(5 * 60 * 1000);
HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(collection, baseUrl, DEFAULT_CONNECTION_TIMEOUT, 5 * 60 * 1000);
return client;
}
@Override
protected SolrClient createNewSolrClient(int port) {
HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(port);
client.setSoTimeout(5 * 60 * 1000);
HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(port, DEFAULT_CONNECTION_TIMEOUT, 5 * 60 * 1000);
return client;
}

View File

@ -108,9 +108,8 @@ public class SyncSliceTest extends AbstractFullDistribZkTestBase {
.getBaseURL();
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
try (HttpSolrClient baseClient = getHttpSolrClient(baseUrl)) {
// we only set the connection timeout, not the socket (read) timeout
baseClient.setConnectionTimeout(30000);
try (HttpSolrClient baseClient = getHttpSolrClient(baseUrl, 30000)) {
baseClient.request(request);
}

View File

@ -92,9 +92,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
assertEquals(1, replicas.size());
String baseUrl = replicas.iterator().next().getStr(ZkStateReader.BASE_URL_PROP);
if (!baseUrl.endsWith("/")) baseUrl += "/";
try (HttpSolrClient client = getHttpSolrClient(baseUrl + "a1x2")) {
client.setSoTimeout(5000);
client.setConnectionTimeout(2000);
try (HttpSolrClient client = getHttpSolrClient(baseUrl + "a1x2", 2000, 5000)) {
log.info("Making requests to " + baseUrl + "a1x2");
for (int i = 0; i < 10; i++) {
@ -170,9 +168,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
if (!baseUrl.endsWith("/")) baseUrl += "/";
String path = baseUrl + "football";
log.info("Firing queries against path=" + path);
try (HttpSolrClient client = getHttpSolrClient(path)) {
client.setSoTimeout(5000);
client.setConnectionTimeout(2000);
try (HttpSolrClient client = getHttpSolrClient(path, 2000, 5000)) {
SolrCore leaderCore = null;
for (JettySolrRunner jetty : jettys) {

View File

@ -189,8 +189,7 @@ public class TestRequestStatusCollectionAPI extends BasicDistributedZkTest {
String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient).getBaseURL();
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl)) {
baseServer.setConnectionTimeout(15000);
try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 15000)) {
return baseServer.request(request);
}

View File

@ -221,8 +221,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
// so that we start with some versions when we reload...
DirectUpdateHandler2.commitOnClose = false;
try (HttpSolrClient addClient = getHttpSolrClient(jettys.get(2).getBaseUrl() + "/unloadcollection_shard1_replica3")) {
addClient.setConnectionTimeout(30000);
try (HttpSolrClient addClient = getHttpSolrClient(jettys.get(2).getBaseUrl() + "/unloadcollection_shard1_replica3", 30000)) {
// add a few docs
for (int x = 20; x < 100; x++) {
@ -235,9 +234,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
//collectionClient.commit();
// unload the leader
try (HttpSolrClient collectionClient = getHttpSolrClient(leaderProps.getBaseUrl())) {
collectionClient.setConnectionTimeout(15000);
collectionClient.setSoTimeout(30000);
try (HttpSolrClient collectionClient = getHttpSolrClient(leaderProps.getBaseUrl(), 15000, 30000)) {
Unload unloadCmd = new Unload(false);
unloadCmd.setCoreName(leaderProps.getCoreName());
@ -259,9 +256,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
// ensure there is a leader
zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);
try (HttpSolrClient addClient = getHttpSolrClient(jettys.get(1).getBaseUrl() + "/unloadcollection_shard1_replica2")) {
addClient.setConnectionTimeout(30000);
addClient.setSoTimeout(90000);
try (HttpSolrClient addClient = getHttpSolrClient(jettys.get(1).getBaseUrl() + "/unloadcollection_shard1_replica2", 30000, 90000)) {
// add a few docs while the leader is down
for (int x = 101; x < 200; x++) {
@ -281,9 +276,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
// unload the leader again
leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");
try (HttpSolrClient collectionClient = getHttpSolrClient(leaderProps.getBaseUrl())) {
collectionClient.setConnectionTimeout(15000);
collectionClient.setSoTimeout(30000);
try (HttpSolrClient collectionClient = getHttpSolrClient(leaderProps.getBaseUrl(), 15000, 30000)) {
Unload unloadCmd = new Unload(false);
unloadCmd.setCoreName(leaderProps.getCoreName());
@ -313,27 +306,21 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
long found1, found3;
try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(1).getBaseUrl() + "/unloadcollection_shard1_replica2")) {
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);
try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(1).getBaseUrl() + "/unloadcollection_shard1_replica2", 15000, 30000)) {
adminClient.commit();
SolrQuery q = new SolrQuery("*:*");
q.set("distrib", false);
found1 = adminClient.query(q).getResults().getNumFound();
}
try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(2).getBaseUrl() + "/unloadcollection_shard1_replica3")) {
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);
try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(2).getBaseUrl() + "/unloadcollection_shard1_replica3", 15000, 30000)) {
adminClient.commit();
SolrQuery q = new SolrQuery("*:*");
q.set("distrib", false);
found3 = adminClient.query(q).getResults().getNumFound();
}
try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(3).getBaseUrl() + "/unloadcollection_shard1_replica4")) {
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);
try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(3).getBaseUrl() + "/unloadcollection_shard1_replica4", 15000, 30000)) {
adminClient.commit();
SolrQuery q = new SolrQuery("*:*");
q.set("distrib", false);
@ -348,9 +335,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
private void testUnloadLotsOfCores() throws Exception {
JettySolrRunner jetty = jettys.get(0);
try (final HttpSolrClient adminClient = (HttpSolrClient) jetty.newClient()) {
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(60000);
try (final HttpSolrClient adminClient = (HttpSolrClient) jetty.newClient(15000, 60000)) {
int numReplicas = atLeast(3);
ThreadPoolExecutor executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE,
5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),

View File

@ -180,7 +180,7 @@ public class StressHdfsTest extends BasicDistributedZkTest {
int i = 0;
for (SolrClient client : clients) {
try (HttpSolrClient c = getHttpSolrClient(getBaseUrl(client) + "/" + DELETE_DATA_DIR_COLLECTION)) {
try (HttpSolrClient c = getHttpSolrClient(getBaseUrl(client) + "/" + DELETE_DATA_DIR_COLLECTION, 30000)) {
int docCnt = random().nextInt(1000) + 1;
for (int j = 0; j < docCnt; j++) {
c.add(getDoc("id", i++, "txt_t", "just some random text for a doc"));
@ -192,7 +192,6 @@ public class StressHdfsTest extends BasicDistributedZkTest {
c.commit(true, true, true);
}
c.setConnectionTimeout(30000);
NamedList<Object> response = c.query(
new SolrQuery().setRequestHandler("/admin/system")).getResponse();
NamedList<Object> coreInfo = (NamedList<Object>) response.get("core");

View File

@ -136,15 +136,11 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {
// Mostly to keep annoying logging messages from being sent out all the time.
for (int idx = 0; idx < indexingThreads; ++idx) {
HttpSolrClient client = getHttpSolrClient(url);
client.setConnectionTimeout(30000);
client.setSoTimeout(60000);
HttpSolrClient client = getHttpSolrClient(url, 30000, 60000);
indexingClients.add(client);
}
for (int idx = 0; idx < queryThreads; ++idx) {
HttpSolrClient client = getHttpSolrClient(url);
client.setConnectionTimeout(30000);
client.setSoTimeout(30000);
HttpSolrClient client = getHttpSolrClient(url, 30000, 30000);
queryingClients.add(client);
}

View File

@ -164,9 +164,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
try {
// setup the client...
final String baseUrl = buildUrl(port) + "/" + DEFAULT_TEST_CORENAME;
HttpSolrClient client = getHttpSolrClient(baseUrl);
client.setConnectionTimeout(15000);
client.setSoTimeout(90000);
HttpSolrClient client = getHttpSolrClient(baseUrl, 15000, 90000);
return client;
}
catch (Exception ex) {

View File

@ -83,9 +83,7 @@ public class TestReplicationHandlerBackup extends SolrJettyTestBase {
try {
// setup the client...
final String baseUrl = buildUrl(port, context);
HttpSolrClient client = getHttpSolrClient(baseUrl);
client.setConnectionTimeout(15000);
client.setSoTimeout(60000);
HttpSolrClient client = getHttpSolrClient(baseUrl, 15000, 60000);
return client;
}
catch (Exception ex) {

View File

@ -71,9 +71,7 @@ public class TestRestoreCore extends SolrJettyTestBase {
try {
// setup the client...
final String baseUrl = buildUrl(port, context);
HttpSolrClient client = getHttpSolrClient(baseUrl);
client.setConnectionTimeout(15000);
client.setSoTimeout(60000);
HttpSolrClient client = getHttpSolrClient(baseUrl, 15000, 60000);
return client;
}
catch (Exception ex) {

View File

@ -259,18 +259,14 @@ public class CoreAdminHandlerTest extends SolrTestCaseJ4 {
JettySolrRunner runner = new JettySolrRunner(solrHomeDirectory.getAbsolutePath(), buildJettyConfig("/solr"));
runner.start();
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl() + "/corex")) {
client.setConnectionTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
client.setSoTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl() + "/corex", DEFAULT_CONNECTION_TIMEOUT, DEFAULT_CONNECTION_TIMEOUT)) {
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", "123");
client.add(doc);
client.commit();
}
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl().toString())) {
client.setConnectionTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
client.setSoTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl().toString(), DEFAULT_CONNECTION_TIMEOUT, DEFAULT_CONNECTION_TIMEOUT)) {
CoreAdminRequest.Unload req = new CoreAdminRequest.Unload(false);
req.setDeleteInstanceDir(true);
req.setCoreName("corex");
@ -295,25 +291,22 @@ public class CoreAdminHandlerTest extends SolrTestCaseJ4 {
JettySolrRunner runner = new JettySolrRunner(solrHomeDirectory.getAbsolutePath(), buildJettyConfig("/solr"));
runner.start();
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl() + "/corex")) {
client.setConnectionTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
client.setSoTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl() + "/corex", DEFAULT_CONNECTION_TIMEOUT,
DEFAULT_CONNECTION_TIMEOUT)) {
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", "123");
client.add(doc);
client.commit();
}
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl() + "/corex")) {
client.setConnectionTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
client.setSoTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl() + "/corex", DEFAULT_CONNECTION_TIMEOUT,
DEFAULT_CONNECTION_TIMEOUT)) {
QueryResponse result = client.query(new SolrQuery("id:*"));
assertEquals(1,result.getResults().getNumFound());
}
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl().toString())) {
client.setConnectionTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
client.setSoTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl().toString(), DEFAULT_CONNECTION_TIMEOUT,
DEFAULT_CONNECTION_TIMEOUT)) {
CoreAdminRequest.Unload req = new CoreAdminRequest.Unload(false);
req.setDeleteInstanceDir(false);//random().nextBoolean());
req.setCoreName("corex");
@ -321,9 +314,8 @@ public class CoreAdminHandlerTest extends SolrTestCaseJ4 {
}
HttpSolrClient.RemoteSolrException rse = expectThrows(HttpSolrClient.RemoteSolrException.class, () -> {
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl() + "/corex")) {
client.setConnectionTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
client.setSoTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT * 1000);
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl() + "/corex", DEFAULT_CONNECTION_TIMEOUT,
DEFAULT_CONNECTION_TIMEOUT * 1000)) {
client.query(new SolrQuery("id:*"));
} finally {
runner.stop();
@ -344,9 +336,7 @@ public class CoreAdminHandlerTest extends SolrTestCaseJ4 {
JettySolrRunner runner = new JettySolrRunner(solrHomeDirectory.getAbsolutePath(), buildJettyConfig("/solr"));
runner.start();
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl() + "/corex")) {
client.setConnectionTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
client.setSoTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl() + "/corex", DEFAULT_CONNECTION_TIMEOUT, DEFAULT_CONNECTION_TIMEOUT)) {
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", "123");
client.add(doc);
@ -365,9 +355,7 @@ public class CoreAdminHandlerTest extends SolrTestCaseJ4 {
String top = SolrTestCaseJ4.TEST_HOME() + "/collection1/conf";
FileUtils.copyFile(new File(top, "bad-error-solrconfig.xml"), new File(subHome, "solrconfig.xml"));
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl().toString())) {
client.setConnectionTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
client.setSoTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
try (HttpSolrClient client = getHttpSolrClient(runner.getBaseUrl().toString(), DEFAULT_CONNECTION_TIMEOUT, DEFAULT_CONNECTION_TIMEOUT)) {
try {
CoreAdminRequest.reloadCore("corex", client);
} catch (Exception e) {

View File

@ -195,6 +195,10 @@ public class CloudSolrClient extends SolrClient {
this.retryExpiryTime = TimeUnit.NANOSECONDS.convert(secs, TimeUnit.SECONDS);
}
/**
* @deprecated since 7.0 Use {@link Builder} methods instead.
*/
@Deprecated
public void setSoTimeout(int timeout) {
lbClient.setSoTimeout(timeout);
}
@ -264,15 +268,30 @@ public class CloudSolrClient extends SolrClient {
}
this.clientIsInternal = builder.httpClient == null;
this.shutdownLBHttpSolrServer = builder.loadBalancedSolrClient == null;
if(builder.lbClientBuilder != null) builder.loadBalancedSolrClient = builder.lbClientBuilder.build();
if(builder.lbClientBuilder != null) {
propagateLBClientConfigOptions(builder);
builder.loadBalancedSolrClient = builder.lbClientBuilder.build();
}
if(builder.loadBalancedSolrClient != null) builder.httpClient = builder.loadBalancedSolrClient.getHttpClient();
this.myClient = (builder.httpClient == null) ? HttpClientUtil.createClient(null) : builder.httpClient;
if (builder.loadBalancedSolrClient == null) builder.loadBalancedSolrClient = createLBHttpSolrClient(myClient);
if (builder.loadBalancedSolrClient == null) builder.loadBalancedSolrClient = createLBHttpSolrClient(builder, myClient);
this.lbClient = builder.loadBalancedSolrClient;
this.updatesToLeaders = builder.shardLeadersOnly;
this.directUpdatesToLeadersOnly = builder.directUpdatesToLeadersOnly;
}
private void propagateLBClientConfigOptions(Builder builder) {
final LBHttpSolrClient.Builder lbBuilder = builder.lbClientBuilder;
if (builder.connectionTimeoutMillis != null) {
lbBuilder.withConnectionTimeout(builder.connectionTimeoutMillis);
}
if (builder.socketTimeoutMillis != null) {
lbBuilder.withSocketTimeout(builder.socketTimeoutMillis);
}
}
/** Sets the cache TTL for cached DocCollection objects. This is only applicable for collections which are persisted outside of clusterstate.json.
* @param seconds ttl value in seconds
*/
@ -1292,6 +1311,10 @@ public class CloudSolrClient extends SolrClient {
return results;
}
/**
* @deprecated since 7.0 Use {@link Builder} methods instead.
*/
@Deprecated
public void setConnectionTimeout(int timeout) {
this.lbClient.setConnectionTimeout(timeout);
}
@ -1325,10 +1348,16 @@ public class CloudSolrClient extends SolrClient {
return true;
}
private static LBHttpSolrClient createLBHttpSolrClient(HttpClient httpClient) {
final LBHttpSolrClient lbClient = new LBHttpSolrClient.Builder()
.withHttpClient(httpClient)
.build();
private static LBHttpSolrClient createLBHttpSolrClient(Builder cloudSolrClientBuilder, HttpClient httpClient) {
final LBHttpSolrClient.Builder lbBuilder = new LBHttpSolrClient.Builder();
lbBuilder.withHttpClient(httpClient);
if (cloudSolrClientBuilder.connectionTimeoutMillis != null) {
lbBuilder.withConnectionTimeout(cloudSolrClientBuilder.connectionTimeoutMillis);
}
if (cloudSolrClientBuilder.socketTimeoutMillis != null) {
lbBuilder.withSocketTimeout(cloudSolrClientBuilder.socketTimeoutMillis);
}
final LBHttpSolrClient lbClient = lbBuilder.build();
lbClient.setRequestWriter(new BinaryRequestWriter());
lbClient.setParser(new BinaryResponseParser());
@ -1348,6 +1377,8 @@ public class CloudSolrClient extends SolrClient {
protected boolean shardLeadersOnly;
protected boolean directUpdatesToLeadersOnly;
protected ClusterStateProvider stateProvider;
protected Integer connectionTimeoutMillis;
protected Integer socketTimeoutMillis;
public Builder() {
@ -1484,6 +1515,30 @@ public class CloudSolrClient extends SolrClient {
return this;
}
/**
* Tells {@link Builder} that created clients should obey the provided connection timeout when connecting to Solr servers.
*/
public Builder withConnectionTimeout(int connectionTimeoutMillis) {
if (connectionTimeoutMillis <= 0) {
throw new IllegalArgumentException("connectionTimeoutMillis must be a positive integer.");
}
this.connectionTimeoutMillis = connectionTimeoutMillis;
return this;
}
/**
* Tells {@link Builder} that created clients should apply the provided read (socket) timeout to all sockets.
*/
public Builder withSocketTimeout(int socketTimeoutMillis) {
if (socketTimeoutMillis <= 0) {
throw new IllegalArgumentException("socketTimeoutMillis must be a positive integer.");
}
this.socketTimeoutMillis = socketTimeoutMillis;
return this;
}
/**
* Create a {@link CloudSolrClient} based on the provided configuration.
*/

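Taken together, these hunks deprecate the post-construction setters and route both timeouts through the Builder, which propagates them to the internal LBHttpSolrClient. A hedged usage sketch (the ZooKeeper address is a placeholder):

  CloudSolrClient client = new CloudSolrClient.Builder()
      .withZkHost("zk1:2181")        // placeholder ZK address
      .withConnectionTimeout(15000)  // ms; must be a positive integer
      .withSocketTimeout(30000)      // ms; must be a positive integer
      .build();
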
View File

@ -129,6 +129,8 @@ public class ConcurrentUpdateSolrClient extends SolrClient {
this.threadCount = builder.threadCount;
this.runners = new LinkedList<>();
this.streamDeletes = builder.streamDeletes;
this.connectionTimeout = builder.connectionTimeoutMillis;
this.soTimeout = builder.socketTimeoutMillis;
if (builder.executorService != null) {
this.scheduler = builder.executorService;
@ -703,6 +705,10 @@ public class ConcurrentUpdateSolrClient extends SolrClient {
}
}
/**
* @deprecated since 7.0 Use {@link Builder} methods instead.
*/
@Deprecated
public void setConnectionTimeout(int timeout) {
this.connectionTimeout = timeout;
}
@ -710,7 +716,10 @@ public class ConcurrentUpdateSolrClient extends SolrClient {
/**
* set soTimeout (read timeout) on the underlying HttpConnectionManager. This is desirable for queries, but probably
* not for indexing.
*
* @deprecated since 7.0 Use {@link Builder} methods instead.
*/
@Deprecated
public void setSoTimeout(int timeout) {
this.soTimeout = timeout;
}
@ -768,6 +777,8 @@ public class ConcurrentUpdateSolrClient extends SolrClient {
protected int threadCount;
protected ExecutorService executorService;
protected boolean streamDeletes;
protected Integer connectionTimeoutMillis;
protected Integer socketTimeoutMillis;
/**
* Create a Builder object, based on the provided Solr URL.
@ -849,6 +860,31 @@ public class ConcurrentUpdateSolrClient extends SolrClient {
return this;
}
/**
* Tells {@link Builder} that created clients should obey the provided connection timeout when connecting to Solr servers.
*/
public Builder withConnectionTimeout(int connectionTimeoutMillis) {
if (connectionTimeoutMillis <= 0) {
throw new IllegalArgumentException("connectionTimeoutMillis must be a positive integer.");
}
this.connectionTimeoutMillis = connectionTimeoutMillis;
return this;
}
/**
* Tells {@link Builder} that created clients should apply the provided read (socket) timeout to all sockets.
*/
public Builder withSocketTimeout(int socketTimeoutMillis) {
if (socketTimeoutMillis <= 0) {
throw new IllegalArgumentException("socketTimeoutMillis must be a positive integer.");
}
this.socketTimeoutMillis = socketTimeoutMillis;
return this;
}
/**
* Create a {@link ConcurrentUpdateSolrClient} based on the provided configuration options.
*/

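The same Builder options land here; a sketch mirroring the FullThrottleStoppableIndexingThread change earlier in this commit (the base URL is a placeholder):

  ConcurrentUpdateSolrClient cusc =
      new ConcurrentUpdateSolrClient.Builder("http://localhost:8983/solr/collection1") // placeholder URL
          .withQueueSize(8)
          .withThreadCount(2)
          .withConnectionTimeout(10000)
          .withSocketTimeout(60000)
          .build();
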
View File

@ -199,6 +199,8 @@ public class HttpSolrClient extends SolrClient {
this.parser = builder.responseParser;
this.invariantParams = builder.invariantParams;
this.connectionTimeout = builder.connectionTimeoutMillis;
this.soTimeout = builder.socketTimeoutMillis;
}
public Set<String> getQueryParams() {
@ -713,7 +715,10 @@ public class HttpSolrClient extends SolrClient {
*
* @param timeout
* Timeout in milliseconds
**/
*
* @deprecated since 7.0 Use {@link Builder} methods instead.
*/
@Deprecated
public void setConnectionTimeout(int timeout) {
this.connectionTimeout = timeout;
}
@ -724,7 +729,10 @@ public class HttpSolrClient extends SolrClient {
*
* @param timeout
* Timeout in milliseconds
**/
*
* @deprecated since 7.0 Use {@link Builder} methods instead.
*/
@Deprecated
public void setSoTimeout(int timeout) {
this.soTimeout = timeout;
}
@ -825,6 +833,8 @@ public class HttpSolrClient extends SolrClient {
protected ResponseParser responseParser;
protected boolean compression;
protected ModifiableSolrParams invariantParams = new ModifiableSolrParams();
protected Integer connectionTimeoutMillis;
protected Integer socketTimeoutMillis;
public Builder() {
this.responseParser = new BinaryResponseParser();
@ -932,6 +942,30 @@ public class HttpSolrClient extends SolrClient {
return this;
}
/**
* Tells {@link Builder} that created clients should obey the provided connection timeout when connecting to Solr servers.
*/
public Builder withConnectionTimeout(int connectionTimeoutMillis) {
if (connectionTimeoutMillis <= 0) {
throw new IllegalArgumentException("connectionTimeoutMillis must be a positive integer.");
}
this.connectionTimeoutMillis = connectionTimeoutMillis;
return this;
}
/**
* Tells {@link Builder} that created clients should apply the provided read (socket) timeout to all sockets.
*/
public Builder withSocketTimeout(int socketTimeoutMillis) {
if (socketTimeoutMillis <= 0) {
throw new IllegalArgumentException("socketTimeoutMillis must be a positive integer.");
}
this.socketTimeoutMillis = socketTimeoutMillis;
return this;
}
/**
* Create a {@link HttpSolrClient} based on provided configuration.
*/

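And the equivalent for plain HttpSolrClient; a sketch with a placeholder URL:

  HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1") // placeholder URL
      .withConnectionTimeout(15000) // ms
      .withSocketTimeout(60000)     // ms
      .build();
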
View File

@ -268,7 +268,8 @@ public class LBHttpSolrClient extends SolrClient {
this.clientIsInternal = builder.httpClient == null;
this.httpSolrClientBuilder = builder.httpSolrClientBuilder;
this.httpClient = builder.httpClient == null ? constructClient(builder.baseSolrUrls.toArray(new String[builder.baseSolrUrls.size()])) : builder.httpClient;
this.connectionTimeout = builder.connectionTimeoutMillis;
this.soTimeout = builder.socketTimeoutMillis;
this.parser = builder.responseParser;
if (! builder.baseSolrUrls.isEmpty()) {
@ -316,16 +317,28 @@ public class LBHttpSolrClient extends SolrClient {
HttpSolrClient client;
if (httpSolrClientBuilder != null) {
synchronized (this) {
client = httpSolrClientBuilder
httpSolrClientBuilder
.withBaseSolrUrl(server)
.withHttpClient(httpClient)
.build();
.withHttpClient(httpClient);
if (connectionTimeout != null) {
httpSolrClientBuilder.withConnectionTimeout(connectionTimeout);
}
if (soTimeout != null) {
httpSolrClientBuilder.withSocketTimeout(soTimeout);
}
client = httpSolrClientBuilder.build();
}
} else {
client = new HttpSolrClient.Builder(server)
final HttpSolrClient.Builder clientBuilder = new HttpSolrClient.Builder(server)
.withHttpClient(httpClient)
.withResponseParser(parser)
.build();
.withResponseParser(parser);
if (connectionTimeout != null) {
clientBuilder.withConnectionTimeout(connectionTimeout);
}
if (soTimeout != null) {
clientBuilder.withSocketTimeout(soTimeout);
}
client = clientBuilder.build();
}
if (requestWriter != null) {
client.setRequestWriter(requestWriter);
@ -558,6 +571,10 @@ public class LBHttpSolrClient extends SolrClient {
return null;
}
/**
* @deprecated since 7.0 Use {@link Builder} methods instead.
*/
@Deprecated
public void setConnectionTimeout(int timeout) {
this.connectionTimeout = timeout;
synchronized (aliveServers) {
@ -575,7 +592,10 @@ public class LBHttpSolrClient extends SolrClient {
/**
* set soTimeout (read timeout) on the underlying HttpConnectionManager. This is desirable for queries, but probably
* not for indexing.
*
* @deprecated since 7.0 Use {@link Builder} methods instead.
*/
@Deprecated
public void setSoTimeout(int timeout) {
this.soTimeout = timeout;
synchronized (aliveServers) {
@ -866,6 +886,8 @@ public class LBHttpSolrClient extends SolrClient {
protected HttpClient httpClient;
protected ResponseParser responseParser;
protected HttpSolrClient.Builder httpSolrClientBuilder;
protected Integer connectionTimeoutMillis;
protected Integer socketTimeoutMillis;
public Builder() {
this.baseSolrUrls = new ArrayList<>();
@ -959,6 +981,30 @@ public class LBHttpSolrClient extends SolrClient {
return this;
}
/**
* Tells {@link Builder} that created clients should obey the provided connection timeout when connecting to Solr servers.
*/
public Builder withConnectionTimeout(int connectionTimeoutMillis) {
if (connectionTimeoutMillis <= 0) {
throw new IllegalArgumentException("connectionTimeoutMillis must be a positive integer.");
}
this.connectionTimeoutMillis = connectionTimeoutMillis;
return this;
}
/**
* Tells {@link Builder} that created clients should apply the provided read (socket) timeout to all sockets.
*/
public Builder withSocketTimeout(int socketTimeoutMillis) {
if (socketTimeoutMillis <= 0) {
throw new IllegalArgumentException("socketTimeoutMillis must be a positive integer.");
}
this.socketTimeoutMillis = socketTimeoutMillis;
return this;
}
/**
* Create a {@link HttpSolrClient} based on provided configuration.
*/

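As the makeSolrClient hunk above shows, timeouts given to the LBHttpSolrClient.Builder are also pushed down into the per-server HttpSolrClients it creates. A sketch with placeholder URLs:

  LBHttpSolrClient lbClient = new LBHttpSolrClient.Builder()
      .withBaseSolrUrls("http://host1:8983/solr", "http://host2:8983/solr") // placeholder URLs
      .withConnectionTimeout(500)
      .withSocketTimeout(500)
      .build();
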
View File

@ -40,8 +40,7 @@ public class SolrExampleBinaryTest extends SolrExampleTests {
try {
// setup the server...
String url = jetty.getBaseUrl().toString() + "/collection1";
HttpSolrClient client = getHttpSolrClient(url);
client.setConnectionTimeout(DEFAULT_CONNECTION_TIMEOUT);
HttpSolrClient client = getHttpSolrClient(url, DEFAULT_CONNECTION_TIMEOUT);
client.setUseMultiPartPost(random().nextBoolean());
// where the magic happens

View File

@ -37,9 +37,8 @@ public class SolrExampleXMLTest extends SolrExampleTests {
public SolrClient createNewSolrClient() {
try {
String url = jetty.getBaseUrl().toString() + "/collection1";
HttpSolrClient client = getHttpSolrClient(url);
HttpSolrClient client = getHttpSolrClient(url, DEFAULT_CONNECTION_TIMEOUT);
client.setUseMultiPartPost(random().nextBoolean());
client.setConnectionTimeout(DEFAULT_CONNECTION_TIMEOUT);
client.setParser(new XMLResponseParser());
client.setRequestWriter(new RequestWriter());
return client;

View File

@ -40,8 +40,7 @@ public class SolrExceptionTest extends LuceneTestCase {
// switched to a local address to avoid going out on the net, ns lookup issues, etc.
// set a 1ms timeout to let the connection fail faster.
httpClient = HttpClientUtil.createClient(null);
try (HttpSolrClient client = getHttpSolrClient("http://[ff01::114]:11235/solr/", httpClient)) {
client.setConnectionTimeout(1);
try (HttpSolrClient client = getHttpSolrClient("http://[ff01::114]:11235/solr/", httpClient, 1)) {
SolrQuery query = new SolrQuery("test123");
client.query(query);
}

View File

@ -133,8 +133,7 @@ public class SolrSchemalessExampleTest extends SolrExampleTestsBase {
try {
// setup the server...
String url = jetty.getBaseUrl().toString() + "/collection1";
HttpSolrClient client = getHttpSolrClient(url);
client.setConnectionTimeout(DEFAULT_CONNECTION_TIMEOUT);
HttpSolrClient client = getHttpSolrClient(url, DEFAULT_CONNECTION_TIMEOUT);
client.setUseMultiPartPost(random().nextBoolean());
if (random().nextBoolean()) {

View File

@ -206,9 +206,7 @@ public class TestLBHttpSolrClient extends SolrTestCaseJ4 {
CloseableHttpClient myHttpClient = HttpClientUtil.createClient(null);
try {
LBHttpSolrClient client = getLBHttpSolrClient(myHttpClient, s);
client.setConnectionTimeout(500);
client.setSoTimeout(500);
LBHttpSolrClient client = getLBHttpSolrClient(myHttpClient, 500, 500, s);
client.setAliveCheckInterval(500);
// Kill a server and test again

View File

@ -209,8 +209,7 @@ public class BasicHttpSolrClientTest extends SolrJettyTestBase {
public void testTimeout() throws Exception {
SolrQuery q = new SolrQuery("*:*");
try(HttpSolrClient client = getHttpSolrClient(jetty.getBaseUrl().toString() + "/slow/foo")) {
client.setSoTimeout(2000);
try(HttpSolrClient client = getHttpSolrClient(jetty.getBaseUrl().toString() + "/slow/foo", DEFAULT_CONNECTION_TIMEOUT, 2000)) {
client.query(q, METHOD.GET);
fail("No exception thrown.");
} catch (SolrServerException e) {

View File

@ -463,9 +463,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
SolrServerException, IOException {
NamedList<Object> resp;
try (HttpSolrClient client = getHttpSolrClient(baseUrl + "/"+ collectionName)) {
client.setConnectionTimeout(15000);
client.setSoTimeout(60000);
try (HttpSolrClient client = getHttpSolrClient(baseUrl + "/"+ collectionName, 15000, 60000)) {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("qt", "/admin/mbeans");
params.set("stats", "true");

View File

@ -70,8 +70,7 @@ public class HttpSolrClientConPoolTest extends SolrJettyTestBase {
fooUrl = jetty.getBaseUrl().toString() + "/" + "collection1";
CloseableHttpClient httpClient = HttpClientUtil.createClient(new ModifiableSolrParams(), pool,
false /* let client shutdown it*/);
client1 = getHttpSolrClient(fooUrl, httpClient);
client1.setConnectionTimeout(DEFAULT_CONNECTION_TIMEOUT);
client1 = getHttpSolrClient(fooUrl, httpClient, DEFAULT_CONNECTION_TIMEOUT);
}
final String barUrl = yetty.getBaseUrl().toString() + "/" + "collection1";

View File

@ -152,8 +152,7 @@ abstract public class SolrJettyTestBase extends SolrTestCaseJ4
try {
// setup the client...
String url = jetty.getBaseUrl().toString() + "/" + "collection1";
HttpSolrClient client = getHttpSolrClient(url);
client.setConnectionTimeout(DEFAULT_CONNECTION_TIMEOUT);
HttpSolrClient client = getHttpSolrClient(url, DEFAULT_CONNECTION_TIMEOUT);
return client;
}
catch( Exception ex ) {

View File

@ -2295,6 +2295,50 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
* {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly
*/
public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLeadersOnly, int socketTimeoutMillis) {
if (shardLeadersOnly) {
return new CloudSolrClientBuilder()
.withZkHost(zkHost)
.sendUpdatesOnlyToShardLeaders()
.withSocketTimeout(socketTimeoutMillis)
.build();
}
return new CloudSolrClientBuilder()
.withZkHost(zkHost)
.sendUpdatesToAllReplicasInShard()
.withSocketTimeout(socketTimeoutMillis)
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
* {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly
*/
public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLeadersOnly, int connectionTimeoutMillis, int socketTimeoutMillis) {
if (shardLeadersOnly) {
return new CloudSolrClientBuilder()
.withZkHost(zkHost)
.sendUpdatesOnlyToShardLeaders()
.withConnectionTimeout(connectionTimeoutMillis)
.withSocketTimeout(socketTimeoutMillis)
.build();
}
return new CloudSolrClientBuilder()
.withZkHost(zkHost)
.sendUpdatesToAllReplicasInShard()
.withConnectionTimeout(connectionTimeoutMillis)
.withSocketTimeout(socketTimeoutMillis)
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
@ -2315,6 +2359,31 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
* {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly
*/
public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLeadersOnly, HttpClient httpClient,
int connectionTimeoutMillis, int socketTimeoutMillis) {
if (shardLeadersOnly) {
return new CloudSolrClientBuilder()
.withZkHost(zkHost)
.withHttpClient(httpClient)
.sendUpdatesOnlyToShardLeaders()
.withConnectionTimeout(connectionTimeoutMillis)
.withSocketTimeout(socketTimeoutMillis)
.build();
}
return new CloudSolrClientBuilder()
.withZkHost(zkHost)
.withHttpClient(httpClient)
.sendUpdatesToAllReplicasInShard()
.withConnectionTimeout(connectionTimeoutMillis)
.withSocketTimeout(socketTimeoutMillis)
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
@ -2327,6 +2396,19 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
* {@link org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient.Builder} class directly
*/
public static ConcurrentUpdateSolrClient getConcurrentUpdateSolrClient(String baseSolrUrl, int queueSize, int threadCount, int connectionTimeoutMillis) {
return new ConcurrentUpdateSolrClient.Builder(baseSolrUrl)
.withQueueSize(queueSize)
.withThreadCount(threadCount)
.withConnectionTimeout(connectionTimeoutMillis)
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
@ -2352,6 +2434,21 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
* {@link org.apache.solr.client.solrj.impl.LBHttpSolrClient.Builder} class directly
*/
public static LBHttpSolrClient getLBHttpSolrClient(HttpClient client, int connectionTimeoutMillis,
int socketTimeoutMillis, String... solrUrls) {
return new LBHttpSolrClient.Builder()
.withHttpClient(client)
.withBaseSolrUrls(solrUrls)
.withConnectionTimeout(connectionTimeoutMillis)
.withSocketTimeout(socketTimeoutMillis)
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
@ -2399,6 +2496,18 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
* {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly
*/
public static HttpSolrClient getHttpSolrClient(String url, HttpClient httpClient, int connectionTimeoutMillis) {
return new Builder(url)
.withHttpClient(httpClient)
.withConnectionTimeout(connectionTimeoutMillis)
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
@ -2409,6 +2518,29 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
* {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly
*/
public static HttpSolrClient getHttpSolrClient(String url, int connectionTimeoutMillis) {
return new Builder(url)
.withConnectionTimeout(connectionTimeoutMillis)
.build();
}
/**
* This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
* Tests that do not wish to have any randomized behavior should use the
* {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly
*/
public static HttpSolrClient getHttpSolrClient(String url, int connectionTimeoutMillis, int socketTimeoutMillis) {
return new Builder(url)
.withConnectionTimeout(connectionTimeoutMillis)
.withSocketTimeout(socketTimeoutMillis)
.build();
}
/**
* Returns a randomly generated Date in the appropriate Solr external (input) format
* @see #randomSkewedDate

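With these overloads, test code that used to mutate a client after construction collapses to a single call; a sketch with a placeholder URL:

  try (HttpSolrClient client = getHttpSolrClient("http://localhost:8983/solr/collection1", 15000, 60000)) {
    client.query(new SolrQuery("*:*")); // connectionTimeout=15s, socketTimeout=60s
  }
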
View File

@ -295,11 +295,9 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
}
protected CloudSolrClient createCloudClient(String defaultCollection) {
CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress(), random().nextBoolean());
CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress(), random().nextBoolean(), 30000, 60000);
client.setParallelUpdates(random().nextBoolean());
if (defaultCollection != null) client.setDefaultCollection(defaultCollection);
client.getLbClient().setConnectionTimeout(30000);
client.getLbClient().setSoTimeout(60000);
return client;
}
@ -1709,14 +1707,28 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
return createNewSolrClient(DEFAULT_COLLECTION, port);
}
protected SolrClient createNewSolrClient(int port, int connectionTimeoutMillis, int socketTimeoutMillis) {
return createNewSolrClient(DEFAULT_COLLECTION, port, connectionTimeoutMillis, socketTimeoutMillis);
}
protected SolrClient createNewSolrClient(String coreName, int port) {
try {
// setup the server...
String baseUrl = buildUrl(port);
String url = baseUrl + (baseUrl.endsWith("/") ? "" : "/") + coreName;
HttpSolrClient client = getHttpSolrClient(url);
client.setConnectionTimeout(DEFAULT_CONNECTION_TIMEOUT);
client.setSoTimeout(60000);
HttpSolrClient client = getHttpSolrClient(url, DEFAULT_CONNECTION_TIMEOUT, 60000);
return client;
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
protected SolrClient createNewSolrClient(String coreName, int port, int connectionTimeoutMillis, int socketTimeoutMillis) {
try {
// setup the server...
String baseUrl = buildUrl(port);
String url = baseUrl + (baseUrl.endsWith("/") ? "" : "/") + coreName;
HttpSolrClient client = getHttpSolrClient(url, connectionTimeoutMillis, socketTimeoutMillis);
return client;
} catch (Exception ex) {
throw new RuntimeException(ex);
@ -1726,8 +1738,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
protected SolrClient createNewSolrClient(String collection, String baseUrl) {
try {
// setup the server...
HttpSolrClient client = getHttpSolrClient(baseUrl + "/" + collection);
client.setConnectionTimeout(DEFAULT_CONNECTION_TIMEOUT);
HttpSolrClient client = getHttpSolrClient(baseUrl + "/" + collection, DEFAULT_CONNECTION_TIMEOUT);
return client;
}
catch (Exception ex) {
@ -1811,9 +1822,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
boolean updatesToLeaders = random().nextBoolean();
boolean parallelUpdates = random().nextBoolean();
commonCloudSolrClient = getCloudSolrClient(zkServer.getZkAddress(),
updatesToLeaders);
commonCloudSolrClient.getLbClient().setConnectionTimeout(5000);
commonCloudSolrClient.getLbClient().setSoTimeout(120000);
updatesToLeaders, 5000, 120000);
commonCloudSolrClient.setParallelUpdates(parallelUpdates);
commonCloudSolrClient.setDefaultCollection(DEFAULT_COLLECTION);
commonCloudSolrClient.connect();