mirror of https://github.com/apache/lucene.git
SOLR-4933: if shard split fails with 500, wait a while to see if it succeeds on a retry
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1498763 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent 1829c3a811
commit e6497c1a9a
@@ -17,6 +17,15 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
 import org.apache.http.params.CoreConnectionPNames;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrRequest;
@@ -41,16 +50,6 @@ import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.Before;
 
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-
 public class ShardSplitTest extends BasicDistributedZkTest {
 
   public static final String SHARD1_0 = SHARD1 + "_0";
@@ -159,7 +158,33 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     }
 
     commit();
 
+    try {
+      checkDocCountsAndShardStates(docCounts, numReplicas);
+    } catch (HttpSolrServer.RemoteSolrException e) {
+      if (e.code() != 500) {
+        throw e;
+      }
+
+      // if we get a 500 error, the split should be retried ... let's wait and see if it works...
+      Slice slice1_0 = null, slice1_1 = null;
+      int i = 0;
+      for (i = 0; i < 60; i++) {
+        ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+        zkStateReader.updateClusterState(true);
+        clusterState = zkStateReader.getClusterState();
+        slice1_0 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_0");
+        slice1_1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_1");
+        if (slice1_0 != null && slice1_1 != null) {
+          break;
+        }
+        Thread.sleep(500);
+      }
+
+      if (slice1_0 == null || slice1_1 == null) {
+        throw e;
+      }
+    }
 
     // todo can't call waitForThingsToLevelOut because it looks for jettys of all shards
     // and the new sub-shards don't have any.
@@ -658,7 +658,7 @@ public class HttpSolrServer extends SolrServer {
    * status code that may have been returned by the remote server or a
    * proxy along the way.
    */
-  protected static class RemoteSolrException extends SolrException {
+  public static class RemoteSolrException extends SolrException {
     /**
      * @param code Arbitrary HTTP status code
      * @param msg Exception Message
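
The logic added to ShardSplitTest above amounts to a poll-until-visible check: an HTTP 500 from the split request is tolerated because the split is retried on the server side, so the test only gives up if the two sub-shard slices never appear in the cluster state. A minimal sketch of that pattern, assuming the test class's cloudClient field and the same SolrJ/ZooKeeper types used in the diff (waitForSubShards is a hypothetical helper, not part of this commit):

// Hypothetical helper illustrating the wait-and-see logic above; not part of the commit.
// It polls the ZooKeeper cluster state until both sub-shard slices appear or the timeout expires.
private boolean waitForSubShards(long timeoutMs) throws Exception {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline) {
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    zkStateReader.updateClusterState(true);  // refresh the locally cached cluster state from ZooKeeper
    ClusterState clusterState = zkStateReader.getClusterState();
    Slice slice1_0 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_0");
    Slice slice1_1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_1");
    if (slice1_0 != null && slice1_1 != null) {
      return true;  // both halves of the split are registered, so the retried split succeeded
    }
    Thread.sleep(500);
  }
  return false;  // caller should rethrow the original RemoteSolrException
}

The catch block in the test does essentially this inline, sleeping 500 ms between checks for up to 60 iterations (about 30 seconds) before rethrowing the original RemoteSolrException, which is why the second hunk widens that class from protected to public.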