SOLR-3831: Atomic updates do not distribute correctly to other nodes.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1385136 13f79535-47bb-0310-9956-ffa450edef68
Mark Robert Miller committed 2012-09-15 17:39:28 +00:00
parent d8385729c2
commit eb275c2b43
7 changed files with 150 additions and 98 deletions
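Root cause, as the diff below suggests: the SolrInputDocument on an AddUpdateCommand is mutated while the update runs through versionAdd() and the local add, so a clone taken too early (as the removed lines in DistributedUpdateProcessor did) left the leader forwarding a document that no longer matched what it had indexed; for an atomic update that could be the un-merged partial document. The fix takes the deepCopy() inside versionAdd(), only when the node is a leader with somewhere to forward to, immediately before doLocalAdd(), and restores the clone on the command afterwards. The test changes move BasicDistributedZkTest onto AbstractFullDistribZkTestBase so this path is exercised across real nodes.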

View File

@@ -225,6 +225,9 @@ Bug Fixes
* SOLR-3844: SolrCore reload can fail because it tries to remove the index
write lock while already holding it. (Mark Miller)
* SOLR-3831: Atomic updates do not distribute correctly to other nodes.
(Jim Musil, Mark Miller)
Other Changes
----------------------

View File

@@ -266,7 +266,7 @@ public class OverseerCollectionProcessor implements Runnable {
sreq.shards = new String[] {replica};
sreq.actualShards = sreq.shards;
sreq.params = params;
log.info("Collection Admin sending CoreAdmin cmd to " + replica);
shardHandler.submit(sreq, replica, sreq.params);
}
}

View File

@@ -321,10 +321,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
boolean dropCmd = false;
if (!forwardToLeader) {
// clone the original doc
SolrInputDocument clonedDoc = cmd.solrDoc.deepCopy();
dropCmd = versionAdd(cmd, clonedDoc);
cmd.solrDoc = clonedDoc;
dropCmd = versionAdd(cmd);
}
if (dropCmd) {
@@ -441,11 +438,10 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
/**
* @param cmd
* @param cloneDoc needs the version if it's assigned
* @return whether or not to drop this cmd
* @throws IOException
*/
private boolean versionAdd(AddUpdateCommand cmd, SolrInputDocument cloneDoc) throws IOException {
private boolean versionAdd(AddUpdateCommand cmd) throws IOException {
BytesRef idBytes = cmd.getIndexedId();
if (vinfo == null || idBytes == null) {
@@ -518,7 +514,6 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
long version = vinfo.getNewClock();
cmd.setVersion(version);
cmd.getSolrInputDocument().setField(VersionInfo.VERSION_FIELD, version);
cloneDoc.setField(VersionInfo.VERSION_FIELD, version);
bucket.updateHighest(version);
} else {
// The leader forwarded us this update.
@@ -550,9 +545,20 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
}
}
}
boolean willDistrib = isLeader && nodes != null && nodes.size() > 0;
SolrInputDocument clonedDoc = null;
if (willDistrib) {
clonedDoc = cmd.solrDoc.deepCopy();
}
// TODO: possibly set checkDeleteByQueries as a flag on the command?
doLocalAdd(cmd);
if (willDistrib) {
cmd.solrDoc = clonedDoc;
}
} // end synchronized (bucket)
} finally {
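A minimal sketch of the clone-and-restore choreography this hunk introduces, in standalone Java. A plain Map stands in for SolrInputDocument; AddCmd, localAdd() and distribAdd() are hypothetical stand-ins for AddUpdateCommand, doLocalAdd() and the forwarding to replicas. Only the ordering mirrors the real change: assign the version, clone only when the node will distribute, let the local add mutate the command, then restore the clone.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-ins: Map plays SolrInputDocument, AddCmd plays
// AddUpdateCommand, localAdd()/distribAdd() play doLocalAdd() and the
// forwarding to replicas.
public class CloneBeforeLocalAdd {

  static class AddCmd {
    Map<String,Object> doc;
    AddCmd(Map<String,Object> doc) { this.doc = doc; }
  }

  // Local indexing may mutate the document in place, which is exactly why
  // the leader must keep a pristine copy for its replicas.
  static void localAdd(AddCmd cmd) {
    cmd.doc.put("_scratch_", "state added while indexing locally");
  }

  static void distribAdd(AddCmd cmd) {
    System.out.println("forwarded to replicas: " + cmd.doc);
  }

  static void versionAdd(AddCmd cmd, boolean isLeader, int replicaCount) {
    cmd.doc.put("_version_", 1234L);           // version assigned first

    boolean willDistrib = isLeader && replicaCount > 0;
    Map<String,Object> clonedDoc = null;
    if (willDistrib) {
      clonedDoc = new HashMap<>(cmd.doc);      // deepCopy() in the real code
    }

    localAdd(cmd);                             // may mutate cmd.doc

    if (willDistrib) {
      cmd.doc = clonedDoc;                     // restore the versioned copy
    }
  }

  public static void main(String[] args) {
    Map<String,Object> doc = new HashMap<>();
    doc.put("id", "1");
    AddCmd cmd = new AddCmd(doc);
    versionAdd(cmd, true, 2);
    distribAdd(cmd); // shows id and _version_ but no local scratch state
  }
}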

View File

@@ -70,13 +70,14 @@ import org.apache.solr.common.params.UpdateParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.update.SolrCmdDistributor.Request;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.junit.Before;
/**
* This test simply does a bunch of basic things in solrcloud mode and asserts things
* work as expected.
*/
@Slow
public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
private static final String DEFAULT_COLLECTION = "collection1";
private static final boolean DEBUG = false;
@@ -109,8 +110,19 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
CompletionService<Request> completionService;
Set<Future<Request>> pending;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
System.setProperty("numShards", Integer.toString(sliceCount));
}
public BasicDistributedZkTest() {
fixShardCount = true;
sliceCount = 2;
shardCount = 3;
completionService = new ExecutorCompletionService<Request>(executor);
pending = new HashSet<Future<Request>>();
@@ -138,6 +150,14 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
public void doTest() throws Exception {
// setLoggingLevel(null);
// make sure we have leaders for each shard
for (int j = 1; j < sliceCount; j++) {
zkStateReader.getLeaderProps(DEFAULT_COLLECTION, "shard" + j, 10000);
} // make sure we again have leaders for each shard
waitForRecoveriesToFinish(false);
del("*:*");
indexr(id,1, i1, 100, tlong, 100,t1,"now is the time for all good men"
@@ -180,74 +200,74 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
// random value sort
for (String f : fieldNames) {
query("q","*:*", "sort",f+" desc");
query("q","*:*", "sort",f+" asc");
query(false, new String[] {"q","*:*", "sort",f+" desc"});
query(false, new String[] {"q","*:*", "sort",f+" asc"});
}
// these queries should be exactly ordered and scores should exactly match
query("q","*:*", "sort",i1+" desc");
query("q","*:*", "sort",i1+" asc");
query("q","*:*", "sort",i1+" desc", "fl","*,score");
query("q","*:*", "sort","n_tl1 asc", "fl","*,score");
query("q","*:*", "sort","n_tl1 desc");
query(false, new String[] {"q","*:*", "sort",i1+" desc"});
query(false, new String[] {"q","*:*", "sort",i1+" asc"});
query(false, new String[] {"q","*:*", "sort",i1+" desc", "fl","*,score"});
query(false, new String[] {"q","*:*", "sort","n_tl1 asc", "fl","*,score"});
query(false, new String[] {"q","*:*", "sort","n_tl1 desc"});
handle.put("maxScore", SKIPVAL);
query("q","{!func}"+i1);// does not expect maxScore. So if it comes ,ignore it. JavaBinCodec.writeSolrDocumentList()
query(false, new String[] {"q","{!func}"+i1});// does not expect maxScore. So if it comes ,ignore it. JavaBinCodec.writeSolrDocumentList()
//is agnostic of request params.
handle.remove("maxScore");
query("q","{!func}"+i1, "fl","*,score"); // even scores should match exactly here
query(false, new String[] {"q","{!func}"+i1, "fl","*,score"}); // even scores should match exactly here
handle.put("highlighting", UNORDERED);
handle.put("response", UNORDERED);
handle.put("maxScore", SKIPVAL);
query("q","quick");
query("q","all","fl","id","start","0");
query("q","all","fl","foofoofoo","start","0"); // no fields in returned docs
query("q","all","fl","id","start","100");
query(false, new String[] {"q","quick"});
query(false, new String[] {"q","all","fl","id","start","0"});
query(false, new String[] {"q","all","fl","foofoofoo","start","0"}); // no fields in returned docs
query(false, new String[] {"q","all","fl","id","start","100"});
handle.put("score", SKIPVAL);
query("q","quick","fl","*,score");
query("q","all","fl","*,score","start","1");
query("q","all","fl","*,score","start","100");
query(false, new String[] {"q","quick","fl","*,score"});
query(false, new String[] {"q","all","fl","*,score","start","1"});
query(false, new String[] {"q","all","fl","*,score","start","100"});
query("q","now their fox sat had put","fl","*,score",
"hl","true","hl.fl",t1);
query(false, new String[] {"q","now their fox sat had put","fl","*,score",
"hl","true","hl.fl",t1});
query("q","now their fox sat had put","fl","foofoofoo",
"hl","true","hl.fl",t1);
query(false, new String[] {"q","now their fox sat had put","fl","foofoofoo",
"hl","true","hl.fl",t1});
query("q","matchesnothing","fl","*,score");
query(false, new String[] {"q","matchesnothing","fl","*,score"});
query("q","*:*", "rows",100, "facet","true", "facet.field",t1);
query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count");
query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count", "facet.mincount",2);
query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index");
query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index", "facet.mincount",2);
query("q","*:*", "rows",100, "facet","true", "facet.field",t1,"facet.limit",1);
query("q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*");
query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.offset",1);
query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.mincount",2);
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1});
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count"});
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count", "facet.mincount",2});
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index"});
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index", "facet.mincount",2});
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1,"facet.limit",1});
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*"});
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.offset",1});
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.mincount",2});
// test faceting multiple things at once
query("q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*"
,"facet.field",t1);
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*"
,"facet.field",t1});
// test filter tagging, facet exclusion, and naming (multi-select facet support)
query("q","*:*", "rows",100, "facet","true", "facet.query","{!key=myquick}quick", "facet.query","{!key=myall ex=a}all", "facet.query","*:*"
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.query","{!key=myquick}quick", "facet.query","{!key=myall ex=a}all", "facet.query","*:*"
,"facet.field","{!key=mykey ex=a}"+t1
,"facet.field","{!key=other ex=b}"+t1
,"facet.field","{!key=again ex=a,b}"+t1
,"facet.field",t1
,"fq","{!tag=a}id:[1 TO 7]", "fq","{!tag=b}id:[3 TO 9]"
,"fq","{!tag=a}id:[1 TO 7]", "fq","{!tag=b}id:[3 TO 9]"}
);
query("q", "*:*", "facet", "true", "facet.field", "{!ex=t1}SubjectTerms_mfacet", "fq", "{!tag=t1}SubjectTerms_mfacet:(test 1)", "facet.limit", "10", "facet.mincount", "1");
query(false, new Object[] {"q", "*:*", "facet", "true", "facet.field", "{!ex=t1}SubjectTerms_mfacet", "fq", "{!tag=t1}SubjectTerms_mfacet:(test 1)", "facet.limit", "10", "facet.mincount", "1"});
// test field that is valid in schema but missing in all shards
query("q","*:*", "rows",100, "facet","true", "facet.field",missingField, "facet.mincount",2);
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",missingField, "facet.mincount",2});
// test field that is valid in schema and missing in some shards
query("q","*:*", "rows",100, "facet","true", "facet.field",oddField, "facet.mincount",2);
query(false, new Object[] {"q","*:*", "rows",100, "facet","true", "facet.field",oddField, "facet.mincount",2});
query("q","*:*", "sort",i1+" desc", "stats", "true", "stats.field", i1);
query(false, new Object[] {"q","*:*", "sort",i1+" desc", "stats", "true", "stats.field", i1});
/*** TODO: the failure may come back in "exception"
try {
@@ -263,9 +283,9 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
// This makes it much more likely that we may not get the top facet values and hence
// we turn off that checking.
handle.put("facet_fields", SKIPVAL);
query("q","*:*", "rows",0, "facet","true", "facet.field",t1,"facet.limit",5, "facet.shard.limit",5);
query(false, new Object[] {"q","*:*", "rows",0, "facet","true", "facet.field",t1,"facet.limit",5, "facet.shard.limit",5});
// check a complex key name
query("q","*:*", "rows",0, "facet","true", "facet.field","{!key='a b/c \\' \\} foo'}"+t1,"facet.limit",5, "facet.shard.limit",5);
query(false, new Object[] {"q","*:*", "rows",0, "facet","true", "facet.field","{!key='a b/c \\' \\} foo'}"+t1,"facet.limit",5, "facet.shard.limit",5});
handle.remove("facet_fields");
@@ -277,20 +297,20 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
index_specific(i, id,100, i1, 107 ,t1,"oh no, a duplicate!");
}
commit();
query("q","duplicate", "hl","true", "hl.fl", t1);
query("q","fox duplicate horses", "hl","true", "hl.fl", t1);
query("q","*:*", "rows",100);
query(false, new Object[] {"q","duplicate", "hl","true", "hl.fl", t1});
query(false, new Object[] {"q","fox duplicate horses", "hl","true", "hl.fl", t1});
query(false, new Object[] {"q","*:*", "rows",100});
}
// test debugging
handle.put("explain", SKIPVAL);
handle.put("debug", UNORDERED);
handle.put("time", SKIPVAL);
query("q","now their fox sat had put","fl","*,score",CommonParams.DEBUG_QUERY, "true");
query("q", "id:[1 TO 5]", CommonParams.DEBUG_QUERY, "true");
query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.TIMING);
query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.RESULTS);
query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.QUERY);
query(false, new Object[] {"q","now their fox sat had put","fl","*,score",CommonParams.DEBUG_QUERY, "true"});
query(false, new Object[] {"q", "id:[1 TO 5]", CommonParams.DEBUG_QUERY, "true"});
query(false, new Object[] {"q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.TIMING});
query(false, new Object[] {"q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.RESULTS});
query(false, new Object[] {"q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.QUERY});
// TODO: This test currently fails because debug info is obtained only
// on shards with matches.
@@ -343,9 +363,15 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
clients.get(clientIndex).request(request);
final String baseUrl = ((HttpSolrServer) clients.get(clientIndex)).getBaseURL().substring(
0,
((HttpSolrServer) clients.get(clientIndex)).getBaseURL().length()
- DEFAULT_COLLECTION.length() - 1);
createNewSolrServer("", baseUrl).request(request);
}
Set<Entry<String,List<Integer>>> collectionInfosEntrySet = collectionInfos.entrySet();
for (Entry<String,List<Integer>> entry : collectionInfosEntrySet) {
String collection = entry.getKey();
@@ -357,7 +383,11 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
HttpSolrServer collectionClient = new HttpSolrServer(url);
// poll for a second - it can take a moment before we are ready to serve
waitForNon404or503(collectionClient);
waitForNon403or404or503(collectionClient);
}
for (int i = 0; i < cnt; i++) {
waitForRecoveriesToFinish("awholynewcollection_" + i, zkStateReader, false);
}
List<String> collectionNameList = new ArrayList<String>();
@@ -400,7 +430,12 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
request.setPath("/admin/collections");
// we can use this client because we just want base url
clients.get(0).request(request);
final String baseUrl = ((HttpSolrServer) clients.get(0)).getBaseURL().substring(
0,
((HttpSolrServer) clients.get(0)).getBaseURL().length()
- DEFAULT_COLLECTION.length() - 1);
createNewSolrServer("", baseUrl).request(request);
// reloads may take a short while
boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
@@ -413,7 +448,7 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
request = new QueryRequest(params);
request.setPath("/admin/collections");
clients.get(0).request(request);
createNewSolrServer("", baseUrl).request(request);
// ensure it's out of the state
checkForMissingCollection(collectionName);
@@ -502,7 +537,7 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
throw new RuntimeException("Could not find a live node for collection:" + collection);
}
private void waitForNon404or503(HttpSolrServer collectionClient)
private void waitForNon403or404or503(HttpSolrServer collectionClient)
throws Exception {
SolrException exp = null;
long timeoutAt = System.currentTimeMillis() + 30000;
@@ -513,7 +548,7 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
try {
collectionClient.query(new SolrQuery("*:*"));
} catch (SolrException e) {
if (!(e.code() == 403 || e.code() == 503)) {
if (!(e.code() == 403 || e.code() == 503 || e.code() == 404)) {
throw e;
}
exp = e;
@@ -535,7 +570,6 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
boolean found = false;
boolean sliceMatch = false;
while (System.currentTimeMillis() < timeoutAt) {
solrj.getZkStateReader().updateClusterState(true);
ClusterState clusterState = solrj.getZkStateReader().getClusterState();
Map<String,Map<String,Slice>> collections = clusterState
.getCollectionStates();
@@ -544,35 +578,16 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
// did we find expectedSlices slices/shards?
if (slices.size() == expectedSlices) {
sliceMatch = true;
found = true;
// also make sure each are active
Iterator<Entry<String,Slice>> it = slices.entrySet().iterator();
while (it.hasNext()) {
Entry<String,Slice> sliceEntry = it.next();
Map<String,Replica> sliceShards = sliceEntry.getValue()
.getReplicasMap();
Iterator<Entry<String,Replica>> shardIt = sliceShards
.entrySet().iterator();
while (shardIt.hasNext()) {
Entry<String,Replica> shardEntry = shardIt.next();
if (!shardEntry.getValue().getStr(ZkStateReader.STATE_PROP)
.equals(ZkStateReader.ACTIVE)) {
found = false;
break;
}
}
}
if (found) break;
}
found = true;
break;
}
Thread.sleep(100);
Thread.sleep(500);
}
if (!found) {
printLayout();
if (!sliceMatch) {
fail("Could not find new " + expectedSlices + " slice collection called " + collectionName);
} else {
fail("Found expected # of slices, but some nodes are not active for collection called " + collectionName);
}
}
}
@@ -648,6 +663,8 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
// cloud level test mainly needed just to make sure that versions and errors are propagated correctly
private void doOptimisticLockingAndUpdating() throws Exception {
printLayout();
SolrInputDocument sd = sdoc("id", 1000, "_version_", -1);
indexDoc(sd);
@@ -717,10 +734,14 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
}
private void testANewCollectionInOneInstanceWithManualShardAssignement() throws Exception {
System.clearProperty("numShards");
List<SolrServer> collectionClients = new ArrayList<SolrServer>();
SolrServer client = clients.get(0);
otherCollectionClients.put(oneInstanceCollection2, collectionClients);
String baseUrl = ((HttpSolrServer) client).getBaseURL();
final String baseUrl = ((HttpSolrServer) client).getBaseURL().substring(
0,
((HttpSolrServer) client).getBaseURL().length()
- DEFAULT_COLLECTION.length() - 1);
createCollection(oneInstanceCollection2, collectionClients, baseUrl, 1, "slice1");
createCollection(oneInstanceCollection2, collectionClients, baseUrl, 2, "slice2");
createCollection(oneInstanceCollection2, collectionClients, baseUrl, 3, "slice2");
@@ -743,6 +764,8 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
assertAllActive(oneInstanceCollection2, solrj.getZkStateReader());
printLayout();
// TODO: enable when we don't falsely get slice1...
// solrj.getZkStateReader().getLeaderUrl(oneInstanceCollection2, "slice1", 30000);
// solrj.getZkStateReader().getLeaderUrl(oneInstanceCollection2, "slice2", 30000);
@@ -784,7 +807,10 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
private void testSearchByCollectionName() throws SolrServerException {
SolrServer client = clients.get(0);
String baseUrl = ((HttpSolrServer) client).getBaseURL();
final String baseUrl = ((HttpSolrServer) client).getBaseURL().substring(
0,
((HttpSolrServer) client).getBaseURL().length()
- DEFAULT_COLLECTION.length() - 1);
// the cores each have different names, but if we add the collection name to the url
// we should get mapped to the right core
@@ -798,7 +824,10 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
List<SolrServer> collectionClients = new ArrayList<SolrServer>();
SolrServer client = clients.get(0);
otherCollectionClients.put(oneInstanceCollection , collectionClients);
String baseUrl = ((HttpSolrServer) client).getBaseURL();
final String baseUrl = ((HttpSolrServer) client).getBaseURL().substring(
0,
((HttpSolrServer) client).getBaseURL().length()
- DEFAULT_COLLECTION.length() - 1);
createCollection(oneInstanceCollection, collectionClients, baseUrl, 1);
createCollection(oneInstanceCollection, collectionClients, baseUrl, 2);
createCollection(oneInstanceCollection, collectionClients, baseUrl, 3);
@@ -961,13 +990,17 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
int unique = 0;
for (final SolrServer client : clients) {
unique++;
final String baseUrl = ((HttpSolrServer) client).getBaseURL()
.substring(
0,
((HttpSolrServer) client).getBaseURL().length()
- DEFAULT_COLLECTION.length() -1);
final int frozeUnique = unique;
Callable call = new Callable() {
public Object call() {
HttpSolrServer server;
try {
server = new HttpSolrServer(
((HttpSolrServer) client).getBaseURL());
server = new HttpSolrServer(baseUrl);
Create createCmd = new Create();
createCmd.setCoreName(collection);
@@ -983,8 +1016,7 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
}
};
collectionClients.add(createNewSolrServer(collection,
((HttpSolrServer) client).getBaseURL()));
collectionClients.add(createNewSolrServer(collection, baseUrl));
pending.add(completionService.submit(call));
while (pending != null && pending.size() > 0) {
@@ -1043,6 +1075,7 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
if (solrj != null) {
solrj.shutdown();
}
System.clearProperty("numShards");
System.clearProperty("zkHost");
}
}

View File

@@ -373,6 +373,10 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 {
}
protected void query(Object... q) throws Exception {
query(true, q);
}
protected void query(boolean setDistribParams, Object[] q) throws Exception {
final ModifiableSolrParams params = new ModifiableSolrParams();
@@ -385,7 +389,7 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 {
validateControlData(controlRsp);
params.remove("distrib");
setDistributedParams(params);
if (setDistribParams) setDistributedParams(params);
QueryResponse rsp = queryServer(params);
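One Java detail behind the new query(boolean, Object[]) overload: the test changes above must pass an explicit new Object[] {...} (or new String[] {...}) because query(Object...) is still in scope, and a call like query(false, "q", "*:*") would autobox the boolean into the varargs array and silently run with distributed params applied. A standalone sketch of that overload-resolution point, with hypothetical print bodies:

// The two signatures mirror query(Object...) and
// query(boolean, Object[]) in BaseDistributedSearchTestCase.
public class QueryOverloadDemo {
  static void query(Object... q) {
    System.out.println("varargs overload, " + q.length + " params");
  }
  static void query(boolean setDistribParams, Object[] q) {
    System.out.println("array overload, setDistribParams=" + setDistribParams);
  }
  public static void main(String[] args) {
    query("q", "*:*");                       // varargs overload
    query(false, new Object[] {"q", "*:*"}); // array overload
    query(false, "q", "*:*");                // varargs again: boolean is boxed
  }
}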

View File

@@ -128,6 +128,7 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
zkStateReader.updateClusterState(true);
ClusterState clusterState = zkStateReader.getClusterState();
Map<String,Slice> slices = clusterState.getSlices(collection);
assertNotNull("Could not find collection:" + collection, slices);
for (Map.Entry<String,Slice> entry : slices.entrySet()) {
Map<String,Replica> shards = entry.getValue().getReplicasMap();
for (Map.Entry<String,Replica> shard : shards.entrySet()) {

View File

@@ -288,8 +288,13 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
StringBuilder sb = new StringBuilder();
for (int i = 1; i <= numJettys; i++) {
if (sb.length() > 0) sb.append(',');
JettySolrRunner j = createJetty(new File(getSolrHome()), testDir + "/jetty"
+ this.jettyIntCntr.incrementAndGet(), null, "solrconfig.xml", null);
int cnt = this.jettyIntCntr.incrementAndGet();
File jettyDir = new File(TEMP_DIR,
"solrtest-" + "jetty" + cnt + "-" + System.currentTimeMillis());
jettyDir.mkdirs();
org.apache.commons.io.FileUtils.copyDirectory(new File(getSolrHome()), jettyDir);
JettySolrRunner j = createJetty(jettyDir, testDir + "/jetty"
+ cnt, null, "solrconfig.xml", null);
jettys.add(j);
SolrServer client = createNewSolrServer(j.getLocalPort());
clients.add(client);
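A note on this last hunk: copying getSolrHome() into a fresh per-jetty directory under TEMP_DIR gives every Jetty runner a private solr home. Since BasicDistributedZkTest now creates cores and collections on several nodes at once, the per-node copies presumably keep core registrations and index directories from colliding in a shared home.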