SOLR-3920: Fix server list caching in CloudSolrServer when using more than one collection with the same instance.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1402360 13f79535-47bb-0310-9956-ffa450edef68
Mark Robert Miller 2012-10-26 00:30:03 +00:00
parent 8ea018af63
commit 1233f3611f
3 changed files with 90 additions and 55 deletions
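
For context, here is a minimal sketch of the usage pattern this fix targets: a single CloudSolrServer instance issuing requests against two different collections. Before this change the client cached one flat URL list, so the second collection could be handed the first collection's cached servers. The collection names and ZooKeeper address below are hypothetical, and the standard SolrJ 4.x CloudSolrServer/SolrQuery API is assumed.

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrServer;

public class MultiCollectionSketch {
  public static void main(String[] args) throws Exception {
    // One shared client instance (hypothetical ZooKeeper address).
    CloudSolrServer server = new CloudSolrServer("localhost:9983");

    // First request: the client builds and caches the URL list for collection1.
    server.setDefaultCollection("collection1");
    server.query(new SolrQuery("*:*"));

    // Second request against a different collection with the same instance.
    // Before this patch the single cached list from collection1 was reused
    // whenever the cluster state hash had not changed.
    server.setDefaultCollection("collection2");
    server.query(new SolrQuery("*:*"));

    server.shutdown();
  }
}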

CHANGES.txt

@@ -104,6 +104,9 @@ Bug Fixes
* SOLR-3981: Fixed bug that resulted in document boosts being compounded in
<copyField/> destination fields. (hossman)
* SOLR-3920: Fix server list caching in CloudSolrServer when using more than one
collection with the same instance. (Grzegorz Sobczyk, Mark Miller)
Other Changes
----------------------

CloudSolrServer.java

@@ -63,11 +63,12 @@ public class CloudSolrServer extends SolrServer {
private HttpClient myClient;
Random rand = new Random();
private Object cachLock = new Object();
// since the state shouldn't change often, should be very cheap reads
private volatile List<String> urlList;
private volatile List<String> leaderUrlList;
private volatile List<String> replicasList;
private Map<String,List<String>> urlLists = new HashMap<String,List<String>>();
private Map<String,List<String>> leaderUrlLists = new HashMap<String,List<String>>();
private Map<String,List<String>> replicasLists = new HashMap<String,List<String>>();
private volatile int lastClusterStateHashCode;
@@ -201,54 +202,67 @@ public class CloudSolrServer extends SolrServer {
Set<String> liveNodes = clusterState.getLiveNodes();
if (sendToLeaders && leaderUrlList == null || !sendToLeaders && urlList == null || clusterState.hashCode() != this.lastClusterStateHashCode) {
// build a map of unique nodes
// TODO: allow filtering by group, role, etc
Map<String,ZkNodeProps> nodes = new HashMap<String,ZkNodeProps>();
List<String> urlList = new ArrayList<String>();
for (Slice slice : slices.values()) {
for (ZkNodeProps nodeProps : slice.getReplicasMap().values()) {
ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
String node = coreNodeProps.getNodeName();
if (!liveNodes.contains(coreNodeProps.getNodeName())
|| !coreNodeProps.getState().equals(ZkStateReader.ACTIVE)) continue;
if (nodes.put(node, nodeProps) == null) {
if (!sendToLeaders || (sendToLeaders && coreNodeProps.isLeader())) {
String url = coreNodeProps.getCoreUrl();
urlList.add(url);
} else if (sendToLeaders) {
String url = coreNodeProps.getCoreUrl();
replicas.add(url);
List<String> theUrlList;
synchronized (cachLock) {
System.out.println("work with collection:" + collection);
List<String> leaderUrlList = leaderUrlLists.get(collection);
List<String> urlList = urlLists.get(collection);
List<String> replicasList = replicasLists.get(collection);
if ((sendToLeaders && leaderUrlList == null) || (!sendToLeaders
&& urlList == null)
|| clusterState.hashCode() != this.lastClusterStateHashCode) {
System.out.println("build a new map for " + collection);
// build a map of unique nodes
// TODO: allow filtering by group, role, etc
Map<String,ZkNodeProps> nodes = new HashMap<String,ZkNodeProps>();
List<String> urlList2 = new ArrayList<String>();
for (Slice slice : slices.values()) {
for (ZkNodeProps nodeProps : slice.getReplicasMap().values()) {
ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
String node = coreNodeProps.getNodeName();
if (!liveNodes.contains(coreNodeProps.getNodeName())
|| !coreNodeProps.getState().equals(ZkStateReader.ACTIVE)) continue;
if (nodes.put(node, nodeProps) == null) {
if (!sendToLeaders || (sendToLeaders && coreNodeProps.isLeader())) {
String url = coreNodeProps.getCoreUrl();
urlList2.add(url);
} else if (sendToLeaders) {
String url = coreNodeProps.getCoreUrl();
replicas.add(url);
}
}
}
}
if (sendToLeaders) {
this.leaderUrlLists.put(collection, urlList2);
leaderUrlList = urlList2;
this.replicasLists.put(collection, replicas);
replicasList = replicas;
} else {
this.urlLists.put(collection, urlList2);
urlList = urlList2;
}
this.lastClusterStateHashCode = clusterState.hashCode();
}
if (sendToLeaders) {
this.leaderUrlList = urlList;
this.replicasList = replicas;
theUrlList = new ArrayList<String>(leaderUrlList.size());
theUrlList.addAll(leaderUrlList);
} else {
this.urlList = urlList;
theUrlList = new ArrayList<String>(urlList.size());
theUrlList.addAll(urlList);
}
Collections.shuffle(theUrlList, rand);
if (sendToLeaders) {
ArrayList<String> theReplicas = new ArrayList<String>(
replicasList.size());
theReplicas.addAll(replicasList);
Collections.shuffle(theReplicas, rand);
// System.out.println("leaders:" + theUrlList);
// System.out.println("replicas:" + theReplicas);
theUrlList.addAll(theReplicas);
}
this.lastClusterStateHashCode = clusterState.hashCode();
}
List<String> theUrlList;
if (sendToLeaders) {
theUrlList = new ArrayList<String>(leaderUrlList.size());
theUrlList.addAll(leaderUrlList);
} else {
theUrlList = new ArrayList<String>(urlList.size());
theUrlList.addAll(urlList);
}
Collections.shuffle(theUrlList, rand);
if (sendToLeaders) {
ArrayList<String> theReplicas = new ArrayList<String>(replicasList.size());
theReplicas.addAll(replicasList);
Collections.shuffle(theReplicas, rand);
// System.out.println("leaders:" + theUrlList);
// System.out.println("replicas:" + theReplicas);
theUrlList.addAll(theReplicas);
}
// System.out.println("########################## MAKING REQUEST TO " + theUrlList);
@@ -276,15 +290,19 @@ public class CloudSolrServer extends SolrServer {
return lbServer;
}
List<String> getUrlList() {
return urlList;
// for tests
Map<String,List<String>> getUrlLists() {
return urlLists;
}
List<String> getLeaderUrlList() {
return leaderUrlList;
//for tests
Map<String,List<String>> getLeaderUrlLists() {
return leaderUrlLists;
}
List<String> getReplicasList() {
return replicasList;
//for tests
Map<String,List<String>> getReplicasLists() {
return replicasLists;
}
}
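
The core of the change above is that the cached URL lists are now keyed by collection name, guarded by cachLock, and rebuilt when the observed cluster state hash changes. Below is a stand-alone sketch of that caching pattern; the class name and builder interface are illustrative only and are not part of SolrJ.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch of the per-collection cache the patch introduces: lists are cached
// per collection name and rebuilt when the cluster state hash moves on.
class PerCollectionUrlCache {

  // Hypothetical callback that knows how to build the URL list for a collection.
  interface UrlListBuilder {
    List<String> build(String collection);
  }

  private final Object lock = new Object();
  private final Map<String,List<String>> urlLists = new HashMap<String,List<String>>();
  private int lastClusterStateHashCode;

  List<String> get(String collection, int clusterStateHashCode, UrlListBuilder builder) {
    synchronized (lock) {
      List<String> cached = urlLists.get(collection);
      // Rebuild if this collection was never cached or the cluster state changed.
      if (cached == null || clusterStateHashCode != lastClusterStateHashCode) {
        cached = builder.build(collection);
        urlLists.put(collection, cached);
        lastClusterStateHashCode = clusterStateHashCode;
      }
      // Return a copy so callers can shuffle it without mutating the cache,
      // mirroring how request() copies into theUrlList before Collections.shuffle.
      return new ArrayList<String>(cached);
    }
  }
}

As in the patch, lastClusterStateHashCode here is a single field shared by all collections, so rebuilding one collection's list updates the hash for the others as well.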

CloudSolrServerTest.java

@@ -25,6 +25,7 @@ import java.util.Set;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
import org.apache.solr.cloud.AbstractZkTestCase;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.util.ExternalPaths;
import org.junit.After;
import org.junit.AfterClass;
@@ -85,6 +86,7 @@ public class CloudSolrServerTest extends AbstractFullDistribZkTestBase {
@Override
public void doTest() throws Exception {
assertNotNull(cloudClient);
handle.clear();
handle.put("QTime", SKIPVAL);
@@ -99,9 +101,9 @@ public class CloudSolrServerTest extends AbstractFullDistribZkTestBase {
// compare leaders list
CloudJettyRunner shard1Leader = shardToLeaderJetty.get("shard1");
CloudJettyRunner shard2Leader = shardToLeaderJetty.get("shard2");
assertEquals(2, cloudClient.getLeaderUrlList().size());
assertEquals(2, cloudClient.getLeaderUrlLists().get("collection1").size());
HashSet<String> leaderUrlSet = new HashSet<String>();
leaderUrlSet.addAll(cloudClient.getLeaderUrlList());
leaderUrlSet.addAll(cloudClient.getLeaderUrlLists().get("collection1"));
assertTrue("fail check for leader:" + shard1Leader.url + " in "
+ leaderUrlSet, leaderUrlSet.contains(shard1Leader.url + "/"));
assertTrue("fail check for leader:" + shard2Leader.url + " in "
@@ -120,9 +122,9 @@ public class CloudSolrServerTest extends AbstractFullDistribZkTestBase {
replicas.remove(shard1Leader.url);
replicas.remove(shard2Leader.url);
assertEquals(replicas.size(), cloudClient.getReplicasList().size());
assertEquals(replicas.size(), cloudClient.getReplicasLists().get("collection1").size());
for (String url : cloudClient.getReplicasList()) {
for (String url : cloudClient.getReplicasLists().get("collection1")) {
assertTrue("fail check for replica:" + url + " in " + replicas,
replicas.contains(stripTrailingSlash(url)));
}
@@ -135,5 +137,17 @@ public class CloudSolrServerTest extends AbstractFullDistribZkTestBase {
}
return url;
}
protected void indexr(Object... fields) throws Exception {
SolrInputDocument doc = getDoc(fields);
indexDoc(doc);
}
SolrInputDocument getDoc(Object... fields) throws Exception {
SolrInputDocument doc = new SolrInputDocument();
addFields(doc, fields);
return doc;
}
}
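
The updated assertions above hard-code "collection1" when reading the per-collection maps. A hypothetical helper, not part of the committed test, shows how those checks could be expressed for any collection name; it assumes it lives inside CloudSolrServerTest and that java.util.List is available alongside the java.util classes the test already uses.

// Hypothetical helper for CloudSolrServerTest; relies on the cloudClient and
// shardToLeaderJetty fields and the getLeaderUrlLists() accessor shown above.
protected void assertLeadersCached(String collection, String... shards) {
  List<String> cachedLeaders = cloudClient.getLeaderUrlLists().get(collection);
  assertNotNull("no cached leader list for " + collection, cachedLeaders);
  assertEquals(shards.length, cachedLeaders.size());
  HashSet<String> leaderUrlSet = new HashSet<String>(cachedLeaders);
  for (String shard : shards) {
    CloudJettyRunner leader = shardToLeaderJetty.get(shard);
    assertTrue("fail check for leader:" + leader.url + " in " + leaderUrlSet,
        leaderUrlSet.contains(leader.url + "/"));
  }
}

// Example use, mirroring the assertions in doTest():
//   assertLeadersCached("collection1", "shard1", "shard2");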