mirror of https://github.com/apache/lucene.git
SOLR-4140: Allow access to the collections API through CloudSolrServer without referencing an existing collection.
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1421071 13f79535-47bb-0310-9956-ffa450edef68
parent 930354533e
commit 38d16dc7b7
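In application terms, this change lets a SolrJ client bootstrap a cluster's first collection entirely through CloudSolrServer. A minimal sketch of such a client (the ZooKeeper address and collection name are illustrative; the literal parameter names mirror the OverseerCollectionProcessor constants used by the tests in this commit):

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class CreateFirstCollection {
  public static void main(String[] args) throws Exception {
    // No default collection is set; before this commit such a request
    // could not be routed and failed with a SolrServerException.
    CloudSolrServer server = new CloudSolrServer("127.0.0.1:2181");
    try {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", "CREATE");
      params.set("name", "mycollection");
      params.set("numShards", 2);
      params.set("replicationFactor", 1);
      SolrRequest request = new QueryRequest(params);
      // Requests to this path are now routed to any live node rather than
      // to the shards of a (possibly nonexistent) collection.
      request.setPath("/admin/collections");
      server.request(request);
    } finally {
      server.shutdown();
    }
  }
}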
@@ -143,6 +143,9 @@ New Features
 * SOLR-4166: LBHttpSolrServer ignores ResponseParser passed in constructor.
   (Steve Molloy via Mark Miller)
 
+* SOLR-4140: Allow access to the collections API through CloudSolrServer
+  without referencing an existing collection. (Per Steffensen via Mark Miller)
+
 Optimizations
 ----------------------
 
@@ -748,8 +748,37 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
     for (int i = 0; i < cnt; i++) {
       int numShards = _TestUtil.nextInt(random(), 0, shardCount) + 1;
       int replicationFactor = _TestUtil.nextInt(random(), 0, 3) + 2;
-      int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-      createCollection(collectionInfos, i, numShards, replicationFactor, maxShardsPerNode);
+      int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
+          .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
+
+      CloudSolrServer client = null;
+      try {
+        if (i == 0) {
+          // Test if we can create a collection through CloudSolrServer where
+          // you haven't set a default collection.
+          // This is nice because you want to be able to create your first
+          // collection using CloudSolrServer, and in such a case there is
+          // nothing reasonable to set as the default collection.
+          client = createCloudClient(null);
+        } else if (i == 1) {
+          // Test if we can create a collection through CloudSolrServer where
+          // you have set the default collection to a non-existing collection.
+          // This is nice because you want to be able to create your first
+          // collection using CloudSolrServer, and in such a case there is
+          // nothing reasonable to set as the default collection, but you might
+          // want to use the same CloudSolrServer throughout the entire
+          // lifetime of your client application, so it is nice to be able to
+          // set a default collection on this CloudSolrServer once and for all
+          // and use this CloudSolrServer to create the collection.
+          client = createCloudClient("awholynewcollection_" + i);
+        }
+
+        createCollection(collectionInfos, "awholynewcollection_" + i,
+            numShards, replicationFactor, maxShardsPerNode, client);
+      } finally {
+        if (client != null) client.shutdown();
+      }
     }
 
     Set<Entry<String,List<Integer>>> collectionInfosEntrySet = collectionInfos.entrySet();
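A quick worked example of the maxShardsPerNode arithmetic in the hunk above (values chosen arbitrarily):

// With numShards = 3, replicationFactor = 4 and 5 live nodes:
// 3 * 4 = 12 replicas to place; 12 / 5 = 2 (integer division); + 1 = 3.
// Capacity check: 3 shards-per-node * 5 nodes = 15 slots >= 12 replicas.
int numShards = 3, replicationFactor = 4, liveNodeCount = 5;
int maxShardsPerNode = ((numShards * replicationFactor) / liveNodeCount) + 1;
// maxShardsPerNode == 3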
@@ -882,7 +911,12 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
     int replicationFactor = 2;
     int maxShardsPerNode = 1;
     collectionInfos = new HashMap<String,List<Integer>>();
-    createCollection(collectionInfos, cnt, numShards, replicationFactor, maxShardsPerNode);
+    CloudSolrServer client = createCloudClient("awholynewcollection_" + cnt);
+    try {
+      createCollection(collectionInfos, "awholynewcollection_" + cnt, numShards, replicationFactor, maxShardsPerNode, client);
+    } finally {
+      client.shutdown();
+    }
 
     // TODO: REMOVE THE SLEEP IN THE METHOD CALL WHEN WE HAVE COLLECTION API
     // RESPONSES
@@ -893,30 +927,34 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
   }
 
   protected void createCollection(Map<String,List<Integer>> collectionInfos,
-      int i, int numShards, int numReplica, int maxShardsPerNode) throws SolrServerException, IOException {
+      String collectionName, int numShards, int numReplicas, int maxShardsPerNode, SolrServer client) throws SolrServerException, IOException {
     ModifiableSolrParams params = new ModifiableSolrParams();
     params.set("action", CollectionAction.CREATE.toString());
 
     params.set(OverseerCollectionProcessor.NUM_SLICES, numShards);
-    params.set(OverseerCollectionProcessor.REPLICATION_FACTOR, numReplica);
+    params.set(OverseerCollectionProcessor.REPLICATION_FACTOR, numReplicas);
     params.set(OverseerCollectionProcessor.MAX_SHARDS_PER_NODE, maxShardsPerNode);
-    String collectionName = "awholynewcollection_" + i;
 
     int clientIndex = random().nextInt(2);
     List<Integer> list = new ArrayList<Integer>();
     list.add(numShards);
-    list.add(numReplica);
+    list.add(numReplicas);
     list.add(maxShardsPerNode);
     collectionInfos.put(collectionName, list);
     params.set("name", collectionName);
     SolrRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
 
-    final String baseUrl = ((HttpSolrServer) clients.get(clientIndex)).getBaseURL().substring(
-        0,
-        ((HttpSolrServer) clients.get(clientIndex)).getBaseURL().length()
-            - DEFAULT_COLLECTION.length() - 1);
-
-    createNewSolrServer("", baseUrl).request(request);
+    if (client == null) {
+      final String baseUrl = ((HttpSolrServer) clients.get(clientIndex)).getBaseURL().substring(
+          0,
+          ((HttpSolrServer) clients.get(clientIndex)).getBaseURL().length()
+              - DEFAULT_COLLECTION.length() - 1);
+
+      createNewSolrServer("", baseUrl).request(request);
+    } else {
+      client.request(request);
+    }
   }
 
   private boolean waitForReloads(String collectionName, Map<String,Long> urlToTimeBefore) throws SolrServerException, IOException {
@@ -163,9 +163,10 @@ public class CloudSolrServer extends SolrServer {
   }
 
   @Override
-  public NamedList<Object> request(SolrRequest request) throws SolrServerException, IOException {
+  public NamedList<Object> request(SolrRequest request)
+      throws SolrServerException, IOException {
     connect();
 
     // TODO: if you can hash here, you could favor the shard leader
 
     ClusterState clusterState = zkStateReader.getClusterState();
@@ -176,95 +177,111 @@ public class CloudSolrServer extends SolrServer {
       sendToLeaders = true;
       replicas = new ArrayList<String>();
     }
 
     SolrParams reqParams = request.getParams();
     if (reqParams == null) {
       reqParams = new ModifiableSolrParams();
     }
-    String collection = reqParams.get("collection", defaultCollection);
-
-    if (collection == null) {
-      throw new SolrServerException("No collection param specified on request and no default collection has been set.");
-    }
-
-    // Extract each comma separated collection name and store in a List.
-    List<String> collectionList = StrUtils.splitSmart(collection, ",", true);
-
-    // TODO: not a big deal because of the caching, but we could avoid looking at every shard
-    // when getting leaders if we tweaked some things
-
-    // Retrieve slices from the cloud state and, for each collection specified,
-    // add it to the Map of slices.
-    Map<String,Slice> slices = new HashMap<String,Slice>();
-    for (String collectionName : collectionList) {
-      ClientUtils.addSlices(slices, collectionName, clusterState.getSlices(collectionName), true);
-    }
-
-    Set<String> liveNodes = clusterState.getLiveNodes();
-
-    List<String> theUrlList;
-    synchronized (cachLock) {
-      List<String> leaderUrlList = leaderUrlLists.get(collection);
-      List<String> urlList = urlLists.get(collection);
-      List<String> replicasList = replicasLists.get(collection);
-
-      if ((sendToLeaders && leaderUrlList == null) || (!sendToLeaders
-          && urlList == null)
-          || clusterState.hashCode() != this.lastClusterStateHashCode) {
-        // build a map of unique nodes
-        // TODO: allow filtering by group, role, etc
-        Map<String,ZkNodeProps> nodes = new HashMap<String,ZkNodeProps>();
-        List<String> urlList2 = new ArrayList<String>();
-        for (Slice slice : slices.values()) {
-          for (ZkNodeProps nodeProps : slice.getReplicasMap().values()) {
-            ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
-            String node = coreNodeProps.getNodeName();
-            if (!liveNodes.contains(coreNodeProps.getNodeName())
-                || !coreNodeProps.getState().equals(ZkStateReader.ACTIVE)) continue;
-            if (nodes.put(node, nodeProps) == null) {
-              if (!sendToLeaders || (sendToLeaders && coreNodeProps.isLeader())) {
-                String url = coreNodeProps.getCoreUrl();
-                urlList2.add(url);
-              } else if (sendToLeaders) {
-                String url = coreNodeProps.getCoreUrl();
-                replicas.add(url);
+    List<String> theUrlList = new ArrayList<String>();
+    if (request.getPath().equals("/admin/collections")) {
+      Set<String> liveNodes = clusterState.getLiveNodes();
+      for (String liveNode : liveNodes) {
+        int splitPointBetweenHostPortAndContext = liveNode.indexOf("_");
+        theUrlList.add("http://"
+            + liveNode.substring(0, splitPointBetweenHostPortAndContext) + "/"
+            + liveNode.substring(splitPointBetweenHostPortAndContext + 1));
+      }
+    } else {
+      String collection = reqParams.get("collection", defaultCollection);
+
+      if (collection == null) {
+        throw new SolrServerException(
+            "No collection param specified on request and no default collection has been set.");
+      }
+
+      // Extract each comma separated collection name and store in a List.
+      List<String> collectionList = StrUtils.splitSmart(collection, ",", true);
+
+      // TODO: not a big deal because of the caching, but we could avoid looking
+      // at every shard
+      // when getting leaders if we tweaked some things
+
+      // Retrieve slices from the cloud state and, for each collection
+      // specified,
+      // add it to the Map of slices.
+      Map<String,Slice> slices = new HashMap<String,Slice>();
+      for (String collectionName : collectionList) {
+        ClientUtils.addSlices(slices, collectionName,
+            clusterState.getSlices(collectionName), true);
+      }
+      Set<String> liveNodes = clusterState.getLiveNodes();
+
+      synchronized (cachLock) {
+        List<String> leaderUrlList = leaderUrlLists.get(collection);
+        List<String> urlList = urlLists.get(collection);
+        List<String> replicasList = replicasLists.get(collection);
+
+        if ((sendToLeaders && leaderUrlList == null)
+            || (!sendToLeaders && urlList == null)
+            || clusterState.hashCode() != this.lastClusterStateHashCode) {
+          // build a map of unique nodes
+          // TODO: allow filtering by group, role, etc
+          Map<String,ZkNodeProps> nodes = new HashMap<String,ZkNodeProps>();
+          List<String> urlList2 = new ArrayList<String>();
+          for (Slice slice : slices.values()) {
+            for (ZkNodeProps nodeProps : slice.getReplicasMap().values()) {
+              ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
+              String node = coreNodeProps.getNodeName();
+              if (!liveNodes.contains(coreNodeProps.getNodeName())
+                  || !coreNodeProps.getState().equals(ZkStateReader.ACTIVE)) continue;
+              if (nodes.put(node, nodeProps) == null) {
+                if (!sendToLeaders
+                    || (sendToLeaders && coreNodeProps.isLeader())) {
+                  String url = coreNodeProps.getCoreUrl();
+                  urlList2.add(url);
+                } else if (sendToLeaders) {
+                  String url = coreNodeProps.getCoreUrl();
+                  replicas.add(url);
                 }
               }
             }
           }
 
-      if (sendToLeaders) {
-        this.leaderUrlLists.put(collection, urlList2);
-        leaderUrlList = urlList2;
-        this.replicasLists.put(collection, replicas);
-        replicasList = replicas;
-      } else {
-        this.urlLists.put(collection, urlList2);
-        urlList = urlList2;
-      }
-      this.lastClusterStateHashCode = clusterState.hashCode();
-    }
+          if (sendToLeaders) {
+            this.leaderUrlLists.put(collection, urlList2);
+            leaderUrlList = urlList2;
+            this.replicasLists.put(collection, replicas);
+            replicasList = replicas;
+            theUrlList = new ArrayList<String>(leaderUrlList.size());
+            theUrlList.addAll(leaderUrlList);
+          } else {
+            this.urlLists.put(collection, urlList2);
+            urlList = urlList2;
+            theUrlList = new ArrayList<String>(urlList.size());
+            theUrlList.addAll(urlList);
+          }
+          Collections.shuffle(theUrlList, rand);
+          if (sendToLeaders) {
+            ArrayList<String> theReplicas = new ArrayList<String>(
+                replicasList.size());
+            theReplicas.addAll(replicasList);
+            Collections.shuffle(theReplicas, rand);
+            // System.out.println("leaders:" + theUrlList);
+            // System.out.println("replicas:" + theReplicas);
+            theUrlList.addAll(theReplicas);
+          }
+          this.lastClusterStateHashCode = clusterState.hashCode();
+        }
+      }
 
-      if (sendToLeaders) {
-        theUrlList = new ArrayList<String>(leaderUrlList.size());
-        theUrlList.addAll(leaderUrlList);
-      } else {
-        theUrlList = new ArrayList<String>(urlList.size());
-        theUrlList.addAll(urlList);
-      }
-      Collections.shuffle(theUrlList, rand);
-      if (sendToLeaders) {
-        ArrayList<String> theReplicas = new ArrayList<String>(
-            replicasList.size());
-        theReplicas.addAll(replicasList);
-        Collections.shuffle(theReplicas, rand);
-        // System.out.println("leaders:" + theUrlList);
-        // System.out.println("replicas:" + theReplicas);
-        theUrlList.addAll(theReplicas);
-      }
     }
 
-    // System.out.println("########################## MAKING REQUEST TO " + theUrlList);
+    // System.out.println("########################## MAKING REQUEST TO " +
+    // theUrlList);
 
     LBHttpSolrServer.Req req = new LBHttpSolrServer.Req(request, theUrlList);
     LBHttpSolrServer.Rsp rsp = lbServer.request(req);
     return rsp.getResponse();
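The /admin/collections branch above derives a base URL from each live node's ZooKeeper node name by splitting on the first underscore. A standalone restatement of that conversion (the node-name format is assumed from the code above):

// Converts a live-node name as stored in ZooKeeper, e.g. "127.0.0.1:8983_solr",
// into a base URL, e.g. "http://127.0.0.1:8983/solr", mirroring the logic in
// CloudSolrServer.request().
static String liveNodeToBaseUrl(String liveNode) {
  int splitPoint = liveNode.indexOf('_'); // host:port ends, servlet context begins
  return "http://" + liveNode.substring(0, splitPoint) + "/"
      + liveNode.substring(splitPoint + 1);
}
// liveNodeToBaseUrl("127.0.0.1:8983_solr") -> "http://127.0.0.1:8983/solr"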
@@ -199,13 +199,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     assert(cloudInit == false);
     cloudInit = true;
     try {
-      CloudSolrServer server = new CloudSolrServer(zkServer.getZkAddress());
-      server.setDefaultCollection(DEFAULT_COLLECTION);
-      server.getLbServer().getHttpClient().getParams()
-          .setParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 5000);
-      server.getLbServer().getHttpClient().getParams()
-          .setParameter(CoreConnectionPNames.SO_TIMEOUT, 20000);
-      cloudClient = server;
+      cloudClient = createCloudClient(DEFAULT_COLLECTION);
 
       cloudClient.connect();
     } catch (MalformedURLException e) {
@@ -217,7 +211,17 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     chaosMonkey = new ChaosMonkey(zkServer, zkStateReader, DEFAULT_COLLECTION,
         shardToJetty, shardToLeaderJetty);
   }
 
+  protected CloudSolrServer createCloudClient(String defaultCollection)
+      throws MalformedURLException {
+    CloudSolrServer server = new CloudSolrServer(zkServer.getZkAddress());
+    if (defaultCollection != null) server.setDefaultCollection(defaultCollection);
+    server.getLbServer().getHttpClient().getParams()
+        .setParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 5000);
+    server.getLbServer().getHttpClient().getParams()
+        .setParameter(CoreConnectionPNames.SO_TIMEOUT, 20000);
+    return server;
+  }
 
   @Override
   protected void createServers(int numServers) throws Exception {