SOLR-10278: refactored suggester API

Noble Paul 2017-04-18 17:44:47 +09:30
parent 97e2607a69
commit bb48de8b00
4 changed files with 86 additions and 39 deletions

OverseerCollectionMessageHandler.java

@ -49,7 +49,6 @@ import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.cloud.rule.ReplicaAssigner;
import org.apache.solr.cloud.rule.ReplicaAssigner.Position;
import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.recipe.Policy;
import org.apache.solr.cloud.rule.Rule;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
@@ -77,6 +76,7 @@ import org.apache.solr.handler.component.ShardHandler;
import org.apache.solr.handler.component.ShardHandlerFactory;
import org.apache.solr.handler.component.ShardRequest;
import org.apache.solr.handler.component.ShardResponse;
+import org.apache.solr.recipe.PolicyHelper;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.solr.util.RTimer;
import org.apache.solr.util.TimeOut;
@@ -727,7 +727,7 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler
        .withClusterStateProvider(new ZkClientClusterStateProvider(zkStateReader))
        .build()) {
      ClientDataProvider clientDataProvider = new ClientDataProvider(csc);
-     Map<String, List<String>> locations = Policy.getReplicaLocations(collName,
+     Map<String, List<String>> locations = PolicyHelper.getReplicaLocations(collName,
          zkStateReader.getZkClient().getJson(SOLR_AUTOSCALING_CONF_PATH, true),
          policyName, clientDataProvider, shardNames, repFactor);
      Map<Position, String> result = new HashMap<>();

Policy.java

@@ -281,45 +281,13 @@ public class Policy implements MapWriter {
    }

-   enum Hint {
+   public enum Hint {
      COLL, SHARD, SRC_NODE, TARGET_NODE
    }
  }

-  public static Map<String, List<String>> getReplicaLocations(String collName, Map<String, Object> autoScalingJson,
-                                                              String policyName, ClusterDataProvider cdp,
-                                                              List<String> shardNames,
-                                                              int repFactor) {
-    Map<String, List<String>> positionMapping = new HashMap<>();
-    for (String shardName : shardNames) positionMapping.put(shardName, new ArrayList<>(repFactor));
-    Map policyJson = (Map) Utils.getObjectByPath(autoScalingJson, false, asList("policies", policyName));
-    if (policyJson == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no such policy exists " + policyName);
-    }
-    Map defaultPolicy = (Map) Utils.getObjectByPath(autoScalingJson, false, asList("policies", "default"));
-    Map<String, Object> merged = Policy.mergePolicies(collName, policyJson, defaultPolicy);
-    Policy policy = new Policy(merged);
-    Policy.Session session = policy.createSession(cdp);
-    for (String shardName : shardNames) {
-      for (int i = 0; i < repFactor; i++) {
-        Policy.Suggester suggester = session.getSuggester(ADDREPLICA)
-            .hint(COLL, collName)
-            .hint(SHARD, shardName);
-        Map op = suggester.getOperation();
-        if (op == null) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No node can satisfy the rules "+ Utils.toJSONString(policy));
-        }
-        session = suggester.getSession();
-        positionMapping.get(shardName).add((String) op.get(CoreAdminParams.NODE));
-      }
-    }
-    return positionMapping;
-  }

  public static Map<String, Object> mergePolicies(String coll,
                                                  Map<String, Object> collPolicy,
                                                  Map<String, Object> defaultPolicy) {

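The method deleted above moves verbatim into the new PolicyHelper class below; the only in-place change to Policy is that the Suggester's Hint enum becomes public, so callers outside the class can name hints. The loop also shows the suggester's session-chaining contract: a Suggester is single-use, and once getOperation() has picked a placement, the caller must take the updated state back via getSession(). A minimal sketch of one placement step, assuming an existing Policy.Session named session (the collection and shard names are hypothetical):

// One placement step; `session` was created earlier via policy.createSession(cdp).
Policy.Suggester suggester = session.getSuggester(ADDREPLICA)
    .hint(Policy.Suggester.Hint.COLL, "myColl")    // hypothetical collection
    .hint(Policy.Suggester.Hint.SHARD, "shard1");  // hypothetical shard
Map op = suggester.getOperation();   // null when no node can satisfy the rules
if (op != null) {
  session = suggester.getSession();  // updated session accounts for this placement
  String node = (String) op.get(CoreAdminParams.NODE);
}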
PolicyHelper.java (new file)

@@ -0,0 +1,74 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.recipe;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.util.Utils;

import static java.util.Arrays.asList;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
import static org.apache.solr.recipe.Policy.Suggester.Hint.COLL;
import static org.apache.solr.recipe.Policy.Suggester.Hint.SHARD;

public class PolicyHelper {

  public static Map<String, List<String>> getReplicaLocations(String collName, Map<String, Object> autoScalingJson,
                                                              String policyName, ClusterDataProvider cdp,
                                                              List<String> shardNames,
                                                              int repFactor) {
    Map<String, List<String>> positionMapping = new HashMap<>();
    for (String shardName : shardNames) positionMapping.put(shardName, new ArrayList<>(repFactor));
    Map policyJson = (Map) Utils.getObjectByPath(autoScalingJson, false, asList("policies", policyName));
    if (policyJson == null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no such policy exists " + policyName);
    }
    Map defaultPolicy = (Map) Utils.getObjectByPath(autoScalingJson, false, asList("policies", "default"));
    Map<String, Object> merged = Policy.mergePolicies(collName, policyJson, defaultPolicy);
    Policy policy = new Policy(merged);
    Policy.Session session = policy.createSession(cdp);
    for (String shardName : shardNames) {
      for (int i = 0; i < repFactor; i++) {
        Policy.Suggester suggester = session.getSuggester(ADDREPLICA)
            .hint(COLL, collName)
            .hint(SHARD, shardName);
        Map op = suggester.getOperation();
        if (op == null) {
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No node can satisfy the rules "+ Utils.toJSONString(policy));
        }
        session = suggester.getSession();
        positionMapping.get(shardName).add((String) op.get(CoreAdminParams.NODE));
      }
    }
    return positionMapping;
  }

  public List<Map> addNode(Map<String, Object> autoScalingJson, String node, ClusterDataProvider cdp) {
    return null;
  }
}

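For existing callers the refactor is mechanical: getReplicaLocations keeps its signature and only its owning class changes, as the OverseerCollectionMessageHandler hunk above shows. A minimal caller sketch, assuming autoScalingJson is the parsed autoscaling config (e.g. read from SOLR_AUTOSCALING_CONF_PATH in ZooKeeper), cdp is an existing ClusterDataProvider, and the collection, policy, and shard names are hypothetical:

// Ask the policy engine to place two replicas for each of two shards.
Map<String, List<String>> locations = PolicyHelper.getReplicaLocations(
    "myColl", autoScalingJson, "policy1", cdp,
    Arrays.asList("shard1", "shard2"), 2);
// `locations` maps each shard name to the repFactor chosen node names,
// e.g. {shard1=[nodeA, nodeB], shard2=[nodeC, nodeD]}.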
TestPolicy.java

@@ -171,20 +171,19 @@ public class TestPolicy extends SolrTestCaseJ4 {
  }

-  /* public void testOtherTag(){
+  public void testOtherTag(){
    String rules = "{" +
        "conditions:[" +
        "{nodeRole:'!overseer', strict:false}," +
        "{replica:'<1',node:node3}," +
        "{replica:'<2',node:'#ANY', shard:'#EACH'}," +
-       "{replica:<3,shard:'#EACH', rack:'#ANY' }" +
+       "{replica:'<3',shard:'#EACH', rack:'#ANY' }" +
        "]," +
        " preferences:[" +
        "{minimize:cores , precision:2}," +
        "{maximize:freedisk, precision:50}, " +
        "{minimize:heap, precision:1000}]}";
    Map<String, Map> nodeValues = (Map<String, Map>) Utils.fromJSONString("{" +
        "node1:{cores:12, freedisk: 334, heap:10480, rack: rack4}," +
        "node2:{cores:4, freedisk: 749, heap:6873, rack: rack3}," +
@@ -194,7 +193,13 @@ public class TestPolicy extends SolrTestCaseJ4 {
    Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(rules));
    Policy.Session session = policy.createSession(getClusterDataProvider(nodeValues, clusterState));
-  }*/
+    Map op = session
+        .getSuggester(ADDREPLICA)
+        .hint(Hint.COLL, "newColl")
+        .hint(Hint.SHARD, "s1").getOperation();
+    assertNotNull(op);
+  }

  private ClusterDataProvider getClusterDataProvider(final Map<String, Map> nodeValues, String clusterState) {
    return new ClusterDataProvider() {