SOLR-10278: fix errors in move replica suggester

commit 56859741c2
parent 2818ee91dd
Author: Noble Paul
Date: 2017-05-02 20:21:37 +09:30

6 changed files with 95 additions and 15 deletions

SolrClientDataProvider.java

@@ -35,6 +35,7 @@ import org.apache.solr.client.solrj.request.GenericSolrRequest;
 import org.apache.solr.client.solrj.response.SimpleSolrResponse;
 import org.apache.solr.cloud.autoscaling.ClusterDataProvider;
 import org.apache.solr.cloud.autoscaling.Policy.ReplicaInfo;
+import org.apache.solr.common.MapWriter;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
@@ -55,12 +56,13 @@ import org.slf4j.LoggerFactory;
 /**Class that implements {@link ClusterStateProvider} accepting a SolrClient
  *
  */
-public class SolrClientDataProvider implements ClusterDataProvider {
+public class SolrClientDataProvider implements ClusterDataProvider, MapWriter {
 
   private final CloudSolrClient solrClient;
   private Set<String> liveNodes;
   private Map<String,Object> snitchSession = new HashMap<>();
   private final Map<String, Map<String, Map<String, List<ReplicaInfo>>>> data = new HashMap<>();
+  private Map<String,Map> nodeVsTags = new HashMap<>();
 
   public SolrClientDataProvider(CloudSolrClient solrClient) {
     this.solrClient = solrClient;
@@ -94,6 +96,7 @@ public class SolrClientDataProvider implements ClusterDataProvider {
     AutoScalingSnitch snitch = new AutoScalingSnitch();
     ClientSnitchCtx ctx = new ClientSnitchCtx(null, node, snitchSession, solrClient);
     snitch.getRemoteInfo(node, new HashSet<>(tags), ctx);
+    nodeVsTags.put(node, ctx.getTags());
     return ctx.getTags();
   }
@@ -202,4 +205,12 @@ public class SolrClientDataProvider implements ClusterDataProvider {
     }
   }
 
+  @Override
+  public void writeMap(EntryWriter ew) throws IOException {
+    ew.put("liveNodes", liveNodes);
+    ew.put("replicaInfo", Utils.getDeepCopy(data, 5));
+    ew.put("nodeValues", nodeVsTags);
+  }
 }
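A side note on the MapWriter addition: it makes the provider serializable straight to JSON, which is handy for inspecting what the policy engine sees. A minimal sketch, assuming Utils.toJSONString (org.apache.solr.common.util.Utils) accepts a MapWriter and that cloudSolrClient is an existing CloudSolrClient; neither assumption is shown in this commit:

// Hypothetical debugging snippet, not part of this commit.
// Assumes Utils.toJSONString can serialize a MapWriter.
SolrClientDataProvider provider = new SolrClientDataProvider(cloudSolrClient);
String json = Utils.toJSONString(provider);
// json now carries the "liveNodes", "replicaInfo" and "nodeValues"
// entries written by writeMap(EntryWriter) above.
System.out.println(json);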

Clause.java

@@ -189,7 +189,8 @@ public class Clause implements MapWriter, Comparable<Clause> {
     } else {
       if (!tag.isPass(row)) result.set(TestStatus.FAIL);
     }
-    if (result.get() == FAIL) row.violations.add(this);
+    if (result.get() == FAIL)
+      row.violations.add(this);
     return result.get();
   }

MoveReplicaSuggester.java

@@ -44,17 +44,20 @@ public class MoveReplicaSuggester extends Suggester {
       Row fromRow = fromReplica.second();
       String coll = fromReplica.first().collection;
       String shard = fromReplica.first().shard;
-      Pair<Row, Policy.ReplicaInfo> tmpRow = fromRow.removeReplica(coll, shard);
-      if (tmpRow.first() == null) {
+      Pair<Row, Policy.ReplicaInfo> pair = fromRow.removeReplica(coll, shard);
+      Row tmpRow = pair.first();
+      if (tmpRow == null) {
         //no such replica available
         continue;
       }
+      tmpRow.violations.clear();
       for (Clause clause : session.expandedClauses) {
-        if (strict || clause.strict) clause.test(tmpRow.first());
+        if (strict || clause.strict) {
+          clause.test(tmpRow);
+        }
       }
       int i = getMatrix().indexOf(fromRow);
-      if (tmpRow.first().violations.isEmpty()) {
+      if (tmpRow.violations.isEmpty()) {
         for (int j = getMatrix().size() - 1; j > i; j--) {
           Row targetRow = getMatrix().get(j);
           if (!isAllowed(targetRow.node, Hint.TARGET_NODE)) continue;
@@ -70,7 +73,7 @@ public class MoveReplicaSuggester extends Suggester {
             COLLECTION_PROP, coll,
             SHARD_ID_PROP, shard,
             NODE, fromRow.node,
-            REPLICA, tmpRow.second().name,
+            REPLICA, pair.second().name,
             "targetNode", targetRow.node);
       }
     }

Policy.java

@@ -190,7 +190,11 @@ public class Policy implements MapWriter {
         p.setApproxVal(tmpMatrix);
       }
       //approximate values are set now. Let's do recursive sorting
-      Collections.sort(matrix, (r1, r2) -> clusterPreferences.get(0).compare(r1, r2, true));
+      Collections.sort(matrix, (r1, r2) -> {
+        int result = clusterPreferences.get(0).compare(r1, r2, true);
+        if (result == 0) result = clusterPreferences.get(0).compare(r1, r2, false);
+        return result;
+      });
     }
 
     for (Clause clause : expandedClauses) {
@@ -232,8 +236,8 @@
   }
 
-  public Session createSession(ClusterDataProvider snitch) {
-    return new Session(snitch);
+  public Session createSession(ClusterDataProvider dataProvider) {
+    return new Session(dataProvider);
   }
 
   enum SortParam {
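The comparator change above sorts on the approximate (precision-rounded) value first and, when that ties, falls back to the exact value. A self-contained sketch of the same two-pass pattern with hypothetical names (this is not the Solr API):

import java.util.Arrays;
import java.util.Comparator;

// Illustration only: order primarily by a rounded metric, then break ties
// with the exact metric, mirroring compare(r1, r2, true) followed by
// compare(r1, r2, false) above.
class TieBreakSortDemo {
  static class NodeLoad {
    final String node;
    final double exactCores; // exact value, like Cell.val
    final long approxCores;  // rounded to precision, like Cell.val_
    NodeLoad(String node, double exactCores, long approxCores) {
      this.node = node;
      this.exactCores = exactCores;
      this.approxCores = approxCores;
    }
  }

  public static void main(String[] args) {
    NodeLoad[] matrix = {
        new NodeLoad("n1", 4.2, 4),
        new NodeLoad("n2", 4.9, 4), // ties with n1 on the approximate pass
        new NodeLoad("n3", 2.0, 2),
    };
    Comparator<NodeLoad> approx = Comparator.comparingLong(n -> n.approxCores);
    Comparator<NodeLoad> exact = Comparator.comparingDouble(n -> n.exactCores);
    Arrays.sort(matrix, approx.thenComparing(exact));
    // resulting order: n3 (2.0), n1 (4.2), n2 (4.9)
  }
}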

Preference.java

@@ -44,15 +44,15 @@ class Preference implements MapWriter {
   // there are 2 modes of compare.
   // recursive, it uses the precision to tie & when there is a tie use the next preference to compare
   // in non-recursive mode, precision is not taken into consideration and sort is done on actual value
-  int compare(Row r1, Row r2, boolean recursive) {
-    Object o1 = recursive ? r1.cells[idx].val_ : r1.cells[idx].val;
-    Object o2 = recursive ? r2.cells[idx].val_ : r2.cells[idx].val;
+  int compare(Row r1, Row r2, boolean useApprox) {
+    Object o1 = useApprox ? r1.cells[idx].val_ : r1.cells[idx].val;
+    Object o2 = useApprox ? r2.cells[idx].val_ : r2.cells[idx].val;
     int result = 0;
     if (o1 instanceof Integer && o2 instanceof Integer) result = ((Integer) o1).compareTo((Integer) o2);
     if (o1 instanceof Long && o2 instanceof Long) result = ((Long) o1).compareTo((Long) o2);
     if (o1 instanceof Float && o2 instanceof Float) result = ((Float) o1).compareTo((Float) o2);
     if (o1 instanceof Double && o2 instanceof Double) result = ((Double) o1).compareTo((Double) o2);
-    return result == 0 ? next == null ? 0 : next.compare(r1, r2, recursive) : sort.sortval * result;
+    return result == 0 ? next == null ? 0 : next.compare(r1, r2, useApprox) : sort.sortval * result;
   }
 
   //sets the new value according to precision in val_
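For context on val_ versus val: val_ holds the value rounded according to the preference's precision, so near-equal rows tie on the approximate pass and the next preference (or, after this commit, the exact value) decides the order. A toy illustration with made-up numbers; Solr's actual setApproxVal grouping may work differently:

// Toy bucketing scheme for illustration only; setApproxVal's real grouping
// of sorted rows may differ.
class PrecisionDemo {
  public static void main(String[] args) {
    long precision = 100;
    long freediskA = 91850;
    long freediskB = 91820;
    long approxA = freediskA / precision; // 918
    long approxB = freediskB / precision; // 918, ties on the approximate pass
    System.out.println(approxA == approxB); // true: a later Preference breaks the tie
  }
}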

TestPolicy.java

@@ -20,6 +20,7 @@ package org.apache.solr.cloud.autoscaling;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -29,6 +30,7 @@ import java.util.Map;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.cloud.autoscaling.Policy.Suggester.Hint;
 import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.util.Utils;
 import org.apache.solr.common.util.ValidatingJsonMap;
@@ -212,6 +214,65 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
+  public void testMoveReplica() {
+    String autoscaleJson = "{" +
+        "  'cluster-policy':[" +
+        "    {'cores':'<10','node':'#ANY'}," +
+        "    {'replica':'<2','shard':'#EACH','node':'#ANY'}," +
+        "    {'nodeRole':'!overseer','replica':'#ANY'}]," +
+        "  'cluster-preferences':[" +
+        "    {'minimize':'cores', 'precision':3}," +
+        "    {'maximize':'freedisk','precision':100}]}";
+
+    Map replicaInfoMap = (Map) Utils.fromJSONString("{ '127.0.0.1:60099_solr':{}," +
+        "  '127.0.0.1:60089_solr':{'compute_plan_action_test':{'shard1':[" +
+        "    {'core_node1':{}}," +
+        "    {'core_node2':{}}]}}}");
+    Map m = (Map) Utils.getObjectByPath(replicaInfoMap, false, "127.0.0.1:60089_solr/compute_plan_action_test");
+    m.put("shard1", Arrays.asList(
+        new Policy.ReplicaInfo("core_node1", "compute_plan_action_test", "shard1", Collections.emptyMap()),
+        new Policy.ReplicaInfo("core_node2", "compute_plan_action_test", "shard1", Collections.emptyMap())
+    ));
+
+    Map<String, Map<String,Object>> tagsMap = (Map) Utils.fromJSONString("{" +
+        "  '127.0.0.1:60099_solr':{" +
+        "    'cores':0," +
+        "    'freedisk':918005641216}," +
+        "  '127.0.0.1:60089_solr':{" +
+        "    'cores':2," +
+        "    'freedisk':918005641216}}");
+
+    Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoscaleJson));
+    Policy.Session session = policy.createSession(new ClusterDataProvider() {
+      @Override
+      public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
+        return tagsMap.get(node);
+      }
+
+      @Override
+      public Map<String, Map<String, List<Policy.ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
+        return (Map<String, Map<String, List<Policy.ReplicaInfo>>>) replicaInfoMap.get(node);
+      }
+
+      @Override
+      public Collection<String> getNodes() {
+        return replicaInfoMap.keySet();
+      }
+
+      @Override
+      public String getPolicy(String coll) {
+        return null;
+      }
+    });
+
+    Policy.Suggester suggester = session.getSuggester(CollectionParams.CollectionAction.MOVEREPLICA)
+        .hint(Policy.Suggester.Hint.TARGET_NODE, "127.0.0.1:60099_solr");
+    Map op = suggester.getOperation();
+    assertNotNull(op);
+  }
+
 /* public void testOtherTag(){
    String rules = "{" +
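For reference, the operation asserted non-null in testMoveReplica above describes a MOVEREPLICA call. Based on the fields MoveReplicaSuggester sets, it would look roughly like the sketch below; the key names are approximations of the CollectionParams/ZkStateReader constants, and the println is a hypothetical debugging aid, not part of the test:

// Print the suggested operation while debugging (sketch only):
System.out.println(Utils.toJSONString(op));
// Expected shape, inferred from the fields MoveReplicaSuggester sets above:
// {"operation":"MOVEREPLICA", "collection":"compute_plan_action_test",
//  "shard":"shard1", "node":"127.0.0.1:60089_solr",
//  "replica":"core_node1", "targetNode":"127.0.0.1:60099_solr"}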