mirror of https://github.com/apache/lucene.git
SOLR-13325: Add a collection selector to ComputePlanAction (#1512)
ComputePlanAction now supports a collection selector of the form `collections: {policy: my_policy}` which can be used to select multiple collections that match collection property/value pairs. This is useful to maintain a whitelist of collections for which actions should be taken without needing to hard-code the collection names. The collection hints are pushed down to the policy engine so operations for non-matching collections are not computed at all. The AutoAddReplicasPlanAction now becomes a thin shim over ComputePlanAction and simply adds a collection selector for the collection property autoAddReplicas=true.
Parent: 3fba3daa95 · Commit: 338671e511
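For a quick picture of the new selector, here is the trigger configuration that this commit adds to the reference guide further below (names such as `node_added_trigger` and `my_policy` are placeholders):

[source,json]
----
{
  "set-trigger" : {
    "name" : "node_added_trigger",
    "event" : "nodeAdded",
    "waitFor" : "1s",
    "enabled" : true,
    "actions" : [
      {
        "name" : "compute_plan",
        "class" : "solr.ComputePlanAction",
        "collections" : {"policy": "my_policy"}
      },
      {
        "name" : "execute_plan",
        "class" : "solr.ExecutePlanAction"
      }
    ]
  }
}
----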
@@ -119,6 +119,12 @@ Improvements

* SOLR-14407: Handle shards.purpose in the postlogs tool (Joel Bernstein)

* SOLR-13325: ComputePlanAction now supports a collection selector of the form `collections: {policy: my_policy}`
  which can be used to select multiple collections that match collection property/value pairs. This is useful to
  maintain a whitelist of collections for which actions are taken without needing to hard code the collection names
  themselves. The collection hints are pushed down to the policy engine so operations for non-matching collections
  are not computed at all. (ab, shalin)

Optimizations
---------------------

* SOLR-8306: Do not collect expand documents when expand.rows=0 (Marshall Sanders, Amelia Henderson)
@@ -18,46 +18,23 @@

package org.apache.solr.cloud.autoscaling;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;

import org.apache.solr.client.solrj.cloud.autoscaling.NoneSuggester;
import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
import org.apache.solr.client.solrj.impl.ClusterStateProvider;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.core.SolrResourceLoader;

import static org.apache.solr.common.cloud.ZkStateReader.AUTO_ADD_REPLICAS;

/**
 * This class configures the parent ComputePlanAction to compute plan
 * only for collections which have autoAddReplicas=true.
 */
public class AutoAddReplicasPlanAction extends ComputePlanAction {

  @Override
  protected Suggester getSuggester(Policy.Session session, TriggerEvent event, ActionContext context, SolrCloudManager cloudManager) throws IOException {
    // for backward compatibility
    ClusterStateProvider stateProvider = cloudManager.getClusterStateProvider();
    String autoAddReplicas = stateProvider.getClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, (String) null);
    if (autoAddReplicas != null && autoAddReplicas.equals("false")) {
      return NoneSuggester.get(session);
    }

    Suggester suggester = super.getSuggester(session, event, context, cloudManager);
    ClusterState clusterState;
    try {
      clusterState = stateProvider.getClusterState();
    } catch (IOException e) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Exception getting cluster state", e);
    }

    boolean anyCollections = false;
    for (DocCollection collection: clusterState.getCollectionsMap().values()) {
      if (collection.getAutoAddReplicas()) {
        anyCollections = true;
        suggester.hint(Suggester.Hint.COLL, collection.getName());
      }
    }

    if (!anyCollections) return NoneSuggester.get(session);
    return suggester;
  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
    properties.put("collections", Collections.singletonMap(AUTO_ADD_REPLICAS, "true"));
    super.configure(loader, cloudManager, properties);
  }
}
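For readability, here is a sketch of the resulting AutoAddReplicasPlanAction after this change, reconstructed from the added lines in the hunk above (the getSuggester override shown there is the code being removed; the exact final import list is approximate):

[source,java]
----
package org.apache.solr.cloud.autoscaling;

import java.util.Collections;
import java.util.Map;

import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.core.SolrResourceLoader;

import static org.apache.solr.common.cloud.ZkStateReader.AUTO_ADD_REPLICAS;

/**
 * This class configures the parent ComputePlanAction to compute plan
 * only for collections which have autoAddReplicas=true.
 */
public class AutoAddReplicasPlanAction extends ComputePlanAction {

  @Override
  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
    // Push a collection selector down to the parent action so that only
    // collections with autoAddReplicas=true are considered when computing the plan.
    properties.put("collections", Collections.singletonMap(AUTO_ADD_REPLICAS, "true"));
    super.configure(loader, cloudManager, properties);
  }
}
----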
@@ -17,38 +17,28 @@

package org.apache.solr.cloud.autoscaling;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
import org.apache.solr.client.solrj.cloud.autoscaling.NoneSuggester;
import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
import org.apache.solr.client.solrj.cloud.autoscaling.UnsupportedSuggester;
import org.apache.solr.client.solrj.cloud.autoscaling.*;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.params.AutoScalingParams;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.util.Pair;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.core.SolrResourceLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import static org.apache.solr.cloud.autoscaling.TriggerEvent.NODE_NAMES;

/**
@@ -61,7 +51,8 @@ import static org.apache.solr.cloud.autoscaling.TriggerEvent.NODE_NAMES;
public class ComputePlanAction extends TriggerActionBase {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  Set<String> collections = new HashSet<>();
  // accept all collections by default
  Predicate<String> collectionsPredicate = s -> true;

  public ComputePlanAction() {
    super();
@@ -72,9 +63,37 @@ public class ComputePlanAction extends TriggerActionBase {
  @Override
  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
    super.configure(loader, cloudManager, properties);
    String colString = (String) properties.get("collections");
    if (colString != null && !colString.isEmpty()) {
      collections.addAll(StrUtils.splitSmart(colString, ','));

    Object value = properties.get("collections");
    if (value instanceof String) {
      String colString = (String) value;
      if (!colString.isEmpty()) {
        List<String> whiteListedCollections = StrUtils.splitSmart(colString, ',');
        collectionsPredicate = whiteListedCollections::contains;
      }
    } else if (value instanceof Map) {
      Map<String, String> matchConditions = (Map<String, String>) value;
      collectionsPredicate = collectionName -> {
        try {
          DocCollection collection = cloudManager.getClusterStateProvider().getCollection(collectionName);
          if (collection == null) {
            log.debug("Collection: {} was not found while evaluating conditions", collectionName);
            return false;
          }
          for (Map.Entry<String, String> entry : matchConditions.entrySet()) {
            if (!entry.getValue().equals(collection.get(entry.getKey()))) {
              if (log.isDebugEnabled()) {
                log.debug("Collection: {} does not match condition: {}:{}", collectionName, entry.getKey(), entry.getValue());
              }
              return false;
            }
          }
          return true;
        } catch (IOException e) {
          log.error("Exception fetching collection information for: {}", collectionName, e);
          return false;
        }
      };
    }
  }

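As a minimal sketch of the two accepted shapes of the `collections` property when configure() is called (the property key and both value shapes come from the code above; the surrounding harness is illustrative only, since in practice the values are parsed from the trigger JSON):

[source,java]
----
package org.apache.solr.cloud.autoscaling;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.core.SolrResourceLoader;

// Illustrative harness only; placed in the same package so ComputePlanAction
// and TriggerValidationException are visible without extra imports.
final class ComputePlanConfigSketch {

  static void configureExamples(SolrResourceLoader loader, SolrCloudManager cloudManager) throws TriggerValidationException {
    // 1) Comma-separated whitelist: collectionsPredicate becomes a simple contains() check.
    Map<String, Object> byName = new HashMap<>();
    byName.put("collections", "test1,test2");
    new ComputePlanAction().configure(loader, cloudManager, byName);

    // 2) Property selector: collectionsPredicate matches collection properties such as "policy".
    Map<String, Object> bySelector = new HashMap<>();
    bySelector.put("collections", Collections.singletonMap("policy", "my_policy"));
    new ComputePlanAction().configure(loader, cloudManager, bySelector);
  }
}
----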
@@ -142,14 +161,6 @@ public class ComputePlanAction extends TriggerActionBase {
        if (log.isDebugEnabled()) {
          log.debug("Computed Plan: {}", operation.getParams());
        }
        if (!collections.isEmpty()) {
          String coll = operation.getParams().get(CoreAdminParams.COLLECTION);
          if (coll != null && !collections.contains(coll)) {
            // discard an op that doesn't affect our collections
            log.debug("-- discarding due to collection={} not in {}", coll, collections);
            continue;
          }
        }
        Map<String, Object> props = context.getProperties();
        props.compute("operations", (k, v) -> {
          List<SolrRequest> operations = (List<SolrRequest>) v;
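The operations accumulated under the "operations" key are what downstream actions (for example ExecutePlanAction) pick up from the shared context. A minimal reader might look like the following sketch; the key name and the getProperties() call come from the code above, while the helper itself is hypothetical:

[source,java]
----
package org.apache.solr.cloud.autoscaling;

import java.util.Collections;
import java.util.List;

import org.apache.solr.client.solrj.SolrRequest;

// Hypothetical helper: reads the plan that ComputePlanAction stored in the ActionContext.
final class PlanReaderSketch {

  @SuppressWarnings({"unchecked", "rawtypes"})
  static List<SolrRequest> readPlan(ActionContext context) {
    List<SolrRequest> ops = (List<SolrRequest>) context.getProperties().get("operations");
    return ops == null ? Collections.emptyList() : ops;
  }
}
----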
@@ -217,29 +228,7 @@ public class ComputePlanAction extends TriggerActionBase {
        suggester = getNodeAddedSuggester(cloudManager, session, event);
        break;
      case NODELOST:
        String preferredOp = (String) event.getProperty(AutoScalingParams.PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
        CollectionParams.CollectionAction action = CollectionParams.CollectionAction.get(preferredOp);
        switch (action) {
          case MOVEREPLICA:
            suggester = session.getSuggester(action)
                .hint(Suggester.Hint.SRC_NODE, event.getProperty(NODE_NAMES));
            break;
          case DELETENODE:
            int start = (Integer)event.getProperty(START, 0);
            List<String> srcNodes = (List<String>) event.getProperty(NODE_NAMES);
            if (srcNodes.isEmpty() || start >= srcNodes.size()) {
              return NoneSuggester.get(session);
            }
            String sourceNode = srcNodes.get(start);
            suggester = session.getSuggester(action)
                .hint(Suggester.Hint.SRC_NODE, Collections.singletonList(sourceNode));
            event.getProperties().put(START, ++start);
            break;
          case NONE:
            return NoneSuggester.get(session);
          default:
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unsupported preferredOperation: " + action.toLower() + " specified for node lost trigger");
        }
        suggester = getNodeLostSuggester(cloudManager, session, event);
        break;
      case SEARCHRATE:
      case METRIC:
@@ -258,13 +247,15 @@ public class ComputePlanAction extends TriggerActionBase {
        for (Map.Entry<Suggester.Hint, Object> e : op.getHints().entrySet()) {
          suggester = suggester.hint(e.getKey(), e.getValue());
        }
        if (applyCollectionHints(cloudManager, suggester) == 0) return NoneSuggester.get(session);
        suggester = suggester.forceOperation(true);
        event.getProperties().put(START, ++start);
        break;
      case SCHEDULED:
        preferredOp = (String) event.getProperty(AutoScalingParams.PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
        action = CollectionParams.CollectionAction.get(preferredOp);
        String preferredOp = (String) event.getProperty(AutoScalingParams.PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
        CollectionParams.CollectionAction action = CollectionParams.CollectionAction.get(preferredOp);
        suggester = session.getSuggester(action);
        if (applyCollectionHints(cloudManager, suggester) == 0) return NoneSuggester.get(session);
        break;
      default:
        throw new UnsupportedOperationException("No support for events other than nodeAdded, nodeLost, searchRate, metric, scheduled and indexSize. Received: " + event.getEventType());
@@ -272,6 +263,53 @@ public class ComputePlanAction extends TriggerActionBase {
    return suggester;
  }

  private Suggester getNodeLostSuggester(SolrCloudManager cloudManager, Policy.Session session, TriggerEvent event) throws IOException {
    String preferredOp = (String) event.getProperty(AutoScalingParams.PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
    CollectionParams.CollectionAction action = CollectionParams.CollectionAction.get(preferredOp);
    switch (action) {
      case MOVEREPLICA:
        Suggester s = session.getSuggester(action)
            .hint(Suggester.Hint.SRC_NODE, event.getProperty(NODE_NAMES));
        if (applyCollectionHints(cloudManager, s) == 0) return NoneSuggester.get(session);
        return s;
      case DELETENODE:
        int start = (Integer)event.getProperty(START, 0);
        List<String> srcNodes = (List<String>) event.getProperty(NODE_NAMES);
        if (srcNodes.isEmpty() || start >= srcNodes.size()) {
          return NoneSuggester.get(session);
        }
        String sourceNode = srcNodes.get(start);
        s = session.getSuggester(action)
            .hint(Suggester.Hint.SRC_NODE, event.getProperty(NODE_NAMES));
        if (applyCollectionHints(cloudManager, s) == 0) return NoneSuggester.get(session);
        s.hint(Suggester.Hint.SRC_NODE, Collections.singletonList(sourceNode));
        event.getProperties().put(START, ++start);
        return s;
      case NONE:
        return NoneSuggester.get(session);
      default:
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unsupported preferredOperation: " + action.toLower() + " specified for node lost trigger");
    }
  }

  /**
   * Applies collection hints for all collections that match the {@link #collectionsPredicate}
   * and returns the number of collections that matched.
   * @return number of collections that match the {@link #collectionsPredicate}
   * @throws IOException if {@link org.apache.solr.client.solrj.impl.ClusterStateProvider} throws IOException
   */
  private int applyCollectionHints(SolrCloudManager cloudManager, Suggester s) throws IOException {
    ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
    Set<String> set = clusterState.getCollectionStates().keySet().stream()
        .filter(collectionRef -> collectionsPredicate.test(collectionRef))
        .collect(Collectors.toSet());
    if (set.size() < clusterState.getCollectionStates().size()) {
      // apply hints only if a subset of collections are selected
      set.forEach(c -> s.hint(Suggester.Hint.COLL, c));
    }
    return set.size();
  }

  private Suggester getNodeAddedSuggester(SolrCloudManager cloudManager, Policy.Session session, TriggerEvent event) throws IOException {
    String preferredOp = (String) event.getProperty(AutoScalingParams.PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
    Replica.Type replicaType = (Replica.Type) event.getProperty(AutoScalingParams.REPLICA_TYPE, Replica.Type.NRT);
@@ -283,14 +321,15 @@ public class ComputePlanAction extends TriggerActionBase {
      case ADDREPLICA:
        // add all collection/shard pairs and let policy engine figure out which one
        // to place on the target node
        // todo in future we can prune ineligible collection/shard pairs
        ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
        Set<Pair<String, String>> collShards = new HashSet<>();
        clusterState.getCollectionStates().forEach((collectionName, collectionRef) -> {
          DocCollection docCollection = collectionRef.get();
        clusterState.getCollectionStates().entrySet().stream()
            .filter(e -> collectionsPredicate.test(e.getKey()))
            .forEach(entry -> {
              DocCollection docCollection = entry.getValue().get();
              if (docCollection != null) {
                docCollection.getActiveSlices().stream()
                    .map(slice -> new Pair<>(collectionName, slice.getName()))
                    .map(slice -> new Pair<>(entry.getKey(), slice.getName()))
                    .forEach(collShards::add);
              }
            });
@@ -84,6 +84,7 @@ public class AutoAddReplicasPlanActionTest extends SolrCloudTestCase{

    String collection1 = "testSimple1";
    String collection2 = "testSimple2";
    String collection3 = "testSimple3";
    CollectionAdminRequest.createCollection(collection1, "conf", 2, 2)
        .setCreateNodeSet(jetty1.getNodeName()+","+jetty2.getNodeName())
        .setAutoAddReplicas(true)

@@ -94,8 +95,8 @@ public class AutoAddReplicasPlanActionTest extends SolrCloudTestCase{
        .setAutoAddReplicas(false)
        .setMaxShardsPerNode(1)
        .process(cluster.getSolrClient());
    // the number of cores in jetty1 (5) will be larger than jetty3 (1)
    CollectionAdminRequest.createCollection("testSimple3", "conf", 3, 1)
    // the number of cores in jetty1 (6) will be larger than jetty3 (1)
    CollectionAdminRequest.createCollection(collection3, "conf", 3, 1)
        .setCreateNodeSet(jetty1.getNodeName())
        .setAutoAddReplicas(false)
        .setMaxShardsPerNode(3)

@@ -103,7 +104,7 @@ public class AutoAddReplicasPlanActionTest extends SolrCloudTestCase{

    cluster.waitForActiveCollection(collection1, 2, 4);
    cluster.waitForActiveCollection(collection2, 1, 2);
    cluster.waitForActiveCollection("testSimple3", 3, 3);
    cluster.waitForActiveCollection(collection3, 3, 3);

    // we remove the implicit created trigger, so the replicas won't be moved
    String removeTriggerCommand = "{" +

@@ -139,7 +140,7 @@ public class AutoAddReplicasPlanActionTest extends SolrCloudTestCase{

    cluster.waitForActiveCollection(collection1, 2, 4);
    cluster.waitForActiveCollection(collection2, 1, 2);
    cluster.waitForActiveCollection("testSimple3", 3, 3);
    cluster.waitForActiveCollection(collection3, 3, 3);

    assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cluster.getSolrClient().getZkStateReader(), 30000));

@@ -184,7 +185,7 @@ public class AutoAddReplicasPlanActionTest extends SolrCloudTestCase{

    cluster.waitForActiveCollection(collection1, 2, 4);
    cluster.waitForActiveCollection(collection2, 1, 2);
    cluster.waitForActiveCollection("testSimple3", 3, 3);
    cluster.waitForActiveCollection(collection3, 3, 3);

    assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cluster.getSolrClient().getZkStateReader(), 30000));

@@ -211,6 +212,7 @@ public class AutoAddReplicasPlanActionTest extends SolrCloudTestCase{
  @SuppressForbidden(reason = "Needs currentTimeMillis to create unique id")
  private List<SolrRequest> getOperations(JettySolrRunner actionJetty, String lostNodeName) throws Exception {
    try (AutoAddReplicasPlanAction action = new AutoAddReplicasPlanAction()) {
      action.configure(actionJetty.getCoreContainer().getResourceLoader(), actionJetty.getCoreContainer().getZkController().getSolrCloudManager(), new HashMap<>());
      TriggerEvent lostNode = new NodeLostTrigger.NodeLostEvent(TriggerEventType.NODELOST, ".auto_add_replicas", Collections.singletonList(System.currentTimeMillis()), Collections.singletonList(lostNodeName), CollectionParams.CollectionAction.MOVEREPLICA.toLower());
      ActionContext context = new ActionContext(actionJetty.getCoreContainer().getZkController().getSolrCloudManager(), null, new HashMap<>());
      action.process(lostNode, context);
@@ -17,19 +17,6 @@

package org.apache.solr.cloud.autoscaling;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.lucene.util.LuceneTestCase;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.cloud.NodeStateProvider;

@@ -55,14 +42,18 @@ import org.apache.solr.common.util.Pair;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.util.LogLevel;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;

@@ -444,7 +435,33 @@ public class ComputePlanActionTest extends SolrCloudTestCase {

  @Test
  //2018-06-18 (commented) @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 09-Apr-2018
  public void testSelectedCollections() throws Exception {
  public void testSelectedCollectionsByName() throws Exception {
    String collectionsFilter = "'testSelected1,testSelected2'";
    testCollectionsPredicate(collectionsFilter, Collections.emptyMap());
  }

  @Test
  //2018-06-18 (commented) @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 09-Apr-2018
  public void testSelectedCollectionsByPolicy() throws Exception {
    CloudSolrClient solrClient = cluster.getSolrClient();
    String setSearchPolicyCommand = "{" +
        " 'set-policy': {" +
        "   'search': [" +
        "      {'replica':'<5', 'shard': '#EACH', 'node': '#ANY'}," +
        "    ]" +
        "}}";
    SolrRequest req = AutoScalingRequest.create(SolrRequest.METHOD.POST, setSearchPolicyCommand);
    NamedList<Object> response = solrClient.request(req);
    assertEquals(response.get("result").toString(), "success");

    String collectionsFilter = "{'policy': 'search'}";
    Map<String, String> createCollectionParameters = new HashMap<>();
    createCollectionParameters.put("testSelected1", "search");
    createCollectionParameters.put("testSelected2", "search");
    testCollectionsPredicate(collectionsFilter, createCollectionParameters);
  }

  private void testCollectionsPredicate(String collectionsFilter, Map<String, String> createCollectionParameters) throws Exception {
    if (log.isInfoEnabled()) {
      log.info("Found number of jetties: {}", cluster.getJettySolrRunners().size());
    }

@@ -462,7 +479,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
        "'event' : 'nodeLost'," +
        "'waitFor' : '1s'," +
        "'enabled' : true," +
        "'actions' : [{'name':'compute_plan', 'class' : 'solr.ComputePlanAction', 'collections' : 'testSelected1,testSelected2'}," +
        "'actions' : [{'name':'compute_plan', 'class' : 'solr.ComputePlanAction', 'collections' : " + collectionsFilter + "}," +
        "{'name':'test','class':'" + ComputePlanActionTest.AssertingTriggerAction.class.getName() + "'}]" +
        "}}";
    SolrRequest req = AutoScalingRequest.create(SolrRequest.METHOD.POST, setTriggerCommand);

@@ -471,14 +488,23 @@ public class ComputePlanActionTest extends SolrCloudTestCase {

    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("testSelected1",
        "conf", 2, 2);
    if (createCollectionParameters.get("testSelected1") != null) {
      create.setPolicy(createCollectionParameters.get("testSelected1"));
    }
    create.process(solrClient);

    create = CollectionAdminRequest.createCollection("testSelected2",
        "conf", 2, 2);
    if (createCollectionParameters.get("testSelected2") != null) {
      create.setPolicy(createCollectionParameters.get("testSelected2"));
    }
    create.process(solrClient);

    create = CollectionAdminRequest.createCollection("testSelected3",
        "conf", 2, 2);
    if (createCollectionParameters.get("testSelected3") != null) {
      create.setPolicy(createCollectionParameters.get("testSelected3"));
    }
    create.process(solrClient);

    cluster.waitForActiveCollection("testSelected1", 2, 4);
@@ -29,12 +29,19 @@ commands which can re-balance the cluster in response to trigger events.
The following parameters are configurable:

`collections`::
A comma-separated list of collection names. If this list is not empty then
the computed operations will only calculate collection operations that affect
listed collections and ignore any other collection operations for collections
not listed here. Note that non-collection operations are not affected by this.
A comma-separated list of collection names, or a selector on collection properties that can be used to filter collections for which the plan is computed.

Example configuration:
If a non-empty list or selector is specified then the computed operations will only calculate collection operations that affect
matched collections and ignore any other collection operations for collections
not listed here. This does not affect non-collection operations.

A collection selector is of the form `collections: {key1: value1, key2: value2, ...}` where the key can be any collection property such as `name`, `policy`, `numShards` etc.
Each value must match the collection's property exactly, and all specified properties must match for a collection to be selected.

A collection selector is useful in clusters where collections are added and removed frequently, or where only collections that
use a specific autoscaling policy should be considered.

Example configurations:

[source,json]
----
@@ -48,11 +55,11 @@ Example configuration:
      {
        "name" : "compute_plan",
        "class" : "solr.ComputePlanAction",
        "collections" : "test1,test2",
        "collections" : "test1,test2"
      },
      {
        "name" : "execute_plan",
        "class" : "solr.ExecutePlanAction",
        "class" : "solr.ExecutePlanAction"
      }
    ]
  }
@@ -63,6 +70,56 @@ In this example only collections `test1` and `test2` will be potentially
replicated / moved to an added node, other collections will be ignored even
if they cause policy violations.

[source,json]
----
{
  "set-trigger" : {
    "name" : "node_added_trigger",
    "event" : "nodeAdded",
    "waitFor" : "1s",
    "enabled" : true,
    "actions" : [
      {
        "name" : "compute_plan",
        "class" : "solr.ComputePlanAction",
        "collections" : {"policy": "my_policy"}
      },
      {
        "name" : "execute_plan",
        "class" : "solr.ExecutePlanAction"
      }
    ]
  }
}
----

In this example only collections which use `my_policy` as their autoscaling policy will be potentially replicated / moved to an added node; other collections will be ignored even if they cause policy violations.

[source,json]
----
{
  "set-trigger" : {
    "name" : "node_added_trigger",
    "event" : "nodeAdded",
    "waitFor" : "1s",
    "enabled" : true,
    "actions" : [
      {
        "name" : "compute_plan",
        "class" : "solr.ComputePlanAction",
        "collections" : {"policy": "my_policy", "numShards" : "4"}
      },
      {
        "name" : "execute_plan",
        "class" : "solr.ExecutePlanAction"
      }
    ]
  }
}
----

In this example only collections which use `my_policy` as their autoscaling policy and have `numShards` equal to `4` will be potentially replicated / moved to an added node; other collections will be ignored even if they cause policy violations.

== Execute Plan Action

The `ExecutePlanAction` executes the Collection API commands emitted by the `ComputePlanAction` against