mirror of https://github.com/apache/lucene.git

SOLR-11285: Support simulations at scale in the autoscaling framework, part 1 (refactoring).

parent 9543e85460
commit 8dd489edce
@@ -52,6 +52,12 @@ Bug Fixes

 * SOLR-11447: ZkStateWriter should process commands in atomic. (Cao Manh Dat, shalin)

+Optimizations
+----------------------
+
+* SOLR-11285: Refactor autoscaling framework to avoid direct references to Zookeeper and Solr
+  to make testing and large scale simulations easier. (ab)
+
 ==================  7.1.0 ==================

 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
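
Most hunks below apply one recurring pattern: a direct SolrZkClient or ZkStateReader call is replaced by a call through the SolrCloudManager / DistribStateManager abstractions, so that tests and large-scale simulations can plug in a state store that is not backed by a live ZooKeeper ensemble. As a reading aid, here is a minimal sketch of the DistribStateManager shape, inferred only from the call sites visible in this diff; the real interface in org.apache.solr.client.solrj.cloud.autoscaling is larger and its exception lists may differ:

    // Sketch only: the method set below is inferred from call sites in this
    // commit, not copied from the actual interface definition.
    public interface DistribStateManager {
      boolean hasData(String path) throws IOException, InterruptedException;
      void makePath(String path) throws AlreadyExistsException, IOException, InterruptedException;
      void createData(String path, byte[] data, CreateMode mode)
          throws AlreadyExistsException, IOException, InterruptedException;
      VersionedData getData(String path, Watcher watcher) throws IOException, InterruptedException;
      void setData(String path, byte[] data, int version)
          throws BadVersionException, IOException, InterruptedException;
      AutoScalingConfig getAutoScalingConfig() throws IOException, InterruptedException;
    }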
@@ -327,6 +327,10 @@ public class ApiBag {
     public List<Map> getErrs() {
       return errs;
     }
+
+    public String toString() {
+      return super.toString() + ", errors: " + getErrs() + ", ";
+    }
   }

   public static class LazyLoadedApi extends Api {
@@ -18,6 +18,7 @@
 package org.apache.solr.cloud;


+import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.Collection;
 import java.util.Collections;
@@ -42,7 +43,6 @@ import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.handler.component.ShardHandler;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -70,7 +70,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
   }

   ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-      throws KeeperException, InterruptedException {
+      throws IOException, InterruptedException {
     log.debug("addReplica() : {}", Utils.toJSONString(message));
     String collection = message.getStr(COLLECTION_PROP);
     String node = message.getStr(CoreAdminParams.NODE);
@@ -119,7 +119,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
       }
     } else {
       node = getNodesForNewReplicas(clusterState, collection, shard, 1, node,
-          ocmh.overseer.getZkController().getCoreContainer()).get(0).nodeName;// TODO: use replica type in this logic too
+          ocmh.overseer.getSolrCloudManager(), ocmh.overseer.getCoreContainer()).get(0).nodeName;// TODO: use replica type in this logic too
     }
   }
   log.info("Node Identified {} for creating new replica", node);
@@ -128,7 +128,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
     throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
   }
   if (coreName == null) {
-    coreName = Assign.buildCoreName(ocmh.zkStateReader.getZkClient(), coll, shard, replicaType);
+    coreName = Assign.buildCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(), coll, shard, replicaType);
   } else if (!skipCreateReplicaInClusterState) {
     //Validate that the core name is unique in that collection
     for (Slice slice : coll.getSlices()) {
@@ -158,7 +158,11 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
       if (coreNodeName != null) {
         props = props.plus(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
       }
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
+      try {
+        Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
+      } catch (Exception e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Exception updating Overseer state queue", e);
+      }
     }
     params.set(CoreAdminParams.CORE_NODE_NAME,
         ocmh.waitToSeeReplicasInState(collection, Collections.singletonList(coreName)).get(coreName).getName());
@@ -31,10 +31,12 @@ import java.util.Set;
 import java.util.stream.Collectors;

 import com.google.common.collect.ImmutableMap;
+import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
+import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
+import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.SolrClientDataProvider;
-import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
 import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
 import org.apache.solr.cloud.rule.ReplicaAssigner;
 import org.apache.solr.cloud.rule.Rule;
@@ -44,9 +46,7 @@ import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.ReplicaPosition;
 import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.CoreContainer;
@@ -55,7 +55,6 @@ import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.zookeeper.data.Stat;

 import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
 import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
@@ -68,43 +67,47 @@ import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
 public class Assign {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

-  public static int incAndGetId(SolrZkClient zkClient, String collection, int defaultValue) {
+  public static int incAndGetId(DistribStateManager stateManager, String collection, int defaultValue) {
     String path = "/collections/"+collection;
     try {
-      if (!zkClient.exists(path, true)) {
+      if (!stateManager.hasData(path)) {
         try {
-          zkClient.makePath(path, true);
-        } catch (KeeperException.NodeExistsException e) {
+          stateManager.makePath(path);
+        } catch (AlreadyExistsException e) {
           // it's okay if another beats us creating the node
         }
       }
       path += "/counter";
-      if (!zkClient.exists(path, true)) {
+      if (!stateManager.hasData(path)) {
         try {
-          zkClient.create(path, NumberUtils.intToBytes(defaultValue), CreateMode.PERSISTENT, true);
-        } catch (KeeperException.NodeExistsException e) {
+          stateManager.createData(path, NumberUtils.intToBytes(defaultValue), CreateMode.PERSISTENT);
+        } catch (AlreadyExistsException e) {
           // it's okay if another beats us creating the node
         }
       }
     } catch (InterruptedException e) {
       Thread.interrupted();
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
-    } catch (KeeperException e) {
+    } catch (IOException | KeeperException e) {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
     }

     while (true) {
-      Stat stat = new Stat();
       try {
-        byte[] data = zkClient.getData(path, null, stat, true);
-        int currentId = NumberUtils.bytesToInt(data);
-        data = NumberUtils.intToBytes(++currentId);
-        zkClient.setData(path, data, stat.getVersion(), true);
-        return currentId;
-      } catch (KeeperException e) {
-        if (e.code() != KeeperException.Code.BADVERSION) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:"+collection, e);
-        }
+        int version = 0;
+        int currentId = 0;
+        VersionedData data = stateManager.getData(path, null);
+        if (data != null) {
+          currentId = NumberUtils.bytesToInt(data.getData());
+          version = data.getVersion();
+        }
+        byte[] bytes = NumberUtils.intToBytes(++currentId);
+        stateManager.setData(path, bytes, version);
+        return currentId;
+      } catch (BadVersionException e) {
+        continue;
+      } catch (IOException | KeeperException e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:"+collection, e);
       } catch (InterruptedException e) {
         Thread.interrupted();
         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:" + collection, e);
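
The rewritten counter loop above is a textbook optimistic compare-and-set: read the current value together with its version, write the increment back conditioned on that version, and retry from the top when BadVersionException signals that another node won the race. The same pattern in isolation (a sketch; Versioned and CounterStore are made-up stand-ins for VersionedData and DistribStateManager, not Solr classes):

    import java.util.concurrent.atomic.AtomicReference;

    // Sketch only: compareAndSet plays the role of setData(path, bytes, expectedVersion),
    // and a failed CAS plays the role of BadVersionException.
    final class Versioned {
      final int value;
      final int version;
      Versioned(int value, int version) { this.value = value; this.version = version; }
    }

    final class CounterStore {
      private final AtomicReference<Versioned> cell = new AtomicReference<>(new Versioned(0, 0));

      int incrementAndGet() {
        while (true) {                          // retry until our expected version wins
          Versioned cur = cell.get();           // read value + version together
          Versioned next = new Versioned(cur.value + 1, cur.version + 1);
          if (cell.compareAndSet(cur, next)) {  // failure == another writer raced us: loop again
            return next.value;
          }
        }
      }
    }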
@@ -112,14 +115,14 @@ public class Assign {
     }
   }

-  public static String assignNode(SolrZkClient client, DocCollection collection) {
+  public static String assignNode(DistribStateManager stateManager, DocCollection collection) {
     // for backward compatibility;
     int defaultValue = defaultCounterValue(collection, false);
-    String coreNodeName = "core_node" + incAndGetId(client, collection.getName(), defaultValue);
+    String coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
     while (collection.getReplica(coreNodeName) != null) {
       // there is wee chance that, the new coreNodeName id not totally unique,
       // but this will be guaranteed unique for new collections
-      coreNodeName = "core_node" + incAndGetId(client, collection.getName(), defaultValue);
+      coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
     }
     return coreNodeName;
   }
@@ -185,20 +188,20 @@ public class Assign {
     return defaultValue * 20;
   }

-  public static String buildCoreName(SolrZkClient zkClient, DocCollection collection, String shard, Replica.Type type, boolean newCollection) {
+  public static String buildCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type, boolean newCollection) {
     Slice slice = collection.getSlice(shard);
     int defaultValue = defaultCounterValue(collection, newCollection);
-    int replicaNum = incAndGetId(zkClient, collection.getName(), defaultValue);
+    int replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
     String coreName = buildCoreName(collection.getName(), shard, type, replicaNum);
     while (existCoreName(coreName, slice)) {
-      replicaNum = incAndGetId(zkClient, collection.getName(), defaultValue);
+      replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
       coreName = buildCoreName(collection.getName(), shard, type, replicaNum);
     }
     return coreName;
   }

-  public static String buildCoreName(SolrZkClient zkClient, DocCollection collection, String shard, Replica.Type type) {
-    return buildCoreName(zkClient, collection, shard, type, false);
+  public static String buildCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type) {
+    return buildCoreName(stateManager, collection, shard, type, false);
   }

   private static boolean existCoreName(String coreName, Slice slice) {
@@ -243,10 +246,10 @@ public class Assign {
                                                     List<String> shardNames,
                                                     int numNrtReplicas,
                                                     int numTlogReplicas,
-                                                    int numPullReplicas) throws KeeperException, InterruptedException {
+                                                    int numPullReplicas) throws IOException, InterruptedException {
     List<Map> rulesMap = (List) message.get("rule");
     String policyName = message.getStr(POLICY);
-    AutoScalingConfig autoScalingConfig = ocmh.zkStateReader.getAutoScalingConfig();
+    AutoScalingConfig autoScalingConfig = ocmh.overseer.getSolrCloudManager().getDistribStateManager().getAutoScalingConfig();

     if (rulesMap == null && policyName == null && autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
       log.debug("Identify nodes using default");
@@ -295,7 +298,7 @@ public class Assign {
     PolicyHelper.SESSION_REF.set(ocmh.policySessionRef);
     try {
       return getPositionsUsingPolicy(collectionName,
-          shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas, policyName, ocmh.zkStateReader, nodeList);
+          shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas, policyName, ocmh.overseer.getSolrCloudManager(), nodeList);
     } finally {
       PolicyHelper.SESSION_REF.remove();
     }
@@ -324,7 +327,7 @@ public class Assign {
   // could be created on live nodes given maxShardsPerNode, Replication factor (if from createShard) etc.
   public static List<ReplicaCount> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
                                                           String shard, int nrtReplicas,
-                                                          Object createNodeSet, CoreContainer cc) throws KeeperException, InterruptedException {
+                                                          Object createNodeSet, SolrCloudManager cloudManager, CoreContainer cc) throws IOException, InterruptedException {
     log.debug("getNodesForNewReplicas() shard: {} , replicas : {} , createNodeSet {}", shard, nrtReplicas, createNodeSet );
     DocCollection coll = clusterState.getCollection(collectionName);
     Integer maxShardsPerNode = coll.getMaxShardsPerNode();
@@ -356,13 +359,14 @@ public class Assign {
     List l = (List) coll.get(DocCollection.RULE);
     List<ReplicaPosition> replicaPositions = null;
     if (l != null) {
+      // TODO: make it so that this method doesn't require access to CC
       replicaPositions = getNodesViaRules(clusterState, shard, nrtReplicas, cc, coll, createNodeList, l);
     }
     String policyName = coll.getStr(POLICY);
-    AutoScalingConfig autoScalingConfig = cc.getZkController().zkStateReader.getAutoScalingConfig();
+    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
     if (policyName != null || !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
       replicaPositions = Assign.getPositionsUsingPolicy(collectionName, Collections.singletonList(shard), nrtReplicas, 0, 0,
-          policyName, cc.getZkController().zkStateReader, createNodeList);
+          policyName, cloudManager, createNodeList);
     }

     if(replicaPositions != null){
@@ -383,21 +387,17 @@ public class Assign {
                                                            int nrtReplicas,
                                                            int tlogReplicas,
                                                            int pullReplicas,
-                                                           String policyName, ZkStateReader zkStateReader,
-                                                           List<String> nodesList) throws KeeperException, InterruptedException {
+                                                           String policyName, SolrCloudManager cloudManager,
+                                                           List<String> nodesList) throws IOException, InterruptedException {
     log.debug("shardnames {} NRT {} TLOG {} PULL {} , policy {}, nodeList {}", shardNames, nrtReplicas, tlogReplicas, pullReplicas, policyName, nodesList);
-    SolrClientDataProvider clientDataProvider = null;
     List<ReplicaPosition> replicaPositions = null;
-    AutoScalingConfig autoScalingConfig = zkStateReader.getAutoScalingConfig();
-    try (CloudSolrClient csc = new CloudSolrClient.Builder()
-        .withClusterStateProvider(new ZkClientClusterStateProvider(zkStateReader))
-        .build()) {
-      clientDataProvider = new SolrClientDataProvider(csc);
+    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
+    try {
       Map<String, String> kvMap = Collections.singletonMap(collName, policyName);
       replicaPositions = PolicyHelper.getReplicaLocations(
           collName,
           autoScalingConfig,
-          clientDataProvider,
+          cloudManager,
           kvMap,
           shardNames,
           nrtReplicas,
@@ -405,11 +405,10 @@ public class Assign {
           pullReplicas,
           nodesList);
       return replicaPositions;
-    } catch (IOException e) {
+    } catch (Exception e) {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error closing CloudSolrClient",e);
     } finally {
       if (log.isTraceEnabled()) {
-        if (clientDataProvider != null) log.trace("CLUSTER_DATA_PROVIDER: " + Utils.toJSONString(clientDataProvider));
         if (replicaPositions != null)
           log.trace("REPLICA_POSITIONS: " + Utils.toJSONString(Utils.getDeepCopy(replicaPositions, 7, true)));
         log.trace("AUTOSCALING_CONF: " + Utils.toJSONString(autoScalingConfig));
@@ -225,7 +225,7 @@ public class CreateCollectionCmd implements Cmd {
       Map<String,ShardRequest> coresToCreate = new LinkedHashMap<>();
       for (ReplicaPosition replicaPosition : replicaPositions) {
         String nodeName = replicaPosition.node;
-        String coreName = Assign.buildCoreName(ocmh.zkStateReader.getZkClient(), zkStateReader.getClusterState().getCollection(collectionName),
+        String coreName = Assign.buildCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(), zkStateReader.getClusterState().getCollection(collectionName),
             replicaPosition.shard, replicaPosition.type, true);
         log.debug(formatString("Creating core {0} as part of shard {1} of collection {2} on {3}"
             , coreName, replicaPosition.shard, collectionName, nodeName));
@@ -17,6 +17,7 @@
 package org.apache.solr.cloud;


+import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -27,6 +28,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;

 import com.google.common.collect.ImmutableMap;
+import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
 import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
 import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
 import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
@@ -41,7 +43,6 @@ import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.common.util.Utils;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -52,7 +53,6 @@ import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
 import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SOLR_AUTOSCALING_CONF_PATH;
 import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;

@@ -103,7 +103,7 @@ public class CreateShardCmd implements Cmd {
           numPullReplicas);
     } else {
       List<Assign.ReplicaCount> sortedNodeList = getNodesForNewReplicas(clusterState, collectionName, sliceName, totalReplicas,
-          createNodeSetStr, ocmh.overseer.getZkController().getCoreContainer());
+          createNodeSetStr, ocmh.overseer.getSolrCloudManager(), ocmh.overseer.getCoreContainer());
       int i = 0;
       positions = new ArrayList<>();
       for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
@@ -124,7 +124,7 @@ public class CreateShardCmd implements Cmd {
     countDownLatch = new CountDownLatch(totalReplicas);
     for (ReplicaPosition position : positions) {
       String nodeName = position.node;
-      String coreName = Assign.buildCoreName(ocmh.zkStateReader.getZkClient(), collection, sliceName, position.type);
+      String coreName = Assign.buildCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(), collection, sliceName, position.type);
       log.info("Creating replica " + coreName + " as part of slice " + sliceName + " of collection " + collectionName
           + " on " + nodeName);

@@ -173,8 +173,8 @@ public class CreateShardCmd implements Cmd {
   }

   static boolean usePolicyFramework(DocCollection collection, OverseerCollectionMessageHandler ocmh)
-      throws KeeperException, InterruptedException {
-    Map autoScalingJson = Utils.getJson(ocmh.zkStateReader.getZkClient(), SOLR_AUTOSCALING_CONF_PATH, true);
-    return autoScalingJson.get(Policy.CLUSTER_POLICY) != null || collection.getPolicyName() != null;
+      throws IOException, InterruptedException {
+    AutoScalingConfig autoScalingConfig = ocmh.overseer.getSolrCloudManager().getDistribStateManager().getAutoScalingConfig();
+    return !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty() || collection.getPolicyName() != null;
   }
 }
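
The new usePolicyFramework() answers the same question as before, but from the parsed AutoScalingConfig rather than from raw JSON read at the autoscaling ZooKeeper path: the policy framework is engaged when a non-empty cluster policy exists or when the collection names its own policy. The combined check in isolation (a sketch using only accessors that appear in this hunk):

    import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
    import org.apache.solr.common.cloud.DocCollection;

    // Sketch of the two conditions the rewritten usePolicyFramework() combines.
    final class PolicyCheck {
      static boolean policyApplies(AutoScalingConfig config, DocCollection collection) {
        boolean hasClusterPolicy = !config.getPolicy().getClusterPolicy().isEmpty();
        boolean hasCollectionPolicy = collection.getPolicyName() != null;
        return hasClusterPolicy || hasCollectionPolicy;
      }
    }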
@@ -26,7 +26,7 @@ import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;

-import org.apache.solr.common.cloud.DistributedQueue;
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
 import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
 import org.apache.solr.cloud.overseer.OverseerAction;
 import org.apache.solr.common.SolrException;
@@ -233,7 +233,8 @@ class ShardLeaderElectionContextBase extends ElectionContext {
           ZkStateReader.SHARD_ID_PROP, shardId,
           ZkStateReader.COLLECTION_PROP, collection,
           ZkStateReader.BASE_URL_PROP, leaderProps.get(ZkStateReader.BASE_URL_PROP),
-          ZkStateReader.CORE_NAME_PROP, leaderProps.get(ZkStateReader.CORE_NAME_PROP));
+          ZkStateReader.CORE_NAME_PROP, leaderProps.get(ZkStateReader.CORE_NAME_PROP),
+          ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString());
       Overseer.getStateUpdateQueue(zkClient).offer(Utils.toJSON(m));
     }
   }
@@ -508,7 +509,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
     }
   }

-  public void publishActiveIfRegisteredAndNotActive(SolrCore core) throws KeeperException, InterruptedException {
+  public void publishActiveIfRegisteredAndNotActive(SolrCore core) throws Exception {
     if (core.getCoreDescriptor().getCloudDescriptor().hasRegistered()) {
       ZkStateReader zkStateReader = zkController.getZkStateReader();
       zkStateReader.forceUpdateCollection(collection);
@@ -257,7 +257,7 @@ public class MigrateCmd implements OverseerCollectionMessageHandler.Cmd {

     log.info("Creating a replica of temporary collection: {} on the target leader node: {}",
         tempSourceCollectionName, targetLeader.getNodeName());
-    String tempCollectionReplica2 = Assign.buildCoreName(ocmh.zkStateReader.getZkClient(),
+    String tempCollectionReplica2 = Assign.buildCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(),
         zkStateReader.getClusterState().getCollection(tempSourceCollectionName), tempSourceSlice.getName(), Replica.Type.NRT);
     props = new HashMap<>();
     props.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
@@ -184,7 +184,7 @@ public class MoveReplicaCmd implements Cmd{

   private void moveNormalReplica(ClusterState clusterState, NamedList results, String targetNode, String async,
                                  DocCollection coll, Replica replica, Slice slice, int timeout) throws Exception {
-    String newCoreName = Assign.buildCoreName(ocmh.zkStateReader.getZkClient(), coll, slice.getName(), replica.getType());
+    String newCoreName = Assign.buildCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(), coll, slice.getName(), replica.getType());
     ZkNodeProps addReplicasProps = new ZkNodeProps(
         COLLECTION_PROP, coll.getName(),
         SHARD_ID_PROP, slice.getName(),
@@ -19,19 +19,16 @@ package org.apache.solr.cloud;
 import java.io.Closeable;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.LinkedList;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicInteger;

 import com.codahale.metrics.Timer;
 import org.apache.solr.client.solrj.SolrResponse;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.cloud.autoscaling.OverseerTriggerThread;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cloud.overseer.CollectionMutator;
@@ -51,6 +48,7 @@ import org.apache.solr.common.util.ObjectReleaseTracker;
 import org.apache.solr.common.util.Pair;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.CloudConfig;
+import org.apache.solr.core.CoreContainer;
 import org.apache.solr.handler.admin.CollectionsHandler;
 import org.apache.solr.handler.component.ShardHandler;
 import org.apache.solr.update.UpdateShardHandler;
@@ -115,11 +113,11 @@ public class Overseer implements Closeable {
     }

     public Stats getStateUpdateQueueStats() {
-      return stateUpdateQueue.getStats();
+      return stateUpdateQueue.getZkStats();
     }

     public Stats getWorkQueueStats() {
-      return workQueue.getStats();
+      return workQueue.getZkStats();
     }

     @Override
@@ -164,7 +162,7 @@ public class Overseer implements Closeable {
         byte[] data = fallbackQueue.peek();
         while (fallbackQueueSize > 0 && data != null)  {
           final ZkNodeProps message = ZkNodeProps.load(data);
-          log.debug("processMessage: fallbackQueueSize: {}, message = {}", fallbackQueue.getStats().getQueueLength(), message);
+          log.debug("processMessage: fallbackQueueSize: {}, message = {}", fallbackQueue.getZkStats().getQueueLength(), message);
           // force flush to ZK after each message because there is no fallback if workQueue items
           // are removed from workQueue but fail to be written to ZK
           try {
@@ -217,7 +215,7 @@ public class Overseer implements Closeable {
           for (Pair<String, byte[]> head : queue) {
             byte[] data = head.second();
             final ZkNodeProps message = ZkNodeProps.load(data);
-            log.debug("processMessage: queueSize: {}, message = {} current state version: {}", stateUpdateQueue.getStats().getQueueLength(), message, clusterState.getZkClusterStateVersion());
+            log.debug("processMessage: queueSize: {}, message = {} current state version: {}", stateUpdateQueue.getZkStats().getQueueLength(), message, clusterState.getZkClusterStateVersion());

             processedNodes.add(head.first());
             fallbackQueueSize = processedNodes.size();
@@ -279,7 +277,7 @@ public class Overseer implements Closeable {
         // ZooKeeper in which case another Overseer should take over
         // TODO: if ordering for the message is not important, we could
        // track retries and put it back on the end of the queue
-        log.error("Overseer could not process the current clusterstate state update message, skipping the message.", e);
+        log.error("Overseer could not process the current clusterstate state update message, skipping the message: " + message, e);
         stats.error(operation);
       } finally {
         timerContext.stop();
@@ -338,19 +336,19 @@ public class Overseer implements Closeable {
       if (collectionAction != null) {
         switch (collectionAction) {
           case CREATE:
-            return Collections.singletonList(new ClusterStateMutator(getZkStateReader()).createCollection(clusterState, message));
+            return Collections.singletonList(new ClusterStateMutator(getSolrCloudManager()).createCollection(clusterState, message));
           case DELETE:
-            return Collections.singletonList(new ClusterStateMutator(getZkStateReader()).deleteCollection(clusterState, message));
+            return Collections.singletonList(new ClusterStateMutator(getSolrCloudManager()).deleteCollection(clusterState, message));
           case CREATESHARD:
-            return Collections.singletonList(new CollectionMutator(getZkStateReader()).createShard(clusterState, message));
+            return Collections.singletonList(new CollectionMutator(getSolrCloudManager()).createShard(clusterState, message));
           case DELETESHARD:
-            return Collections.singletonList(new CollectionMutator(getZkStateReader()).deleteShard(clusterState, message));
+            return Collections.singletonList(new CollectionMutator(getSolrCloudManager()).deleteShard(clusterState, message));
           case ADDREPLICA:
-            return Collections.singletonList(new SliceMutator(getZkStateReader()).addReplica(clusterState, message));
+            return Collections.singletonList(new SliceMutator(getSolrCloudManager()).addReplica(clusterState, message));
           case ADDREPLICAPROP:
-            return Collections.singletonList(new ReplicaMutator(getZkStateReader()).addReplicaProperty(clusterState, message));
+            return Collections.singletonList(new ReplicaMutator(getSolrCloudManager()).addReplicaProperty(clusterState, message));
           case DELETEREPLICAPROP:
-            return Collections.singletonList(new ReplicaMutator(getZkStateReader()).deleteReplicaProperty(clusterState, message));
+            return Collections.singletonList(new ReplicaMutator(getSolrCloudManager()).deleteReplicaProperty(clusterState, message));
           case BALANCESHARDUNIQUE:
             ExclusiveSliceProperty dProp = new ExclusiveSliceProperty(clusterState, message);
             if (dProp.balanceProperty()) {
@@ -360,9 +358,9 @@ public class Overseer implements Closeable {
             break;
           case MODIFYCOLLECTION:
             CollectionsHandler.verifyRuleParams(zkController.getCoreContainer() ,message.getProperties());
-            return Collections.singletonList(new CollectionMutator(reader).modifyCollection(clusterState,message));
+            return Collections.singletonList(new CollectionMutator(getSolrCloudManager()).modifyCollection(clusterState,message));
           case MIGRATESTATEFORMAT:
-            return Collections.singletonList(new ClusterStateMutator(reader).migrateStateFormat(clusterState, message));
+            return Collections.singletonList(new ClusterStateMutator(getSolrCloudManager()).migrateStateFormat(clusterState, message));
           default:
             throw new RuntimeException("unknown operation:" + operation
                 + " contents:" + message.getProperties());
@@ -374,17 +372,17 @@ public class Overseer implements Closeable {
       }
       switch (overseerAction) {
         case STATE:
-          return Collections.singletonList(new ReplicaMutator(getZkStateReader()).setState(clusterState, message));
+          return Collections.singletonList(new ReplicaMutator(getSolrCloudManager()).setState(clusterState, message));
         case LEADER:
-          return Collections.singletonList(new SliceMutator(getZkStateReader()).setShardLeader(clusterState, message));
+          return Collections.singletonList(new SliceMutator(getSolrCloudManager()).setShardLeader(clusterState, message));
         case DELETECORE:
-          return Collections.singletonList(new SliceMutator(getZkStateReader()).removeReplica(clusterState, message));
+          return Collections.singletonList(new SliceMutator(getSolrCloudManager()).removeReplica(clusterState, message));
         case ADDROUTINGRULE:
-          return Collections.singletonList(new SliceMutator(getZkStateReader()).addRoutingRule(clusterState, message));
+          return Collections.singletonList(new SliceMutator(getSolrCloudManager()).addRoutingRule(clusterState, message));
        case REMOVEROUTINGRULE:
-          return Collections.singletonList(new SliceMutator(getZkStateReader()).removeRoutingRule(clusterState, message));
+          return Collections.singletonList(new SliceMutator(getSolrCloudManager()).removeRoutingRule(clusterState, message));
        case UPDATESHARDSTATE:
-          return Collections.singletonList(new SliceMutator(getZkStateReader()).updateShardState(clusterState, message));
+          return Collections.singletonList(new SliceMutator(getSolrCloudManager()).updateShardState(clusterState, message));
         case QUIT:
           if (myId.equals(message.get(ID))) {
             log.info("Quit command received {} {}", message, LeaderElector.getNodeName(myId));
@@ -527,7 +525,8 @@ public class Overseer implements Closeable {
     ccThread.setDaemon(true);

     ThreadGroup triggerThreadGroup = new ThreadGroup("Overseer autoscaling triggers");
-    OverseerTriggerThread trigger = new OverseerTriggerThread(zkController, config);
+    OverseerTriggerThread trigger = new OverseerTriggerThread(zkController.getCoreContainer().getResourceLoader(),
+        zkController.getSolrCloudManager(), config);
     triggerThread = new OverseerThread(triggerThreadGroup, trigger, "OverseerAutoScalingTriggerThread-" + id);

     updaterThread.start();
@@ -543,7 +542,15 @@ public class Overseer implements Closeable {
   ZkController getZkController(){
     return zkController;
   }

+  public CoreContainer getCoreContainer() {
+    return zkController.getCoreContainer();
+  }
+
+  public SolrCloudManager getSolrCloudManager() {
+    return zkController.getSolrCloudManager();
+  }
+
   /**
    * For tests.
    *
@@ -624,7 +631,7 @@ public class Overseer implements Closeable {
    * This method will create the /overseer znode in ZooKeeper if it does not exist already.
    *
    * @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
-   * @param zkStats  a {@link Overseer.Stats} object which tracks statistics for all zookeeper operations performed by this queue
+   * @param zkStats  a {@link Stats} object which tracks statistics for all zookeeper operations performed by this queue
    * @return a {@link ZkDistributedQueue} object
    */
   static ZkDistributedQueue getStateUpdateQueue(final SolrZkClient zkClient, Stats zkStats) {
@@ -644,7 +651,7 @@ public class Overseer implements Closeable {
    * This method will create the /overseer znode in ZooKeeper if it does not exist already.
    *
    * @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
-   * @param zkStats  a {@link Overseer.Stats} object which tracks statistics for all zookeeper operations performed by this queue
+   * @param zkStats  a {@link Stats} object which tracks statistics for all zookeeper operations performed by this queue
    * @return a {@link ZkDistributedQueue} object
    */
   static ZkDistributedQueue getInternalWorkQueue(final SolrZkClient zkClient, Stats zkStats) {
@@ -772,126 +779,13 @@ public class Overseer implements Closeable {
     return "true".equals(legacyProperty);
   }

+  public static boolean isLegacy(ClusterStateProvider clusterStateProvider) {
+    String legacyProperty = clusterStateProvider.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "false");
+    return "true".equals(legacyProperty);
+  }
+
   public ZkStateReader getZkStateReader() {
     return reader;
   }

-  /**
-   * Used to hold statistics about overseer operations. It will be exposed
-   * to the OverseerCollectionProcessor to return statistics.
-   *
-   * This is experimental API and subject to change.
-   */
-  public static class Stats {
-    static final int MAX_STORED_FAILURES = 10;
-
-    final Map<String, Stat> stats = new ConcurrentHashMap<>();
-    private volatile int queueLength;
-
-    public Map<String, Stat> getStats() {
-      return stats;
-    }
-
-    public int getSuccessCount(String operation) {
-      Stat stat = stats.get(operation.toLowerCase(Locale.ROOT));
-      return stat == null ? 0 : stat.success.get();
-    }
-
-    public int getErrorCount(String operation)  {
-      Stat stat = stats.get(operation.toLowerCase(Locale.ROOT));
-      return stat == null ? 0 : stat.errors.get();
-    }
-
-    public void success(String operation) {
-      String op = operation.toLowerCase(Locale.ROOT);
-      Stat stat = stats.get(op);
-      if (stat == null) {
-        stat = new Stat();
-        stats.put(op, stat);
-      }
-      stat.success.incrementAndGet();
-    }
-
-    public void error(String operation) {
-      String op = operation.toLowerCase(Locale.ROOT);
-      Stat stat = stats.get(op);
-      if (stat == null) {
-        stat = new Stat();
-        stats.put(op, stat);
-      }
-      stat.errors.incrementAndGet();
-    }
-
-    public Timer.Context time(String operation) {
-      String op = operation.toLowerCase(Locale.ROOT);
-      Stat stat = stats.get(op);
-      if (stat == null) {
-        stat = new Stat();
-        stats.put(op, stat);
-      }
-      return stat.requestTime.time();
-    }
-
-    public void storeFailureDetails(String operation, ZkNodeProps request, SolrResponse resp) {
-      String op = operation.toLowerCase(Locale.ROOT);
-      Stat stat = stats.get(op);
-      if (stat == null) {
-        stat = new Stat();
-        stats.put(op, stat);
-      }
-      LinkedList<FailedOp> failedOps = stat.failureDetails;
-      synchronized (failedOps)  {
-        if (failedOps.size() >= MAX_STORED_FAILURES)  {
-          failedOps.removeFirst();
-        }
-        failedOps.addLast(new FailedOp(request, resp));
-      }
-    }
-
-    public List<FailedOp> getFailureDetails(String operation) {
-      Stat stat = stats.get(operation.toLowerCase(Locale.ROOT));
-      if (stat == null || stat.failureDetails.isEmpty()) return null;
-      LinkedList<FailedOp> failedOps = stat.failureDetails;
-      synchronized (failedOps)  {
-        ArrayList<FailedOp> ret = new ArrayList<>(failedOps);
-        return ret;
-      }
-    }
-
-    public int getQueueLength() {
-      return queueLength;
-    }
-
-    public void setQueueLength(int queueLength) {
-      this.queueLength = queueLength;
-    }
-
-    public void clear() {
-      stats.clear();
-    }
-  }
-
-  public static class Stat  {
-    public final AtomicInteger success;
-    public final AtomicInteger errors;
-    public final Timer requestTime;
-    public final LinkedList<FailedOp> failureDetails;
-
-    public Stat() {
-      this.success = new AtomicInteger();
-      this.errors = new AtomicInteger();
-      this.requestTime = new Timer();
-      this.failureDetails = new LinkedList<>();
-    }
-  }
-
-  public static class FailedOp {
-    public final ZkNodeProps req;
-    public final SolrResponse resp;
-
-    public FailedOp(ZkNodeProps req, SolrResponse resp) {
-      this.req = req;
-      this.resp = resp;
-    }
-  }
 }
@@ -34,9 +34,9 @@ import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.CONFIGSETS_A
 public class OverseerCollectionConfigSetProcessor extends OverseerTaskProcessor {

    public OverseerCollectionConfigSetProcessor(ZkStateReader zkStateReader, String myId,
-                                               final ShardHandler shardHandler,
-                                               String adminPath, Overseer.Stats stats, Overseer overseer,
-                                               OverseerNodePrioritizer overseerNodePrioritizer) {
+                                                final ShardHandler shardHandler,
+                                                String adminPath, Stats stats, Overseer overseer,
+                                                OverseerNodePrioritizer overseerNodePrioritizer) {
     this(
         zkStateReader,
         myId,
@@ -55,7 +55,7 @@ public class OverseerCollectionConfigSetProcessor extends OverseerTaskProcessor
   protected OverseerCollectionConfigSetProcessor(ZkStateReader zkStateReader, String myId,
                                         final ShardHandlerFactory shardHandlerFactory,
                                         String adminPath,
-                                        Overseer.Stats stats,
+                                        Stats stats,
                                         Overseer overseer,
                                         OverseerNodePrioritizer overseerNodePrioritizer,
                                         OverseerTaskQueue workQueue,
@@ -80,7 +80,7 @@ public class OverseerCollectionConfigSetProcessor extends OverseerTaskProcessor
                                         String myId,
                                         final ShardHandlerFactory shardHandlerFactory,
                                         String adminPath,
-                                        Overseer.Stats stats,
+                                        Stats stats,
                                         Overseer overseer,
                                         OverseerNodePrioritizer overseerNodePrioritizer) {
     final OverseerCollectionMessageHandler collMessageHandler = new OverseerCollectionMessageHandler(
@@ -36,8 +36,8 @@ import com.google.common.collect.ImmutableMap;
 import org.apache.commons.lang.StringUtils;
 import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
 import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.common.cloud.DistributedQueue;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
@@ -145,7 +145,7 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler
   String adminPath;
   ZkStateReader zkStateReader;
   String myId;
-  Overseer.Stats stats;
+  Stats stats;

   // Set that tracks collections that are currently being processed by a running task.
   // This is used for handling mutual exclusion of the tasks.
@@ -172,7 +172,7 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler
   public OverseerCollectionMessageHandler(ZkStateReader zkStateReader, String myId,
                                         final ShardHandlerFactory shardHandlerFactory,
                                         String adminPath,
-                                        Overseer.Stats stats,
+                                        Stats stats,
                                         Overseer overseer,
                                         OverseerNodePrioritizer overseerPrioritizer) {
     this.zkStateReader = zkStateReader;
@@ -442,7 +442,7 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler
     }
   }

-  void deleteCoreNode(String collectionName, String replicaName, Replica replica, String core) throws KeeperException, InterruptedException {
+  void deleteCoreNode(String collectionName, String replicaName, Replica replica, String core) throws Exception {
     ZkNodeProps m = new ZkNodeProps(
         Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),
         ZkStateReader.CORE_NAME_PROP, core,
@@ -463,8 +463,7 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler
   }

   //TODO should we not remove in the next release ?
-  private void migrateStateFormat(ClusterState state, ZkNodeProps message, NamedList results)
-      throws KeeperException, InterruptedException {
+  private void migrateStateFormat(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
     final String collectionName = message.getStr(COLLECTION_PROP);

     boolean firstLoop = true;
@@ -635,7 +634,7 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler


   private void modifyCollection(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws KeeperException, InterruptedException {
+      throws Exception {

     final String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
     //the rest of the processing is based on writing cluster state properties
@@ -713,7 +712,7 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler
   }

   ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-      throws KeeperException, InterruptedException {
+      throws Exception {

     return ((AddReplicaCmd) commandMap.get(ADDREPLICA)).addReplica(clusterState, message, results, onComplete);
   }
@@ -31,7 +31,6 @@ import org.apache.solr.common.util.Utils;
 import org.apache.solr.handler.component.ShardHandler;
 import org.apache.solr.handler.component.ShardHandlerFactory;
 import org.apache.solr.handler.component.ShardRequest;
-import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -56,7 +55,7 @@ public class OverseerNodePrioritizer {
     this.shardHandlerFactory = shardHandlerFactory;
   }

-  public synchronized void prioritizeOverseerNodes(String overseerId) throws KeeperException, InterruptedException {
+  public synchronized void prioritizeOverseerNodes(String overseerId) throws Exception {
     SolrZkClient zk = zkStateReader.getZkClient();
     if(!zk.exists(ZkStateReader.ROLES,true))return;
     Map m = (Map) Utils.fromJSON(zk.getData(ZkStateReader.ROLES, null, new Stat(), true));
@@ -64,8 +64,8 @@ public class OverseerStatusCmd implements Cmd {
     NamedList stateUpdateQueueStats = new NamedList();
     NamedList workQueueStats = new NamedList();
     NamedList collectionQueueStats = new NamedList();
-    Overseer.Stats stats = ocmh.stats;
-    for (Map.Entry<String, Overseer.Stat> entry : stats.getStats().entrySet()) {
+    Stats stats = ocmh.stats;
+    for (Map.Entry<String, Stats.Stat> entry : stats.getStats().entrySet()) {
       String key = entry.getKey();
       NamedList<Object> lst = new SimpleOrderedMap<>();
       if (key.startsWith("collection_"))  {
@@ -74,10 +74,10 @@ public class OverseerStatusCmd implements Cmd {
         int errors = stats.getErrorCount(entry.getKey());
         lst.add("requests", successes);
         lst.add("errors", errors);
-        List<Overseer.FailedOp> failureDetails = stats.getFailureDetails(key);
+        List<Stats.FailedOp> failureDetails = stats.getFailureDetails(key);
         if (failureDetails != null) {
           List<SimpleOrderedMap<Object>> failures = new ArrayList<>();
-          for (Overseer.FailedOp failedOp : failureDetails) {
+          for (Stats.FailedOp failedOp : failureDetails) {
             SimpleOrderedMap<Object> fail = new SimpleOrderedMap<>();
             fail.add("request", failedOp.req.getProperties());
             fail.add("response", failedOp.resp.getResponse());
@@ -91,7 +91,7 @@ public class OverseerTaskProcessor implements Runnable, Closeable {

   private boolean isClosed;

-  private Overseer.Stats stats;
+  private Stats stats;

   // Set of tasks that have been picked up for processing but not cleaned up from zk work-queue.
   // It may contain tasks that have completed execution, have been entered into the completed/failed map in zk but not
@@ -122,7 +122,7 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
   private OverseerNodePrioritizer prioritizer;

   public OverseerTaskProcessor(ZkStateReader zkStateReader, String myId,
-                                        Overseer.Stats stats,
+                                        Stats stats,
                                         OverseerMessageHandlerSelector selector,
                                         OverseerNodePrioritizer prioritizer,
                                         OverseerTaskQueue workQueue,
@@ -45,10 +45,10 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
   private static final String RESPONSE_PREFIX = "qnr-" ;

   public OverseerTaskQueue(SolrZkClient zookeeper, String dir) {
-    this(zookeeper, dir, new Overseer.Stats());
+    this(zookeeper, dir, new Stats());
   }

-  public OverseerTaskQueue(SolrZkClient zookeeper, String dir, Overseer.Stats stats) {
+  public OverseerTaskQueue(SolrZkClient zookeeper, String dir, Stats stats) {
     super(zookeeper, dir, stats);
   }

@@ -64,7 +64,6 @@ import org.apache.solr.update.processor.DistributedUpdateProcessor;
 import org.apache.solr.util.RefCounted;
 import org.apache.solr.util.SolrPluginUtils;
 import org.apache.solr.util.plugin.NamedListInitializedPlugin;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -174,7 +173,7 @@ public class RecoveryStrategy implements Runnable, Closeable {

   final private void recoveryFailed(final SolrCore core,
       final ZkController zkController, final String baseUrl,
-      final String shardZkNodeName, final CoreDescriptor cd) throws KeeperException, InterruptedException {
+      final String shardZkNodeName, final CoreDescriptor cd) throws Exception {
     SolrException.log(LOG, "Recovery failed - I give up.");
     try {
       zkController.publish(cd, Replica.State.RECOVERY_FAILED);
@@ -298,7 +297,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
     }
   }

-  final public void doRecovery(SolrCore core) throws KeeperException, InterruptedException {
+  final public void doRecovery(SolrCore core) throws Exception {
     if (core.getCoreDescriptor().getCloudDescriptor().requiresTransactionLog()) {
       doSyncOrReplicateRecovery(core);
     } else {
@@ -441,7 +440,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
   }

   // TODO: perhaps make this grab a new core each time through the loop to handle core reloads?
-  final public void doSyncOrReplicateRecovery(SolrCore core) throws KeeperException, InterruptedException {
+  final public void doSyncOrReplicateRecovery(SolrCore core) throws Exception {
     boolean replayed = false;
     boolean successfulRecovery = false;

@@ -33,7 +33,7 @@ import java.util.Optional;
 import java.util.Properties;
 import java.util.Set;

-import org.apache.solr.common.cloud.DistributedQueue;
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
 import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
 import org.apache.solr.cloud.overseer.OverseerAction;
 import org.apache.solr.common.cloud.ReplicaPosition;
@@ -27,7 +27,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;

-import org.apache.solr.common.cloud.DistributedQueue;
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
 import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
 import org.apache.solr.client.solrj.request.CoreAdminRequest;
 import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
@@ -207,7 +207,7 @@ public class SplitShardCmd implements Cmd {
     for (int i = 0; i < subRanges.size(); i++) {
       String subSlice = slice + "_" + i;
       subSlices.add(subSlice);
-      String subShardName = Assign.buildCoreName(ocmh.zkStateReader.getZkClient(), collection, subSlice, Replica.Type.NRT);
+      String subShardName = Assign.buildCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(), collection, subSlice, Replica.Type.NRT);
       subShardNames.add(subShardName);
     }

@ -0,0 +1,147 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.solr.cloud;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

import com.codahale.metrics.Timer;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.common.cloud.ZkNodeProps;

/**
 * Used to hold statistics about some SolrCloud operations.
 *
 * This is an experimental API and subject to change.
 */
public class Stats {
  static final int MAX_STORED_FAILURES = 10;

  final Map<String, Stat> stats = new ConcurrentHashMap<>();
  private volatile int queueLength;

  public Map<String, Stat> getStats() {
    return stats;
  }

  public int getSuccessCount(String operation) {
    Stat stat = stats.get(operation.toLowerCase(Locale.ROOT));
    return stat == null ? 0 : stat.success.get();
  }

  public int getErrorCount(String operation) {
    Stat stat = stats.get(operation.toLowerCase(Locale.ROOT));
    return stat == null ? 0 : stat.errors.get();
  }

  public void success(String operation) {
    String op = operation.toLowerCase(Locale.ROOT);
    Stat stat = stats.get(op);
    if (stat == null) {
      stat = new Stat();
      stats.put(op, stat);
    }
    stat.success.incrementAndGet();
  }

  public void error(String operation) {
    String op = operation.toLowerCase(Locale.ROOT);
    Stat stat = stats.get(op);
    if (stat == null) {
      stat = new Stat();
      stats.put(op, stat);
    }
    stat.errors.incrementAndGet();
  }

  public Timer.Context time(String operation) {
    String op = operation.toLowerCase(Locale.ROOT);
    Stat stat = stats.get(op);
    if (stat == null) {
      stat = new Stat();
      stats.put(op, stat);
    }
    return stat.requestTime.time();
  }

  public void storeFailureDetails(String operation, ZkNodeProps request, SolrResponse resp) {
    String op = operation.toLowerCase(Locale.ROOT);
    Stat stat = stats.get(op);
    if (stat == null) {
      stat = new Stat();
      stats.put(op, stat);
    }
    LinkedList<FailedOp> failedOps = stat.failureDetails;
    synchronized (failedOps) {
      if (failedOps.size() >= MAX_STORED_FAILURES) {
        failedOps.removeFirst();
      }
      failedOps.addLast(new FailedOp(request, resp));
    }
  }

  public List<FailedOp> getFailureDetails(String operation) {
    Stat stat = stats.get(operation.toLowerCase(Locale.ROOT));
    if (stat == null || stat.failureDetails.isEmpty()) return null;
    LinkedList<FailedOp> failedOps = stat.failureDetails;
    synchronized (failedOps) {
      ArrayList<FailedOp> ret = new ArrayList<>(failedOps);
      return ret;
    }
  }

  public int getQueueLength() {
    return queueLength;
  }

  public void setQueueLength(int queueLength) {
    this.queueLength = queueLength;
  }

  public void clear() {
    stats.clear();
  }

  public static class Stat {
    public final AtomicInteger success;
    public final AtomicInteger errors;
    public final Timer requestTime;
    public final LinkedList<FailedOp> failureDetails;

    public Stat() {
      this.success = new AtomicInteger();
      this.errors = new AtomicInteger();
      this.requestTime = new Timer();
      this.failureDetails = new LinkedList<>();
    }
  }

  public static class FailedOp {
    public final ZkNodeProps req;
    public final SolrResponse resp;

    public FailedOp(ZkNodeProps req, SolrResponse resp) {
      this.req = req;
      this.resp = resp;
    }
  }
}
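
For orientation, a minimal usage sketch of this class (the operation name is illustrative, not an API constant): callers time an operation and then record its outcome.

    Stats stats = new Stats();
    Timer.Context ctx = stats.time("update_state");   // start the per-operation timer
    try {
      // ... perform the operation ...
      stats.success("update_state");                  // count a success
    } catch (Exception e) {
      stats.error("update_state");                    // count a failure
    } finally {
      ctx.stop();                                     // record the elapsed time
    }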

@@ -51,8 +51,12 @@ import java.util.concurrent.atomic.AtomicReference;
 import com.google.common.base.Strings;
 import org.apache.commons.lang.StringUtils;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
+import org.apache.solr.client.solrj.impl.SolrClientCloudManager;
+import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.WaitForState;
 import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
 import org.apache.solr.cloud.overseer.OverseerAction;
@@ -191,6 +195,8 @@ public class ZkController {
   private final SolrZkClient zkClient;
   private final ZkCmdExecutor cmdExecutor;
   public final ZkStateReader zkStateReader;
+  private SolrCloudManager cloudManager;
+  private CloudSolrClient cloudSolrClient;
 
   private final String zkServerAddress; // example: 127.0.0.1:54062/solr
 
@@ -435,7 +441,7 @@
     });
 
     init(registerOnReconnect);
 
     assert ObjectReleaseTracker.track(this);
   }
@@ -554,6 +560,12 @@
       IOUtils.closeQuietly(overseerElector.getContext());
       IOUtils.closeQuietly(overseer);
     } finally {
+      if (cloudSolrClient != null) {
+        IOUtils.closeQuietly(cloudSolrClient);
+      }
+      if (cloudManager != null) {
+        IOUtils.closeQuietly(cloudManager);
+      }
       try {
         try {
           zkStateReader.close();
@@ -588,6 +600,22 @@
     return zkStateReader.getClusterState();
   }
 
+  public SolrCloudManager getSolrCloudManager() {
+    if (cloudManager != null) {
+      return cloudManager;
+    }
+    synchronized(this) {
+      if (cloudManager != null) {
+        return cloudManager;
+      }
+      cloudSolrClient = new CloudSolrClient.Builder()
+          .withClusterStateProvider(new ZkClientClusterStateProvider(zkStateReader))
+          .build();
+      cloudManager = new SolrClientCloudManager(new ZkDistributedQueueFactory(zkClient), cloudSolrClient);
+    }
+    return cloudManager;
+  }
+
   /**
    * Returns config file data (in bytes)
    */
@@ -1290,18 +1318,18 @@
     return baseURL;
   }
 
-  public void publish(final CoreDescriptor cd, final Replica.State state) throws KeeperException, InterruptedException {
+  public void publish(final CoreDescriptor cd, final Replica.State state) throws Exception {
     publish(cd, state, true);
   }
 
-  public void publish(final CoreDescriptor cd, final Replica.State state, boolean updateLastState) throws KeeperException, InterruptedException {
+  public void publish(final CoreDescriptor cd, final Replica.State state, boolean updateLastState) throws Exception {
     publish(cd, state, updateLastState, false);
   }
 
   /**
    * Publish core state to overseer.
    */
-  public void publish(final CoreDescriptor cd, final Replica.State state, boolean updateLastState, boolean forcePublish) throws KeeperException, InterruptedException {
+  public void publish(final CoreDescriptor cd, final Replica.State state, boolean updateLastState, boolean forcePublish) throws Exception {
     if (!forcePublish) {
       try (SolrCore core = cc.getCore(cd.getName())) {
         if (core == null || core.isClosed()) {
@@ -1410,7 +1438,7 @@
     return true;
   }
 
-  public void unregister(String coreName, CoreDescriptor cd) throws InterruptedException, KeeperException {
+  public void unregister(String coreName, CoreDescriptor cd) throws Exception {
     final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
     final String collection = cd.getCloudDescriptor().getCollectionName();
 
@@ -1441,8 +1469,7 @@
     overseerJobQueue.offer(Utils.toJSON(m));
   }
 
-  public void createCollection(String collection) throws KeeperException,
-      InterruptedException {
+  public void createCollection(String collection) throws Exception {
     ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION,
         CollectionParams.CollectionAction.CREATE.toLower(), ZkStateReader.NODE_NAME_PROP, getNodeName(),
         ZkStateReader.COLLECTION_PROP, collection);
@@ -1570,6 +1597,9 @@
       Thread.currentThread().interrupt();
       log.error("", e);
       throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
+    } catch (Exception e) {
+      log.error("", e);
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "", e);
     }
 
     doGetShardIdAndNodeNameProcess(cd);
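
The lazily built manager above is what the refactored callers use instead of reaching into ZooKeeper directly; a hypothetical call site, consistent with the accessors introduced in this commit:

    SolrCloudManager cloudManager = zkController.getSolrCloudManager();
    DistribStateManager stateManager = cloudManager.getDistribStateManager();     // ZK-like state access
    ClusterStateProvider stateProvider = cloudManager.getClusterStateProvider();  // cluster state access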

@@ -19,7 +19,10 @@ package org.apache.solr.cloud;
 import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
@@ -31,9 +34,9 @@ import java.util.function.Predicate;
 import com.codahale.metrics.Timer;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.DistributedQueue;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkCmdExecutor;
 import org.apache.solr.common.util.Pair;
@@ -47,7 +50,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * A distributed queue. Optimized for single-consumer,
+ * A ZK-based distributed queue. Optimized for single-consumer,
  * multiple-producer: if there are multiple consumers on the same ZK queue,
  * the results should be correct but inefficient
 */
@@ -71,7 +74,7 @@ public class ZkDistributedQueue implements DistributedQueue {
 
  final SolrZkClient zookeeper;
 
-  final Overseer.Stats stats;
+  final Stats stats;
 
  /**
   * A lock that guards all of the mutable state that follows.
@@ -104,14 +107,14 @@ public class ZkDistributedQueue implements DistributedQueue {
   private final AtomicInteger offerPermits = new AtomicInteger(0);
 
   public ZkDistributedQueue(SolrZkClient zookeeper, String dir) {
-    this(zookeeper, dir, new Overseer.Stats());
+    this(zookeeper, dir, new Stats());
   }
 
-  public ZkDistributedQueue(SolrZkClient zookeeper, String dir, Overseer.Stats stats) {
+  public ZkDistributedQueue(SolrZkClient zookeeper, String dir, Stats stats) {
     this(zookeeper, dir, stats, 0);
   }
 
-  public ZkDistributedQueue(SolrZkClient zookeeper, String dir, Overseer.Stats stats, int maxQueueSize) {
+  public ZkDistributedQueue(SolrZkClient zookeeper, String dir, Stats stats, int maxQueueSize) {
     this.dir = dir;
 
     ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zookeeper.getZkClientTimeout());
@@ -331,10 +334,36 @@ public class ZkDistributedQueue implements DistributedQueue {
     }
   }
 
-  public Overseer.Stats getStats() {
+  public Stats getZkStats() {
     return stats;
   }
 
+  @Override
+  public Map<String, Object> getStats() {
+    if (stats == null) {
+      return Collections.emptyMap();
+    }
+    Map<String, Object> res = new HashMap<>();
+    res.put("queueLength", stats.getQueueLength());
+    final Map<String, Object> statsMap = new HashMap<>();
+    res.put("stats", statsMap);
+    stats.getStats().forEach((op, stat) -> {
+      final Map<String, Object> statMap = new HashMap<>();
+      statMap.put("success", stat.success.get());
+      statMap.put("errors", stat.errors.get());
+      final List<Map<String, Object>> failed = new ArrayList<>(stat.failureDetails.size());
+      statMap.put("failureDetails", failed);
+      stat.failureDetails.forEach(failedOp -> {
+        Map<String, Object> fo = new HashMap<>();
+        fo.put("req", failedOp.req);
+        fo.put("resp", failedOp.resp);
+        failed.add(fo);
+      });
+      statsMap.put(op, statMap);
+    });
+    return res;
+  }
+
   /**
    * Returns the name if the first known child node, or {@code null} if the queue is empty.
    * This is the only place {@link #knownChildren} is ever updated!
@@ -400,7 +428,8 @@ public class ZkDistributedQueue implements DistributedQueue {
   /**
    * Return the currently-known set of elements, using child names from memory. If no children are found, or no
    * children pass {@code acceptFilter}, waits up to {@code waitMillis} for at least one child to become available.
-   * Package-private to support {@link OverseerTaskQueue} specifically.
+   * <p>
+   * Package-private to support {@link OverseerTaskQueue} specifically.</p>
    */
   @Override
   public Collection<Pair<String, byte[]>> peekElements(int max, long waitMillis, Predicate<String> acceptFilter) throws KeeperException, InterruptedException {
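
A sketch of the intended single-consumer, multiple-producer usage (ZK address, path, and timeout are illustrative assumptions; exception handling elided):

    SolrZkClient zkClient = new SolrZkClient("localhost:2181", 30000);
    ZkDistributedQueue queue = new ZkDistributedQueue(zkClient, "/overseer/queue-work");
    queue.offer(Utils.toJSON(Collections.singletonMap("op", "example")));  // producer side
    byte[] head = queue.poll();                                            // consumer side; null when empty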
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.io.IOException;
+
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
+import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
+import org.apache.solr.common.cloud.SolrZkClient;
+
+/**
+ * Implementation of {@link DistributedQueueFactory} that uses ZooKeeper.
+ */
+public class ZkDistributedQueueFactory implements DistributedQueueFactory {
+  private final SolrZkClient zkClient;
+
+  public ZkDistributedQueueFactory(SolrZkClient zkClient) {
+    this.zkClient = zkClient;
+  }
+  @Override
+  public DistributedQueue makeQueue(String path) throws IOException {
+    return new ZkDistributedQueue(zkClient, path);
+  }
+
+  @Override
+  public void removeQueue(String path) throws IOException {
+
+  }
+}
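
A minimal wiring sketch for the factory, matching how ZkController.getSolrCloudManager() above combines it with a CloudSolrClient:

    DistributedQueueFactory factory = new ZkDistributedQueueFactory(zkClient);
    DistributedQueue queue = factory.makeQueue("/overseer/collection-queue-work");   // ZK-backed queue
    SolrCloudManager cloudManager = new SolrClientCloudManager(factory, cloudSolrClient);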
@@ -20,8 +20,8 @@ package org.apache.solr.cloud.autoscaling;
 import java.io.IOException;
 import java.util.Map;
 
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
 import org.apache.solr.common.MapWriter;
-import org.apache.solr.core.CoreContainer;
 
 /**
  * Provides additional context for the TriggerAction such as the trigger instance on
@@ -30,18 +30,18 @@ import org.apache.solr.core.CoreContainer;
  */
 public class ActionContext implements MapWriter {
 
-  private final CoreContainer coreContainer;
+  private final SolrCloudManager cloudManager;
   private final AutoScaling.Trigger source;
   private final Map<String, Object> properties;
 
-  public ActionContext(CoreContainer coreContainer, AutoScaling.Trigger source, Map<String, Object> properties) {
-    this.coreContainer = coreContainer;
+  public ActionContext(SolrCloudManager cloudManager, AutoScaling.Trigger source, Map<String, Object> properties) {
+    this.cloudManager = cloudManager;
     this.source = source;
     this.properties = properties;
   }
 
-  public CoreContainer getCoreContainer() {
-    return coreContainer;
+  public SolrCloudManager getCloudManager() {
+    return cloudManager;
   }
 
   public AutoScaling.Trigger getSource() {
@@ -18,8 +18,13 @@
 package org.apache.solr.cloud.autoscaling;
 
+import java.io.IOException;
+
 import org.apache.solr.client.solrj.cloud.autoscaling.NoneSuggester;
 import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.impl.ClusterStateProvider;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.ZkStateReader;
@@ -27,15 +32,21 @@ import org.apache.solr.common.cloud.ZkStateReader;
 public class AutoAddReplicasPlanAction extends ComputePlanAction {
 
   @Override
-  protected Policy.Suggester getSuggester(Policy.Session session, TriggerEvent event, ZkStateReader zkStateReader) {
+  protected Policy.Suggester getSuggester(Policy.Session session, TriggerEvent event, SolrCloudManager cloudManager) {
     // for backward compatibility
-    String autoAddReplicas = zkStateReader.getClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, (String) null);
+    ClusterStateProvider stateProvider = cloudManager.getClusterStateProvider();
+    String autoAddReplicas = stateProvider.getClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, (String) null);
     if (autoAddReplicas != null && autoAddReplicas.equals("false")) {
       return new NoneSuggester();
     }
 
-    Policy.Suggester suggester = super.getSuggester(session, event, zkStateReader);
-    ClusterState clusterState = zkStateReader.getClusterState();
+    Policy.Suggester suggester = super.getSuggester(session, event, cloudManager);
+    ClusterState clusterState;
+    try {
+      clusterState = stateProvider.getClusterState();
+    } catch (IOException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Exception getting cluster state", e);
+    }
 
     boolean anyCollections = false;
     for (DocCollection collection: clusterState.getCollectionsMap().values()) {
@@ -24,10 +24,10 @@ import java.util.Map;
 import java.util.Objects;
 
 import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
 import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.cloud.ZkController;
+import org.apache.solr.core.SolrResourceLoader;
 
 public class AutoScaling {
 
@@ -112,33 +112,13 @@ public class AutoScaling {
     void init();
   }
 
-  public static class TriggerFactory implements Closeable {
+  /**
+   * Factory to produce instances of {@link Trigger}.
+   */
+  public static abstract class TriggerFactory implements Closeable {
+    protected boolean isClosed = false;
 
-    private final CoreContainer coreContainer;
-    private final ZkController zkController;
-
-    private boolean isClosed = false;
-
-    public TriggerFactory(CoreContainer coreContainer, ZkController zkController) {
-      Objects.requireNonNull(coreContainer);
-      Objects.requireNonNull(zkController);
-      this.coreContainer = coreContainer;
-      this.zkController = zkController;
-    }
-
-    public synchronized Trigger create(TriggerEventType type, String name, Map<String, Object> props) {
-      if (isClosed) {
-        throw new AlreadyClosedException("TriggerFactory has already been closed, cannot create new triggers");
-      }
-      switch (type) {
-        case NODEADDED:
-          return new NodeAddedTrigger(name, props, coreContainer, zkController);
-        case NODELOST:
-          return new NodeLostTrigger(name, props, coreContainer, zkController);
-        default:
-          throw new IllegalArgumentException("Unknown event type: " + type + " in trigger: " + name);
-      }
-    }
+    public abstract Trigger create(TriggerEventType type, String name, Map<String, Object> props);
 
     @Override
     public void close() throws IOException {
@@ -148,6 +128,38 @@ public class AutoScaling {
     }
   }
 
+  /**
+   * Default implementation of {@link TriggerFactory}.
+   */
+  public static class TriggerFactoryImpl extends TriggerFactory {
+
+    private final SolrCloudManager dataProvider;
+    private final SolrResourceLoader loader;
+
+    public TriggerFactoryImpl(SolrResourceLoader loader, SolrCloudManager dataProvider) {
+      Objects.requireNonNull(dataProvider);
+      Objects.requireNonNull(loader);
+      this.dataProvider = dataProvider;
+      this.loader = loader;
+    }
+
+    @Override
+    public synchronized Trigger create(TriggerEventType type, String name, Map<String, Object> props) {
+      if (isClosed) {
+        throw new AlreadyClosedException("TriggerFactory has already been closed, cannot create new triggers");
+      }
+      switch (type) {
+        case NODEADDED:
+          return new NodeAddedTrigger(name, props, loader, dataProvider);
+        case NODELOST:
+          return new NodeLostTrigger(name, props, loader, dataProvider);
+        default:
+          throw new IllegalArgumentException("Unknown event type: " + type + " in trigger: " + name);
+      }
+    }
+
+  }
+
   public static final String AUTO_ADD_REPLICAS_TRIGGER_DSL =
       " {" +
       " 'name' : '.auto_add_replicas'," +
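
A brief sketch of obtaining a trigger from the pluggable factory (the property map is illustrative; real values come from the autoscaling configuration):

    AutoScaling.TriggerFactory factory = new AutoScaling.TriggerFactoryImpl(loader, cloudManager);
    Map<String, Object> props = new HashMap<>();
    props.put("waitFor", 60L);   // seconds to wait before firing
    AutoScaling.Trigger trigger = factory.create(TriggerEventType.NODELOST, "node_lost_trigger", props);
    trigger.init();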
@@ -36,6 +36,7 @@ import java.util.stream.Stream;
 
 import org.apache.solr.api.Api;
 import org.apache.solr.api.ApiBag;
+import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
 import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
 import org.apache.solr.client.solrj.cloud.autoscaling.Cell;
 import org.apache.solr.client.solrj.cloud.autoscaling.Clause;
@@ -45,7 +46,8 @@ import org.apache.solr.client.solrj.cloud.autoscaling.Row;
 import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
 import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.SolrClientDataProvider;
+import org.apache.solr.client.solrj.impl.SolrClientCloudManager;
+import org.apache.solr.cloud.ZkDistributedQueueFactory;
 import org.apache.solr.common.MapWriter;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ZkStateReader;
@@ -214,7 +216,8 @@ public class AutoScalingHandler extends RequestHandlerBase implements Permission
     try (CloudSolrClient build = new CloudSolrClient.Builder()
         .withHttpClient(container.getUpdateShardHandler().getHttpClient())
         .withZkHost(container.getZkController().getZkServerAddress()).build()) {
-      Policy.Session session = policy.createSession(new SolrClientDataProvider(build));
+      DistributedQueueFactory queueFactory = new ZkDistributedQueueFactory(container.getZkController().getZkClient());
+      Policy.Session session = policy.createSession(new SolrClientCloudManager(queueFactory, build));
       List<Row> sorted = session.getSorted();
       List<Clause.Violation> violations = session.getViolations();
 
@@ -638,7 +641,9 @@ public class AutoScalingHandler extends RequestHandlerBase implements Permission
     try (CloudSolrClient build = new CloudSolrClient.Builder()
         .withHttpClient(container.getUpdateShardHandler().getHttpClient())
         .withZkHost(container.getZkController().getZkServerAddress()).build()) {
-      Policy.Session session = autoScalingConf.getPolicy().createSession(new SolrClientDataProvider(build));
+      DistributedQueueFactory queueFactory = new ZkDistributedQueueFactory(container.getZkController().getZkClient());
+      Policy.Session session = autoScalingConf.getPolicy()
+          .createSession(new SolrClientCloudManager(queueFactory, build));
       log.debug("Verified autoscaling configuration");
     }
   }
@@ -20,19 +20,14 @@ package org.apache.solr.cloud.autoscaling;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
 import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.SolrClientDataProvider;
-import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
 import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.core.CoreContainer;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -49,45 +44,30 @@
   @Override
   public void process(TriggerEvent event, ActionContext context) {
     log.debug("-- processing event: {} with context properties: {}", event, context.getProperties());
-    CoreContainer container = context.getCoreContainer();
+    SolrCloudManager cloudManager = context.getCloudManager();
     try {
-      try (CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder()
-          .withZkHost(container.getZkController().getZkServerAddress())
-          .withHttpClient(container.getUpdateShardHandler().getHttpClient())
-          .build()) {
-        ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
-        AutoScalingConfig autoScalingConf = zkStateReader.getAutoScalingConfig();
-        if (autoScalingConf.isEmpty()) {
-          log.error("Action: " + getName() + " executed but no policy is configured");
-          return;
-        }
-        Policy policy = autoScalingConf.getPolicy();
-        SolrClientDataProvider dataProvider = new SolrClientDataProvider(cloudSolrClient);
-        dataProvider.config = autoScalingConf;
-        if (log.isDebugEnabled()) {
-          log.debug("Cluster data provider: {}", dataProvider.toMap(new HashMap<>()));
-        }
-        Policy.Session session = policy.createSession(dataProvider);
-        Policy.Suggester suggester = getSuggester(session, event, zkStateReader);
-        while (true) {
-          SolrRequest operation = suggester.getOperation();
-          if (operation == null) break;
-          log.info("Computed Plan: {}", operation.getParams());
-          Map<String, Object> props = context.getProperties();
-          props.compute("operations", (k, v) -> {
-            List<SolrRequest> operations = (List<SolrRequest>) v;
-            if (operations == null) operations = new ArrayList<>();
-            operations.add(operation);
-            return operations;
-          });
-          session = suggester.getSession();
-          suggester = getSuggester(session, event, zkStateReader);
-        }
+      AutoScalingConfig autoScalingConf = cloudManager.getDistribStateManager().getAutoScalingConfig();
+      if (autoScalingConf.isEmpty()) {
+        log.error("Action: " + getName() + " executed but no policy is configured");
+        return;
+      }
+      Policy policy = autoScalingConf.getPolicy();
+      Policy.Session session = policy.createSession(cloudManager);
+      Policy.Suggester suggester = getSuggester(session, event, cloudManager);
+      while (true) {
+        SolrRequest operation = suggester.getOperation();
+        if (operation == null) break;
+        log.info("Computed Plan: {}", operation.getParams());
+        Map<String, Object> props = context.getProperties();
+        props.compute("operations", (k, v) -> {
+          List<SolrRequest> operations = (List<SolrRequest>) v;
+          if (operations == null) operations = new ArrayList<>();
+          operations.add(operation);
+          return operations;
+        });
+        session = suggester.getSession();
+        suggester = getSuggester(session, event, cloudManager);
       }
-    } catch (KeeperException e) {
-      log.error("ZooKeeperException while processing event: " + event, e);
-    } catch (InterruptedException e) {
-      log.error("Interrupted while processing event: " + event, e);
     } catch (IOException e) {
       log.error("IOException while processing event: " + event, e);
     } catch (Exception e) {
@@ -95,7 +75,7 @@
     }
   }
 
-  protected Policy.Suggester getSuggester(Policy.Session session, TriggerEvent event, ZkStateReader zkStateReader) {
+  protected Policy.Suggester getSuggester(Policy.Session session, TriggerEvent event, SolrCloudManager cloudManager) {
     Policy.Suggester suggester;
     switch (event.getEventType()) {
      case NODEADDED:
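
The "operations" property filled in above is the hand-off point to ExecutePlanAction below; a purely illustrative consumer:

    List<SolrRequest> operations = (List<SolrRequest>) context.getProperty("operations");
    if (operations != null) {
      for (SolrRequest op : operations) {
        cloudManager.request(op);   // each op is a collection API call such as MOVEREPLICA
      }
    }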
@@ -28,17 +28,15 @@ import java.util.concurrent.TimeoutException;
 import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
+import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.response.RequestStatusState;
 import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -57,43 +55,45 @@ public class ExecutePlanAction extends TriggerActionBase {
   @Override
   public void process(TriggerEvent event, ActionContext context) {
     log.debug("-- processing event: {} with context properties: {}", event, context.getProperties());
-    CoreContainer container = context.getCoreContainer();
-    SolrZkClient zkClient = container.getZkController().getZkClient();
+    SolrCloudManager dataProvider = context.getCloudManager();
     List<SolrRequest> operations = (List<SolrRequest>) context.getProperty("operations");
     if (operations == null || operations.isEmpty()) {
      log.info("No operations to execute for event: {}", event);
      return;
     }
-    try (CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder()
-        .withZkHost(container.getZkController().getZkServerAddress())
-        .withHttpClient(container.getUpdateShardHandler().getHttpClient())
-        .build()) {
-      int counter = 0;
+    try {
       for (SolrRequest operation : operations) {
        log.info("Executing operation: {}", operation.getParams());
        try {
          SolrResponse response = null;
+          int counter = 0;
          if (operation instanceof CollectionAdminRequest.AsyncCollectionAdminRequest) {
            CollectionAdminRequest.AsyncCollectionAdminRequest req = (CollectionAdminRequest.AsyncCollectionAdminRequest) operation;
            String asyncId = event.getSource() + '/' + event.getId() + '/' + counter;
-            String znode = saveAsyncId(event, context, asyncId);
+            String znode = saveAsyncId(dataProvider.getDistribStateManager(), event, asyncId);
            log.debug("Saved requestId: {} in znode: {}", asyncId, znode);
-            asyncId = req.processAsync(asyncId, cloudSolrClient);
-            CollectionAdminRequest.RequestStatusResponse statusResponse = waitForTaskToFinish(cloudSolrClient, asyncId,
+            // TODO: find a better way of using async calls using dataProvider API !!!
+            req.setAsyncId(asyncId);
+            SolrResponse asyncResponse = dataProvider.request(req);
+            if (asyncResponse.getResponse().get("error") != null) {
+              throw new IOException("" + asyncResponse.getResponse().get("error"));
+            }
+            asyncId = (String)asyncResponse.getResponse().get("requestid");
+            CollectionAdminRequest.RequestStatusResponse statusResponse = waitForTaskToFinish(dataProvider, asyncId,
                DEFAULT_TASK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
            if (statusResponse != null) {
              RequestStatusState state = statusResponse.getRequestStatus();
              if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED || state == RequestStatusState.NOT_FOUND) {
                try {
-                  zkClient.delete(znode, -1, true);
-                } catch (KeeperException e) {
+                  dataProvider.getDistribStateManager().removeData(znode, -1);
+                } catch (Exception e) {
                  log.warn("Unexpected exception while trying to delete znode: " + znode, e);
                }
              }
              response = statusResponse;
            }
          } else {
-            response = operation.process(cloudSolrClient);
+            response = dataProvider.request(operation);
          }
          NamedList<Object> result = response.getResponse();
          context.getProperties().compute("responses", (s, o) -> {
@@ -102,40 +102,41 @@ public class ExecutePlanAction extends TriggerActionBase {
            responses.add(result);
            return responses;
          });
-        } catch (SolrServerException | HttpSolrClient.RemoteSolrException e) {
+        } catch (IOException e) {
          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to talk to ZooKeeper", e);
+//        } catch (InterruptedException e) {
+//          Thread.currentThread().interrupt();
+//          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "ExecutePlanAction was interrupted", e);
        } catch (Exception e) {
          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
              "Unexpected exception executing operation: " + operation.getParams(), e);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "ExecutePlanAction was interrupted", e);
-        } catch (KeeperException e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to talk to ZooKeeper", e);
        }
 
-        counter++;
+//        counter++;
      }
-    } catch (IOException e) {
+    } catch (Exception e) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
          "Unexpected IOException while processing event: " + event, e);
     }
   }
 
-  static CollectionAdminRequest.RequestStatusResponse waitForTaskToFinish(CloudSolrClient cloudSolrClient, String requestId, long duration, TimeUnit timeUnit) throws SolrServerException, IOException, InterruptedException {
+  static CollectionAdminRequest.RequestStatusResponse waitForTaskToFinish(SolrCloudManager dataProvider, String requestId, long duration, TimeUnit timeUnit) throws IOException, InterruptedException {
     long timeoutSeconds = timeUnit.toSeconds(duration);
     RequestStatusState state = RequestStatusState.NOT_FOUND;
     CollectionAdminRequest.RequestStatusResponse statusResponse = null;
     for (int i = 0; i < timeoutSeconds; i++) {
      try {
-        statusResponse = CollectionAdminRequest.requestStatus(requestId).process(cloudSolrClient);
+        statusResponse = (CollectionAdminRequest.RequestStatusResponse)dataProvider.request(CollectionAdminRequest.requestStatus(requestId));
        state = statusResponse.getRequestStatus();
        if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
          log.info("Task with requestId={} finished with state={} in {}s", requestId, state, i * 5);
-          CollectionAdminRequest.deleteAsyncId(requestId).process(cloudSolrClient);
+          dataProvider.request(CollectionAdminRequest.deleteAsyncId(requestId));
          return statusResponse;
        } else if (state == RequestStatusState.NOT_FOUND) {
          // the request for this id was never actually submitted! no harm done, just bail out
          log.warn("Task with requestId={} was not found on overseer", requestId);
-          CollectionAdminRequest.deleteAsyncId(requestId).process(cloudSolrClient);
+          dataProvider.request(CollectionAdminRequest.deleteAsyncId(requestId));
          return statusResponse;
        }
      } catch (Exception e) {
@@ -162,16 +163,14 @@
    *
    * @return the path of the newly created node in ZooKeeper
    */
-  private String saveAsyncId(TriggerEvent event, ActionContext context, String asyncId) throws InterruptedException, KeeperException {
-    String parentPath = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/" + context.getSource().getName() + "/" + getName();
-    CoreContainer container = context.getCoreContainer();
-    SolrZkClient zkClient = container.getZkController().getZkClient();
+  private String saveAsyncId(DistribStateManager stateManager, TriggerEvent event, String asyncId) throws InterruptedException, AlreadyExistsException, IOException, KeeperException {
+    String parentPath = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/" + event.getSource() + "/" + getName();
    try {
-      zkClient.makePath(parentPath, new byte[0], CreateMode.PERSISTENT, true);
-    } catch (KeeperException.NodeExistsException e) {
+      stateManager.makePath(parentPath);
+    } catch (AlreadyExistsException e) {
      // ignore
    }
-    return zkClient.create(parentPath + "/" + PREFIX, Utils.toJSON(Collections.singletonMap("requestid", asyncId)), CreateMode.PERSISTENT_SEQUENTIAL, true);
+    return stateManager.createData(parentPath + "/" + PREFIX, Utils.toJSON(Collections.singletonMap("requestid", asyncId)), CreateMode.PERSISTENT_SEQUENTIAL);
   }
 
 }
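
For context, the async pattern used above reduces to the following sketch (request type, collection, and ids are illustrative assumptions):

    CollectionAdminRequest.AsyncCollectionAdminRequest req =
        CollectionAdminRequest.addReplicaToShard("collection1", "shard1");
    req.setAsyncId("mytrigger/42/0");                          // stable id: trigger/event/counter
    SolrResponse rsp = cloudManager.request(req);              // submit without blocking
    String requestId = (String) rsp.getResponse().get("requestid");
    // then poll CollectionAdminRequest.requestStatus(requestId) until COMPLETED or FAILED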
@@ -23,17 +23,12 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.StringJoiner;
 
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.protocol.HttpClientContext;
-import org.apache.http.entity.StringEntity;
+import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
 import org.apache.solr.util.PropertiesUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -63,7 +58,6 @@
 public class HttpTriggerListener extends TriggerListenerBase {
   private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  private HttpClient httpClient;
   private String urlTemplate;
   private String payloadTemplate;
   private String contentType;
@@ -72,9 +66,8 @@ public class HttpTriggerListener extends TriggerListenerBase {
   private boolean followRedirects;
 
   @Override
-  public void init(CoreContainer coreContainer, AutoScalingConfig.TriggerListenerConfig config) {
-    super.init(coreContainer, config);
-    httpClient = coreContainer.getUpdateShardHandler().getHttpClient();
+  public void init(SolrCloudManager dataProvider, AutoScalingConfig.TriggerListenerConfig config) {
+    super.init(dataProvider, config);
     urlTemplate = (String)config.properties.get("url");
     payloadTemplate = (String)config.properties.get("payload");
     contentType = (String)config.properties.get("contentType");
@@ -146,31 +139,16 @@ public class HttpTriggerListener extends TriggerListenerBase {
       payload = Utils.toJSONString(properties);
       type = "application/json";
     }
-    HttpPost post = new HttpPost(url);
-    HttpEntity entity = new StringEntity(payload, "UTF-8");
+    Map<String, String> headers = new HashMap<>();
     headerTemplates.forEach((k, v) -> {
      String headerVal = PropertiesUtil.substituteProperty(v, properties);
      if (!headerVal.isEmpty()) {
-        post.addHeader(k, headerVal);
+        headers.put(k, headerVal);
      }
     });
-    post.setEntity(entity);
-    post.setHeader("Content-Type", type);
-    org.apache.http.client.config.RequestConfig.Builder requestConfigBuilder = HttpClientUtil.createDefaultRequestConfigBuilder();
-    requestConfigBuilder.setSocketTimeout(timeout);
-    requestConfigBuilder.setConnectTimeout(timeout);
-    requestConfigBuilder.setRedirectsEnabled(followRedirects);
-
-    post.setConfig(requestConfigBuilder.build());
+    headers.put("Content-Type", type);
     try {
-      HttpClientContext httpClientRequestContext = HttpClientUtil.createNewHttpClientRequestContext();
-      HttpResponse rsp = httpClient.execute(post, httpClientRequestContext);
-      int statusCode = rsp.getStatusLine().getStatusCode();
-      if (statusCode != 200) {
-        LOG.warn("Error sending request for event " + event + ", HTTP response: " + rsp.toString());
-      }
-      HttpEntity responseEntity = rsp.getEntity();
-      Utils.consumeFully(responseEntity);
+      dataProvider.httpRequest(url, SolrRequest.METHOD.POST, headers, payload, timeout, followRedirects);
     } catch (IOException e) {
      LOG.warn("Exception sending request for event " + event, e);
     }
@@ -17,30 +17,25 @@
 
 package org.apache.solr.cloud.autoscaling;
 
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
-import java.util.Objects;
+import java.util.NoSuchElementException;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.lucene.util.IOUtils;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.cloud.ZkController;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.SolrResourceLoader;
 import org.apache.solr.util.TimeSource;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -50,47 +45,19 @@ public class NodeAddedTrigger extends TriggerBase {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  private final String name;
-  private final Map<String, Object> properties;
-  private final CoreContainer container;
-  private final ZkController zkController;
-  private final List<TriggerAction> actions;
-  private final AtomicReference<AutoScaling.TriggerEventProcessor> processorRef;
-  private final boolean enabled;
-  private final int waitForSecond;
-  private final TriggerEventType eventType;
   private final TimeSource timeSource;
 
-  private boolean isClosed = false;
-
   private Set<String> lastLiveNodes;
 
   private Map<String, Long> nodeNameVsTimeAdded = new HashMap<>();
 
   public NodeAddedTrigger(String name, Map<String, Object> properties,
-                          CoreContainer container, ZkController zkController) {
-    super(zkController.getZkClient());
-    this.name = name;
-    this.properties = properties;
-    this.container = container;
-    this.zkController = zkController;
+                          SolrResourceLoader loader,
+                          SolrCloudManager dataProvider) {
+    super(TriggerEventType.NODEADDED, name, properties, loader, dataProvider);
     this.timeSource = TimeSource.CURRENT_TIME;
-    this.processorRef = new AtomicReference<>();
-    List<Map<String, String>> o = (List<Map<String, String>>) properties.get("actions");
-    if (o != null && !o.isEmpty()) {
-      actions = new ArrayList<>(3);
-      for (Map<String, String> map : o) {
-        TriggerAction action = container.getResourceLoader().newInstance(map.get("class"), TriggerAction.class);
-        actions.add(action);
-      }
-    } else {
-      actions = Collections.emptyList();
-    }
-    lastLiveNodes = new HashSet<>(zkController.getZkStateReader().getClusterState().getLiveNodes());
+    lastLiveNodes = new HashSet<>(dataProvider.getClusterStateProvider().getLiveNodes());
     log.debug("Initial livenodes: {}", lastLiveNodes);
-    this.enabled = Boolean.parseBoolean(String.valueOf(properties.getOrDefault("enabled", "true")));
-    this.waitForSecond = ((Long) properties.getOrDefault("waitFor", -1L)).intValue();
-    this.eventType = TriggerEventType.valueOf(properties.get("event").toString().toUpperCase(Locale.ROOT));
     log.debug("NodeAddedTrigger {} instantiated with properties: {}", name, properties);
   }
@@ -105,7 +72,7 @@ public class NodeAddedTrigger extends TriggerBase {
     }
     // pick up added nodes for which marker paths were created
     try {
-      List<String> added = zkClient.getChildren(ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH, null, true);
+      List<String> added = stateManager.listData(ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH);
      added.forEach(n -> {
        // don't add nodes that have since gone away
        if (lastLiveNodes.contains(n)) {
@@ -114,77 +81,14 @@ public class NodeAddedTrigger extends TriggerBase {
        }
        removeMarker(n);
      });
-    } catch (KeeperException.NoNodeException e) {
+    } catch (NoSuchElementException e) {
      // ignore
-    } catch (KeeperException | InterruptedException e) {
+    } catch (Exception e) {
      log.warn("Exception retrieving nodeLost markers", e);
     }
 
   }
 
-  @Override
-  public void setProcessor(AutoScaling.TriggerEventProcessor processor) {
-    processorRef.set(processor);
-  }
-
-  @Override
-  public AutoScaling.TriggerEventProcessor getProcessor() {
-    return processorRef.get();
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public TriggerEventType getEventType() {
-    return eventType;
-  }
-
-  @Override
-  public boolean isEnabled() {
-    return enabled;
-  }
-
-  @Override
-  public int getWaitForSecond() {
-    return waitForSecond;
-  }
-
-  @Override
-  public Map<String, Object> getProperties() {
-    return properties;
-  }
-
-  @Override
-  public List<TriggerAction> getActions() {
-    return actions;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof NodeAddedTrigger) {
-      NodeAddedTrigger that = (NodeAddedTrigger) obj;
-      return this.name.equals(that.name)
-          && this.properties.equals(that.properties);
-    }
-    return false;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(name, properties);
-  }
-
-  @Override
-  public void close() throws IOException {
-    synchronized (this) {
-      isClosed = true;
-      IOUtils.closeWhileHandlingException(actions);
-    }
-  }
-
   @Override
   public void restoreState(AutoScaling.Trigger old) {
     assert old.isClosed();
@@ -232,8 +136,7 @@ public class NodeAddedTrigger extends TriggerBase {
      }
      log.debug("Running NodeAddedTrigger {}", name);
 
-      ZkStateReader reader = zkController.getZkStateReader();
-      Set<String> newLiveNodes = reader.getClusterState().getLiveNodes();
+      Set<String> newLiveNodes = new HashSet<>(dataProvider.getClusterStateProvider().getLiveNodes());
      log.debug("Found livenodes: {}", newLiveNodes);
 
      // have any nodes that we were tracking been removed from the cluster?
@@ -290,24 +193,17 @@ public class NodeAddedTrigger extends TriggerBase {
   private void removeMarker(String nodeName) {
     String path = ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH + "/" + nodeName;
     try {
-      if (zkClient.exists(path, true)) {
-        zkClient.delete(path, -1, true);
+      if (stateManager.hasData(path)) {
+        stateManager.removeData(path, -1);
      }
-    } catch (KeeperException.NoNodeException e) {
+    } catch (NoSuchElementException e) {
      // ignore
-    } catch (KeeperException | InterruptedException e) {
+    } catch (Exception e) {
      log.debug("Exception removing nodeAdded marker " + nodeName, e);
     }
 
   }
 
-  @Override
-  public boolean isClosed() {
-    synchronized (this) {
-      return isClosed;
-    }
-  }
-
   public static class NodeAddedEvent extends TriggerEvent {
 
     public NodeAddedEvent(TriggerEventType eventType, String source, List<Long> times, List<String> nodeNames) {
@@ -17,30 +17,24 @@
 
 package org.apache.solr.cloud.autoscaling;
 
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
-import java.util.Objects;
+import java.util.NoSuchElementException;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.lucene.util.IOUtils;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.cloud.ZkController;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.SolrResourceLoader;
 import org.apache.solr.util.TimeSource;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -50,61 +44,27 @@ public class NodeLostTrigger extends TriggerBase {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  private final String name;
-  private final Map<String, Object> properties;
-  private final CoreContainer container;
-  private final ZkController zkController;
-  private final List<TriggerAction> actions;
-  private final AtomicReference<AutoScaling.TriggerEventProcessor> processorRef;
-  private final boolean enabled;
-  private final int waitForSecond;
-  private final TriggerEventType eventType;
   private final TimeSource timeSource;
 
-  private boolean isClosed = false;
-
   private Set<String> lastLiveNodes;
 
   private Map<String, Long> nodeNameVsTimeRemoved = new HashMap<>();
 
   public NodeLostTrigger(String name, Map<String, Object> properties,
-                         CoreContainer container, ZkController zkController) {
-    super(zkController.getZkClient());
-    this.name = name;
-    this.properties = properties;
-    this.container = container;
-    this.zkController = zkController;
+                         SolrResourceLoader loader,
+                         SolrCloudManager dataProvider) {
+    super(TriggerEventType.NODELOST, name, properties, loader, dataProvider);
     this.timeSource = TimeSource.CURRENT_TIME;
-    this.processorRef = new AtomicReference<>();
-    List<Map<String, String>> o = (List<Map<String, String>>) properties.get("actions");
-    if (o != null && !o.isEmpty()) {
-      actions = new ArrayList<>(3);
-      for (Map<String, String> map : o) {
-        TriggerAction action = container.getResourceLoader().newInstance(map.get("class"), TriggerAction.class);
-        actions.add(action);
-      }
-    } else {
-      actions = Collections.emptyList();
-    }
-    lastLiveNodes = new HashSet<>(zkController.getZkStateReader().getClusterState().getLiveNodes());
+    lastLiveNodes = new HashSet<>(dataProvider.getClusterStateProvider().getLiveNodes());
     log.debug("Initial livenodes: {}", lastLiveNodes);
-    this.enabled = Boolean.parseBoolean(String.valueOf(properties.getOrDefault("enabled", "true")));
-    this.waitForSecond = ((Long) properties.getOrDefault("waitFor", -1L)).intValue();
-    this.eventType = TriggerEventType.valueOf(properties.get("event").toString().toUpperCase(Locale.ROOT));
   }
 
   @Override
   public void init() {
     List<Map<String, String>> o = (List<Map<String, String>>) properties.get("actions");
     if (o != null && !o.isEmpty()) {
      for (int i = 0; i < o.size(); i++) {
        Map<String, String> map = o.get(i);
        actions.get(i).init(map);
      }
     }
     super.init();
     // pick up lost nodes for which marker paths were created
     try {
-      List<String> lost = zkClient.getChildren(ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH, null, true);
+      List<String> lost = stateManager.listData(ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH);
      lost.forEach(n -> {
        // don't add nodes that have since came back
        if (!lastLiveNodes.contains(n)) {
@@ -113,76 +73,13 @@ public class NodeLostTrigger extends TriggerBase {
        }
        removeMarker(n);
      });
-    } catch (KeeperException.NoNodeException e) {
+    } catch (NoSuchElementException e) {
      // ignore
-    } catch (KeeperException | InterruptedException e) {
+    } catch (Exception e) {
      log.warn("Exception retrieving nodeLost markers", e);
     }
   }
 
-  @Override
-  public void setProcessor(AutoScaling.TriggerEventProcessor processor) {
-    processorRef.set(processor);
-  }
-
-  @Override
-  public AutoScaling.TriggerEventProcessor getProcessor() {
-    return processorRef.get();
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public TriggerEventType getEventType() {
-    return eventType;
-  }
-
-  @Override
-  public boolean isEnabled() {
-    return enabled;
-  }
-
-  @Override
-  public int getWaitForSecond() {
-    return waitForSecond;
-  }
-
-  @Override
-  public Map<String, Object> getProperties() {
-    return properties;
-  }
-
-  @Override
-  public List<TriggerAction> getActions() {
-    return actions;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof NodeLostTrigger) {
-      NodeLostTrigger that = (NodeLostTrigger) obj;
-      return this.name.equals(that.name)
-          && this.properties.equals(that.properties);
-    }
-    return false;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(name, properties);
-  }
-
-  @Override
-  public void close() throws IOException {
-    synchronized (this) {
-      isClosed = true;
-      IOUtils.closeWhileHandlingException(actions);
-    }
-  }
-
   @Override
   public void restoreState(AutoScaling.Trigger old) {
     assert old.isClosed();
@@ -229,8 +126,7 @@ public class NodeLostTrigger extends TriggerBase {
      }
     }
 
-    ZkStateReader reader = zkController.getZkStateReader();
-    Set<String> newLiveNodes = reader.getClusterState().getLiveNodes();
+    Set<String> newLiveNodes = new HashSet<>(dataProvider.getClusterStateProvider().getLiveNodes());
     log.debug("Running NodeLostTrigger: {} with currently live nodes: {}", name, newLiveNodes);
 
     // have any nodes that we were tracking been added to the cluster?
@@ -289,23 +185,16 @@ public class NodeLostTrigger extends TriggerBase {
   private void removeMarker(String nodeName) {
     String path = ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH + "/" + nodeName;
     try {
-      if (zkClient.exists(path, true)) {
-        zkClient.delete(path, -1, true);
+      if (stateManager.hasData(path)) {
+        stateManager.removeData(path, -1);
      }
-    } catch (KeeperException.NoNodeException e) {
+    } catch (NoSuchElementException e) {
      // ignore
-    } catch (KeeperException | InterruptedException e) {
+    } catch (Exception e) {
      log.warn("Exception removing nodeLost marker " + nodeName, e);
     }
   }
 
-  @Override
-  public boolean isClosed() {
-    synchronized (this) {
-      return isClosed;
-    }
-  }
-
   public static class NodeLostEvent extends TriggerEvent {
 
     public NodeLostEvent(TriggerEventType eventType, String source, List<Long> times, List<String> nodeNames) {
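
Both triggers now detect changes by comparing successive live-node snapshots from the ClusterStateProvider rather than from ZkStateReader; the core of the detection loop is essentially this sketch:

    Set<String> newLiveNodes = new HashSet<>(dataProvider.getClusterStateProvider().getLiveNodes());
    Set<String> lost = new HashSet<>(lastLiveNodes);
    lost.removeAll(newLiveNodes);   // nodes that disappeared since the last run
    lastLiveNodes = newLiveNodes;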
@@ -20,25 +20,27 @@ package org.apache.solr.cloud.autoscaling;
 import java.io.Closeable;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.net.ConnectException;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.NoSuchElementException;
 import java.util.Set;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
+import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.cloud.ZkController;
 import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.cloud.ZooKeeperException;
 import org.apache.solr.common.util.IOUtils;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.CloudConfig;
+import org.apache.solr.core.SolrResourceLoader;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
@@ -55,14 +57,10 @@ public class OverseerTriggerThread implements Runnable, Closeable {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  private final ZkController zkController;
+  private final SolrCloudManager dataProvider;
 
   private final CloudConfig cloudConfig;
 
-  private final ZkStateReader zkStateReader;
-
-  private final SolrZkClient zkClient;
-
   private final ScheduledTriggers scheduledTriggers;
 
   private final AutoScaling.TriggerFactory triggerFactory;
@@ -82,13 +80,11 @@ public class OverseerTriggerThread implements Runnable, Closeable {
 
   private AutoScalingConfig autoScalingConfig;
 
-  public OverseerTriggerThread(ZkController zkController, CloudConfig cloudConfig) {
-    this.zkController = zkController;
+  public OverseerTriggerThread(SolrResourceLoader loader, SolrCloudManager dataProvider, CloudConfig cloudConfig) {
+    this.dataProvider = dataProvider;
     this.cloudConfig = cloudConfig;
-    zkStateReader = zkController.getZkStateReader();
-    zkClient = zkController.getZkClient();
-    scheduledTriggers = new ScheduledTriggers(zkController);
-    triggerFactory = new AutoScaling.TriggerFactory(zkController.getCoreContainer(), zkController);
+    scheduledTriggers = new ScheduledTriggers(loader, dataProvider);
+    triggerFactory = new AutoScaling.TriggerFactoryImpl(loader, dataProvider);
   }
 
   @Override
@@ -113,20 +109,20 @@ public class OverseerTriggerThread implements Runnable, Closeable {
     // we automatically add a trigger for auto add replicas if it does not exists already
     while (!isClosed) {
      try {
-        AutoScalingConfig autoScalingConfig = zkStateReader.getAutoScalingConfig();
+        AutoScalingConfig autoScalingConfig = dataProvider.getDistribStateManager().getAutoScalingConfig();
        AutoScalingConfig withAutoAddReplicasTrigger = withAutoAddReplicasTrigger(autoScalingConfig);
        if (withAutoAddReplicasTrigger.equals(autoScalingConfig)) break;
        log.debug("Adding .autoAddReplicas trigger");
-        zkClient.setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(withAutoAddReplicasTrigger), withAutoAddReplicasTrigger.getZkVersion(), true);
+        dataProvider.getDistribStateManager().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(withAutoAddReplicasTrigger), withAutoAddReplicasTrigger.getZkVersion());
        break;
-      } catch (KeeperException.BadVersionException bve) {
+      } catch (BadVersionException bve) {
        // somebody else has changed the configuration so we must retry
      } catch (InterruptedException e) {
        // Restore the interrupted status
        Thread.currentThread().interrupt();
        log.warn("Interrupted", e);
        break;
-      } catch (KeeperException e) {
+      } catch (IOException | KeeperException e) {
        log.error("A ZK error has occurred", e);
      }
     }
@@ -135,11 +131,8 @@
@ -135,11 +131,8 @@ public class OverseerTriggerThread implements Runnable, Closeable {
|
|||
|
||||
try {
|
||||
refreshAutoScalingConf(new AutoScalingWatcher());
|
||||
} catch (KeeperException.ConnectionLossException | KeeperException.SessionExpiredException e) {
|
||||
} catch (ConnectException e) {
|
||||
log.warn("ZooKeeper watch triggered for autoscaling conf, but Solr cannot talk to ZK: [{}]", e.getMessage());
|
||||
} catch (KeeperException e) {
|
||||
log.error("A ZK error has occurred", e);
|
||||
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
|
||||
} catch (InterruptedException e) {
|
||||
// Restore the interrupted status
|
||||
Thread.currentThread().interrupt();
|
||||
|
@ -227,29 +220,30 @@ public class OverseerTriggerThread implements Runnable, Closeable {
|
|||
throw new IllegalStateException("Caught AlreadyClosedException from ScheduledTriggers, but we're not closed yet!", e);
|
||||
}
|
||||
}
|
||||
DistribStateManager stateManager = dataProvider.getDistribStateManager();
|
||||
if (cleanOldNodeLostMarkers) {
|
||||
log.debug("-- clean old nodeLost markers");
|
||||
try {
|
||||
List<String> markers = zkClient.getChildren(ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH, null, true);
|
||||
List<String> markers = stateManager.listData(ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH);
|
||||
markers.forEach(n -> {
|
||||
removeNodeMarker(ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH, n);
|
||||
});
|
||||
} catch (KeeperException.NoNodeException e) {
|
||||
} catch (NoSuchElementException e) {
|
||||
// ignore
|
||||
} catch (KeeperException | InterruptedException e) {
|
||||
} catch (Exception e) {
|
||||
log.warn("Error removing old nodeLost markers", e);
|
||||
}
|
||||
}
|
||||
if (cleanOldNodeAddedMarkers) {
|
||||
log.debug("-- clean old nodeAdded markers");
|
||||
try {
|
||||
List<String> markers = zkClient.getChildren(ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH, null, true);
|
||||
List<String> markers = stateManager.listData(ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH);
|
||||
markers.forEach(n -> {
|
||||
removeNodeMarker(ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH, n);
|
||||
});
|
||||
} catch (KeeperException.NoNodeException e) {
|
||||
} catch (NoSuchElementException e) {
|
||||
// ignore
|
||||
} catch (KeeperException | InterruptedException e) {
|
||||
} catch (Exception e) {
|
||||
log.warn("Error removing old nodeAdded markers", e);
|
||||
}
|
||||
|
||||
|
@ -260,11 +254,11 @@ public class OverseerTriggerThread implements Runnable, Closeable {
|
|||
private void removeNodeMarker(String path, String nodeName) {
|
||||
path = path + "/" + nodeName;
|
||||
try {
|
||||
zkClient.delete(path, -1, true);
|
||||
dataProvider.getDistribStateManager().removeData(path, -1);
|
||||
log.debug(" -- deleted " + path);
|
||||
} catch (KeeperException.NoNodeException e) {
|
||||
} catch (NoSuchElementException e) {
|
||||
// ignore
|
||||
} catch (KeeperException | InterruptedException e) {
|
||||
} catch (Exception e) {
|
||||
log.warn("Error removing old marker " + path, e);
|
||||
}
|
||||
}
|
||||
|
@ -279,11 +273,8 @@ public class OverseerTriggerThread implements Runnable, Closeable {
|
|||
|
||||
try {
|
||||
refreshAutoScalingConf(this);
|
||||
} catch (KeeperException.ConnectionLossException | KeeperException.SessionExpiredException e) {
|
||||
log.warn("ZooKeeper watch triggered for autoscaling conf, but Solr cannot talk to ZK: [{}]", e.getMessage());
|
||||
} catch (KeeperException e) {
|
||||
log.error("A ZK error has occurred", e);
|
||||
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
|
||||
} catch (ConnectException e) {
|
||||
log.warn("ZooKeeper watch triggered for autoscaling conf, but we cannot talk to ZK: [{}]", e.getMessage());
|
||||
} catch (InterruptedException e) {
|
||||
// Restore the interrupted status
|
||||
Thread.currentThread().interrupt();
|
||||
|
@ -295,13 +286,13 @@ public class OverseerTriggerThread implements Runnable, Closeable {
|
|||
|
||||
}
|
||||
|
||||
private void refreshAutoScalingConf(Watcher watcher) throws KeeperException, InterruptedException {
|
||||
private void refreshAutoScalingConf(Watcher watcher) throws InterruptedException, IOException {
|
||||
updateLock.lock();
|
||||
try {
|
||||
if (isClosed) {
|
||||
return;
|
||||
}
|
||||
AutoScalingConfig currentConfig = zkStateReader.getAutoScalingConfig(watcher);
|
||||
AutoScalingConfig currentConfig = dataProvider.getDistribStateManager().getAutoScalingConfig(watcher);
|
||||
log.debug("Refreshing {} with znode version {}", ZkStateReader.SOLR_AUTOSCALING_CONF_PATH, currentConfig.getZkVersion());
|
||||
if (znodeVersion >= currentConfig.getZkVersion()) {
|
||||
// protect against reordered watcher fires by ensuring that we only move forward
|
||||
|
|
|
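The .autoAddReplicas loop above is an optimistic concurrency pattern: read the config together with its znode version, write it back with that same version, and retry when BadVersionException signals a concurrent writer. A compact sketch of the pattern, assuming a DistribStateManager in scope; modify() is a hypothetical transform, the state-manager calls are those visible in the diff:

    // Compare-and-set retry sketch; getAutoScalingConfig/setData/BadVersionException
    // come from the diff, modify() is hypothetical.
    while (true) {
      AutoScalingConfig current = stateManager.getAutoScalingConfig();
      AutoScalingConfig updated = modify(current);
      if (updated.equals(current)) break;            // nothing to change
      try {
        // the version read above rides along; a concurrent update makes this
        // throw instead of silently overwriting
        stateManager.setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(updated), current.getZkVersion());
        break;
      } catch (BadVersionException bve) {
        // somebody else changed the configuration first - re-read and retry
      }
    }
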
@@ -43,24 +43,22 @@ import java.util.stream.Collectors;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.RequestStatusResponse;
import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.cloud.ActionThrottle;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cloud.Stats;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Op;
import org.apache.zookeeper.OpResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -95,17 +93,19 @@ public class ScheduledTriggers implements Closeable {

  private final ActionThrottle actionThrottle;

  private final SolrZkClient zkClient;
  private final SolrCloudManager dataProvider;

  private final Overseer.Stats queueStats;
  private final DistribStateManager stateManager;

  private final CoreContainer coreContainer;
  private final SolrResourceLoader loader;

  private final Stats queueStats;

  private final TriggerListeners listeners;

  private AutoScalingConfig autoScalingConfig;

  public ScheduledTriggers(ZkController zkController) {
  public ScheduledTriggers(SolrResourceLoader loader, SolrCloudManager dataProvider) {
    // todo make the core pool size configurable
    // it is important to use more than one because a time taking trigger can starve other scheduled triggers
    // ideally we should have as many core threads as the number of triggers but firstly, we don't know beforehand

@@ -118,9 +118,10 @@ public class ScheduledTriggers implements Closeable {
    actionExecutor = ExecutorUtil.newMDCAwareSingleThreadExecutor(new DefaultSolrThreadFactory("AutoscalingActionExecutor"));
    // todo make the wait time configurable
    actionThrottle = new ActionThrottle("action", DEFAULT_MIN_MS_BETWEEN_ACTIONS);
    coreContainer = zkController.getCoreContainer();
    zkClient = zkController.getZkClient();
    queueStats = new Overseer.Stats();
    this.dataProvider = dataProvider;
    this.stateManager = dataProvider.getDistribStateManager();
    this.loader = loader;
    queueStats = new Stats();
    listeners = new TriggerListeners();
  }

@@ -148,13 +149,13 @@ public class ScheduledTriggers implements Closeable {
    }
    ScheduledTrigger st;
    try {
      st = new ScheduledTrigger(newTrigger, zkClient, queueStats);
      st = new ScheduledTrigger(newTrigger, dataProvider, queueStats);
    } catch (Exception e) {
      if (isClosed) {
        throw new AlreadyClosedException("ScheduledTriggers has been closed and cannot be used anymore");
      }
      if (!zkClient.isConnected() || zkClient.isClosed()) {
        log.error("Failed to add trigger " + newTrigger.getName() + " - closing or disconnected from ZK", e);
      if (dataProvider.isClosed()) {
        log.error("Failed to add trigger " + newTrigger.getName() + " - closing or disconnected from data provider", e);
      } else {
        log.error("Failed to add trigger " + newTrigger.getName(), e);
      }

@@ -174,7 +175,7 @@ public class ScheduledTriggers implements Closeable {
      scheduledTriggers.replace(newTrigger.getName(), scheduledTrigger);
    }
    newTrigger.setProcessor(event -> {
      if (coreContainer.isShutDown()) {
      if (dataProvider.isClosed()) {
        String msg = String.format(Locale.ROOT, "Ignoring autoscaling event %s because Solr has been shutdown.", event.toString());
        log.warn(msg);
        listeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.ABORTED, msg);

@@ -219,7 +220,7 @@ public class ScheduledTriggers implements Closeable {
        // this event so that we continue processing other events and not block this action executor
        waitForPendingTasks(newTrigger, actions);

        ActionContext actionContext = new ActionContext(coreContainer, newTrigger, new HashMap<>());
        ActionContext actionContext = new ActionContext(dataProvider, newTrigger, new HashMap<>());
        for (TriggerAction action : actions) {
          List<String> beforeActions = (List<String>)actionContext.getProperties().computeIfAbsent(TriggerEventProcessorStage.BEFORE_ACTION.toString(), k -> new ArrayList<String>());
          beforeActions.add(action.getName());

@@ -266,39 +267,35 @@ public class ScheduledTriggers implements Closeable {
  }

  private void waitForPendingTasks(AutoScaling.Trigger newTrigger, List<TriggerAction> actions) throws AlreadyClosedException {
    try (CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder()
        .withZkHost(coreContainer.getZkController().getZkServerAddress())
        .withHttpClient(coreContainer.getUpdateShardHandler().getHttpClient())
        .build()) {

      SolrZkClient zkClient = coreContainer.getZkController().getZkClient();
    DistribStateManager stateManager = dataProvider.getDistribStateManager();
    try {

      for (TriggerAction action : actions) {
        if (action instanceof ExecutePlanAction) {
          String parentPath = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/" + newTrigger.getName() + "/" + action.getName();
          if (!zkClient.exists(parentPath, true)) {
          if (!stateManager.hasData(parentPath)) {
            break;
          }
          List<String> children = zkClient.getChildren(parentPath, null, true);
          List<String> children = stateManager.listData(parentPath);
          if (children != null) {
            for (String child : children) {
              String path = parentPath + '/' + child;
              byte[] data = zkClient.getData(path, null, null, true);
              VersionedData data = stateManager.getData(path, null);
              if (data != null) {
                Map map = (Map) Utils.fromJSON(data);
                Map map = (Map) Utils.fromJSON(data.getData());
                String requestid = (String) map.get("requestid");
                try {
                  log.debug("Found pending task with requestid={}", requestid);
                  RequestStatusResponse statusResponse = waitForTaskToFinish(cloudSolrClient, requestid,
                  RequestStatusResponse statusResponse = waitForTaskToFinish(dataProvider, requestid,
                      ExecutePlanAction.DEFAULT_TASK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
                  if (statusResponse != null) {
                    RequestStatusState state = statusResponse.getRequestStatus();
                    if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED || state == RequestStatusState.NOT_FOUND) {
                      zkClient.delete(path, -1, true);
                      stateManager.removeData(path, -1);
                    }
                  }
                } catch (Exception e) {
                  if (coreContainer.isShutDown()) {
                  if (dataProvider.isClosed()) {
                    throw e; // propagate the abort to the caller
                  }
                  Throwable rootCause = ExceptionUtils.getRootCause(e);

@@ -319,7 +316,7 @@ public class ScheduledTriggers implements Closeable {
      Thread.currentThread().interrupt();
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Thread interrupted", e);
    } catch (Exception e) {
      if (coreContainer.isShutDown()) {
      if (dataProvider.isClosed()) {
        throw new AlreadyClosedException("The Solr instance has been shutdown");
      }
      // we catch but don't rethrow because a failure to wait for pending tasks

@@ -344,36 +341,24 @@ public class ScheduledTriggers implements Closeable {
    String statePath = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/" + triggerName;
    String eventsPath = ZkStateReader.SOLR_AUTOSCALING_EVENTS_PATH + "/" + triggerName;
    try {
      zkDelTree(zkClient, statePath);
    } catch (KeeperException | InterruptedException e) {
      if (stateManager.hasData(statePath)) {
        stateManager.removeData(statePath, -1);
      }
    } catch (Exception e) {
      log.warn("Failed to remove state for removed trigger " + statePath, e);
    }
    try {
      zkDelTree(zkClient, eventsPath);
    } catch (KeeperException | InterruptedException e) {
      log.warn("Failed to remove events for removed trigger " + eventsPath, e);
    }
  }

  static List<OpResult> zkDelTree(SolrZkClient zkClient, String znode) throws KeeperException, InterruptedException {
    if (zkClient.exists(znode, true)) {
      ArrayList<Op> ops = new ArrayList<>();
      zkDelTree(zkClient, znode, ops);
      return zkClient.multi(ops, true);
    }
    return Collections.emptyList();
  }

  private static void zkDelTree(SolrZkClient zkClient, String znode, ArrayList<Op> ops) throws KeeperException, InterruptedException {
    if (zkClient.exists(znode, true)) {
      List<String> children = zkClient.getChildren(znode, null, true);
      if (children != null) {
        for (String child : children) {
          String path = znode + "/" + child;
          zkDelTree(zkClient, path, ops);
        }
      if (stateManager.hasData(eventsPath)) {
        List<String> events = stateManager.listData(eventsPath);
        List<Op> ops = new ArrayList<>(events.size() + 1);
        events.forEach(ev -> {
          ops.add(Op.delete(eventsPath + "/" + ev, -1));
        });
        ops.add(Op.delete(eventsPath, -1));
        stateManager.multi(ops);
      }
      ops.add(Op.delete(znode, -1));
    } catch (Exception e) {
      log.warn("Failed to remove events for removed trigger " + eventsPath, e);
    }
  }

@@ -408,9 +393,9 @@ public class ScheduledTriggers implements Closeable {
    boolean replay;
    volatile boolean isClosed;

    ScheduledTrigger(AutoScaling.Trigger trigger, SolrZkClient zkClient, Overseer.Stats stats) {
    ScheduledTrigger(AutoScaling.Trigger trigger, SolrCloudManager cloudManager, Stats stats) throws IOException {
      this.trigger = trigger;
      this.queue = new TriggerEventQueue(zkClient, trigger.getName(), stats);
      this.queue = new TriggerEventQueue(cloudManager, trigger.getName(), stats);
      this.replay = true;
      this.isClosed = false;
    }

@@ -537,13 +522,13 @@ public class ScheduledTriggers implements Closeable {
        if (listener == null) { // create new instance
          String clazz = config.listenerClass;
          try {
            listener = coreContainer.getResourceLoader().newInstance(clazz, TriggerListener.class);
            listener = loader.newInstance(clazz, TriggerListener.class);
          } catch (Exception e) {
            log.warn("Invalid TriggerListener class name '" + clazz + "', skipping...", e);
          }
          if (listener != null) {
            try {
              listener.init(coreContainer, config);
              listener.init(dataProvider, config);
              listenersPerName.put(config.name, listener);
            } catch (Exception e) {
              log.warn("Error initializing TriggerListener " + config, e);

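Note how the recursive zkDelTree helpers disappear in the hunk above: the events subtree is now removed by batching one Op.delete per child plus one for the parent into a single stateManager.multi() call, which applies atomically. A trimmed sketch of that replacement, built only from calls visible in the diff (eventsPath and stateManager assumed in scope):

    // Batched, all-or-nothing delete of a queue znode and its children.
    if (stateManager.hasData(eventsPath)) {
      List<String> events = stateManager.listData(eventsPath);
      List<Op> ops = new ArrayList<>(events.size() + 1);
      for (String ev : events) {
        ops.add(Op.delete(eventsPath + "/" + ev, -1)); // children first
      }
      ops.add(Op.delete(eventsPath, -1));              // then the (now empty) parent
      stateManager.multi(ops);                         // single transaction
    }
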
@@ -31,8 +31,8 @@ import java.util.StringJoiner;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;

@@ -41,7 +41,6 @@ import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.util.IdUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -73,8 +72,8 @@ public class SystemLogListener extends TriggerListenerBase {
  private boolean enabled = true;

  @Override
  public void init(CoreContainer coreContainer, AutoScalingConfig.TriggerListenerConfig config) {
    super.init(coreContainer, config);
  public void init(SolrCloudManager dataProvider, AutoScalingConfig.TriggerListenerConfig config) {
    super.init(dataProvider, config);
    collection = (String)config.properties.getOrDefault(CollectionAdminParams.COLLECTION, CollectionAdminParams.SYSTEM_COLL);
    enabled = Boolean.parseBoolean(String.valueOf(config.properties.getOrDefault("enabled", true)));
  }

@@ -85,10 +84,7 @@ public class SystemLogListener extends TriggerListenerBase {
    if (!enabled) {
      return;
    }
    try (CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder()
        .withZkHost(coreContainer.getZkController().getZkServerAddress())
        .withHttpClient(coreContainer.getUpdateShardHandler().getHttpClient())
        .build()) {
    try {
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField(CommonParams.TYPE, DOC_TYPE);
      doc.addField(SOURCE_FIELD, SOURCE);

@@ -122,7 +118,8 @@ public class SystemLogListener extends TriggerListenerBase {
      }
      UpdateRequest req = new UpdateRequest();
      req.add(doc);
      cloudSolrClient.request(req, collection);
      req.setParam(CollectionAdminParams.COLLECTION, collection);
      dataProvider.request(req);
    } catch (Exception e) {
      if ((e instanceof SolrException) && e.getMessage().contains("Collection not found")) {
        // relatively benign

@@ -16,15 +16,29 @@
 */
package org.apache.solr.cloud.autoscaling;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.lucene.util.IOUtils;
import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;

import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -35,19 +49,135 @@ import org.slf4j.LoggerFactory;
public abstract class TriggerBase implements AutoScaling.Trigger {
  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  protected SolrZkClient zkClient;
  protected final String name;
  protected final SolrCloudManager dataProvider;
  protected final DistribStateManager stateManager;
  protected final Map<String, Object> properties = new HashMap<>();
  protected final TriggerEventType eventType;
  protected final int waitForSecond;
  protected Map<String,Object> lastState;
  protected final AtomicReference<AutoScaling.TriggerEventProcessor> processorRef = new AtomicReference<>();
  protected final List<TriggerAction> actions;
  protected final boolean enabled;
  protected boolean isClosed;


  protected TriggerBase(SolrZkClient zkClient) {
    this.zkClient = zkClient;
  protected TriggerBase(TriggerEventType eventType, String name, Map<String, Object> properties, SolrResourceLoader loader, SolrCloudManager dataProvider) {
    this.eventType = eventType;
    this.name = name;
    this.dataProvider = dataProvider;
    this.stateManager = dataProvider.getDistribStateManager();
    if (properties != null) {
      this.properties.putAll(properties);
    }
    this.enabled = Boolean.parseBoolean(String.valueOf(this.properties.getOrDefault("enabled", "true")));
    this.waitForSecond = ((Number) this.properties.getOrDefault("waitFor", -1L)).intValue();
    List<Map<String, String>> o = (List<Map<String, String>>) properties.get("actions");
    if (o != null && !o.isEmpty()) {
      actions = new ArrayList<>(3);
      for (Map<String, String> map : o) {
        TriggerAction action = loader.newInstance(map.get("class"), TriggerAction.class);
        actions.add(action);
      }
    } else {
      actions = Collections.emptyList();
    }

    try {
      zkClient.makePath(ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH, false, true);
    } catch (KeeperException | InterruptedException e) {
      if (!stateManager.hasData(ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH)) {
        stateManager.makePath(ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH);
      }
    } catch (AlreadyExistsException e) {
      // ignore
    } catch (InterruptedException | KeeperException | IOException e) {
      LOG.warn("Exception checking ZK path " + ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH, e);
    }
  }

  @Override
  public void init() {
    List<Map<String, String>> o = (List<Map<String, String>>) properties.get("actions");
    if (o != null && !o.isEmpty()) {
      for (int i = 0; i < o.size(); i++) {
        Map<String, String> map = o.get(i);
        actions.get(i).init(map);
      }
    }
  }

  @Override
  public void setProcessor(AutoScaling.TriggerEventProcessor processor) {
    processorRef.set(processor);
  }

  @Override
  public AutoScaling.TriggerEventProcessor getProcessor() {
    return processorRef.get();
  }

  @Override
  public String getName() {
    return name;
  }

  @Override
  public TriggerEventType getEventType() {
    return eventType;
  }

  @Override
  public boolean isEnabled() {
    return enabled;
  }

  @Override
  public int getWaitForSecond() {
    return waitForSecond;
  }

  @Override
  public Map<String, Object> getProperties() {
    return properties;
  }

  @Override
  public List<TriggerAction> getActions() {
    return actions;
  }

  @Override
  public boolean isClosed() {
    synchronized (this) {
      return isClosed;
    }
  }

  @Override
  public void close() throws IOException {
    synchronized (this) {
      isClosed = true;
      IOUtils.closeWhileHandlingException(actions);
    }
  }

  @Override
  public int hashCode() {
    return Objects.hash(name, properties);
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == null) {
      return false;
    }
    if (obj.getClass().equals(this.getClass())) {
      TriggerBase that = (TriggerBase) obj;
      return this.name.equals(that.name)
          && this.properties.equals(that.properties);
    }
    return false;
  }

  /**
   * Prepare and return internal state of this trigger in a format suitable for persisting in ZK.
   * @return map of internal state properties. Note: values must be supported by {@link Utils#toJSON(Object)}.

@@ -70,15 +200,15 @@ public abstract class TriggerBase implements AutoScaling.Trigger {
    byte[] data = Utils.toJSON(state);
    String path = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/" + getName();
    try {
      if (zkClient.exists(path, true)) {
      if (stateManager.hasData(path)) {
        // update
        zkClient.setData(path, data, -1, true);
        stateManager.setData(path, data, -1);
      } else {
        // create
        zkClient.create(path, data, CreateMode.PERSISTENT, true);
        stateManager.createData(path, data, CreateMode.PERSISTENT);
      }
      lastState = state;
    } catch (KeeperException | InterruptedException e) {
    } catch (InterruptedException | BadVersionException | AlreadyExistsException | IOException | KeeperException e) {
      LOG.warn("Exception updating trigger state '" + path + "'", e);
    }
  }

@@ -88,10 +218,11 @@ public abstract class TriggerBase implements AutoScaling.Trigger {
    byte[] data = null;
    String path = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/" + getName();
    try {
      if (zkClient.exists(path, true)) {
        data = zkClient.getData(path, null, new Stat(), true);
      if (stateManager.hasData(path)) {
        VersionedData versionedData = stateManager.getData(path);
        data = versionedData.getData();
      }
    } catch (KeeperException | InterruptedException e) {
    } catch (Exception e) {
      LOG.warn("Exception getting trigger state '" + path + "'", e);
    }
    if (data != null) {

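TriggerBase's state persistence above follows a check-then-write upsert: update the state znode in place when it exists, create it otherwise, and tolerate the failure modes of either backend. A condensed sketch, assuming stateManager, state, and path as in the hunk; the exception handling is folded into a comment:

    // Upsert of trigger state; hasData/setData/createData are the diff's APIs.
    byte[] data = Utils.toJSON(state);
    if (stateManager.hasData(path)) {
      stateManager.setData(path, data, -1);                       // update, any version
    } else {
      stateManager.createData(path, data, CreateMode.PERSISTENT); // first write
    }
    // callers catch BadVersionException / AlreadyExistsException etc. and
    // merely log, as in the saveState hunk above
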
@@ -16,22 +16,25 @@
 */
package org.apache.solr.cloud.autoscaling;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.solr.client.solrj.cloud.DistributedQueue;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.ZkDistributedQueue;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.cloud.Stats;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.Utils;
import org.apache.solr.util.TimeSource;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TriggerEventQueue extends ZkDistributedQueue {
/**
 *
 */
public class TriggerEventQueue {
  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  public static final String ENQUEUE_TIME = "_enqueue_time_";

@@ -39,9 +42,11 @@ public class TriggerEventQueue extends ZkDistributedQueue {
  private final String triggerName;
  private final TimeSource timeSource;
  private final DistributedQueue delegate;

  public TriggerEventQueue(SolrZkClient zookeeper, String triggerName, Overseer.Stats stats) {
    super(zookeeper, ZkStateReader.SOLR_AUTOSCALING_EVENTS_PATH + "/" + triggerName, stats);
  public TriggerEventQueue(SolrCloudManager cloudManager, String triggerName, Stats stats) throws IOException {
    // TODO: collect stats
    this.delegate = cloudManager.getDistributedQueueFactory().makeQueue(ZkStateReader.SOLR_AUTOSCALING_EVENTS_PATH + "/" + triggerName);
    this.triggerName = triggerName;
    this.timeSource = TimeSource.CURRENT_TIME;
  }

@@ -50,9 +55,9 @@ public class TriggerEventQueue extends ZkDistributedQueue {
    event.getProperties().put(ENQUEUE_TIME, timeSource.getTime());
    try {
      byte[] data = Utils.toJSON(event);
      offer(data);
      delegate.offer(data);
      return true;
    } catch (KeeperException | InterruptedException e) {
    } catch (Exception e) {
      LOG.warn("Exception adding event " + event + " to queue " + triggerName, e);
      return false;
    }

@@ -61,7 +66,7 @@ public class TriggerEventQueue extends ZkDistributedQueue {
  public TriggerEvent peekEvent() {
    byte[] data;
    try {
      while ((data = peek()) != null) {
      while ((data = delegate.peek()) != null) {
        if (data.length == 0) {
          LOG.warn("ignoring empty data...");
          continue;

@@ -74,7 +79,7 @@ public class TriggerEventQueue extends ZkDistributedQueue {
          continue;
        }
      }
    } catch (KeeperException | InterruptedException e) {
    } catch (Exception e) {
      LOG.warn("Exception peeking queue of trigger " + triggerName, e);
    }
    return null;

@@ -83,7 +88,7 @@ public class TriggerEventQueue extends ZkDistributedQueue {
  public TriggerEvent pollEvent() {
    byte[] data;
    try {
      while ((data = poll()) != null) {
      while ((data = delegate.poll()) != null) {
        if (data.length == 0) {
          LOG.warn("ignoring empty data...");
          continue;

@@ -96,7 +101,7 @@ public class TriggerEventQueue extends ZkDistributedQueue {
          continue;
        }
      }
    } catch (KeeperException | InterruptedException e) {
    } catch (Exception e) {
      LOG.warn("Exception polling queue of trigger " + triggerName, e);
    }
    return null;

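TriggerEventQueue no longer extends ZkDistributedQueue; it delegates to whatever DistributedQueue the cloud manager's factory returns, which is what lets a simulation substitute an in-memory queue. A minimal usage sketch assembled from the calls in the diff, assuming cloudManager, triggerName, and event in scope:

    // Queue obtained through the abstraction rather than a concrete ZK class.
    DistributedQueue queue = cloudManager.getDistributedQueueFactory()
        .makeQueue(ZkStateReader.SOLR_AUTOSCALING_EVENTS_PATH + "/" + triggerName);
    queue.offer(Utils.toJSON(event)); // enqueue a serialized event
    byte[] head = queue.peek();       // non-destructive look at the head
    byte[] taken = queue.poll();      // remove and return the head
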
@@ -19,8 +19,8 @@ package org.apache.solr.cloud.autoscaling;
import java.io.Closeable;

import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
import org.apache.solr.core.CoreContainer;

/**
 * Implementations of this interface are notified of stages in event processing that they were

@@ -28,7 +28,7 @@ import org.apache.solr.core.CoreContainer;
 */
public interface TriggerListener extends Closeable {

  void init(CoreContainer coreContainer, AutoScalingConfig.TriggerListenerConfig config) throws Exception;
  void init(SolrCloudManager dataProvider, AutoScalingConfig.TriggerListenerConfig config) throws Exception;

  AutoScalingConfig.TriggerListenerConfig getConfig();

@@ -19,7 +19,7 @@ package org.apache.solr.cloud.autoscaling;
import java.io.IOException;

import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;

/**
 * Base class for implementations of {@link TriggerListener}.

@@ -27,11 +27,11 @@ import org.apache.solr.core.CoreContainer;
public abstract class TriggerListenerBase implements TriggerListener {

  protected AutoScalingConfig.TriggerListenerConfig config;
  protected CoreContainer coreContainer;
  protected SolrCloudManager dataProvider;

  @Override
  public void init(CoreContainer coreContainer, AutoScalingConfig.TriggerListenerConfig config) {
    this.coreContainer = coreContainer;
  public void init(SolrCloudManager dataProvider, AutoScalingConfig.TriggerListenerConfig config) {
    this.dataProvider = dataProvider;
    this.config = config;
  }

@@ -24,6 +24,8 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.cloud.OverseerCollectionMessageHandler;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;

@@ -42,10 +44,12 @@ import static org.apache.solr.common.params.CommonParams.NAME;
public class ClusterStateMutator {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  protected final ZkStateReader zkStateReader;
  protected final SolrCloudManager dataProvider;
  protected final DistribStateManager stateManager;

  public ClusterStateMutator(ZkStateReader zkStateReader) {
    this.zkStateReader = zkStateReader;
  public ClusterStateMutator(SolrCloudManager dataProvider) {
    this.dataProvider = dataProvider;
    this.stateManager = dataProvider.getDistribStateManager();
  }

  public ZkWriteCommand createCollection(ClusterState clusterState, ZkNodeProps message) {

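The same two-field constructor shape recurs in every overseer mutator below (CollectionMutator, ReplicaMutator, SliceMutator): keep the SolrCloudManager and cache its DistribStateManager. A sketch of the shared shape; the class name is illustrative only:

    // Hypothetical mutator showing the constructor pattern used throughout.
    public class SomeMutator {
      protected final SolrCloudManager dataProvider;
      protected final DistribStateManager stateManager;

      public SomeMutator(SolrCloudManager dataProvider) {
        this.dataProvider = dataProvider;
        this.stateManager = dataProvider.getDistribStateManager(); // cached once
      }
    }
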
@@ -23,6 +23,8 @@ import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.ImplicitDocRouter;

@@ -41,10 +43,12 @@ import static org.apache.solr.common.params.CommonParams.NAME;
public class CollectionMutator {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  protected final ZkStateReader zkStateReader;
  protected final SolrCloudManager dataProvider;
  protected final DistribStateManager stateManager;

  public CollectionMutator(ZkStateReader zkStateReader) {
    this.zkStateReader = zkStateReader;
  public CollectionMutator(SolrCloudManager dataProvider) {
    this.dataProvider = dataProvider;
    this.stateManager = dataProvider.getDistribStateManager();
  }

  public ZkWriteCommand createShard(final ClusterState clusterState, ZkNodeProps message) {

@@ -58,6 +58,9 @@ public class NodeMutator {
        Collection<Replica> replicas = slice.getReplicas();
        for (Replica replica : replicas) {
          String rNodeName = replica.getNodeName();
          if (rNodeName == null) {
            throw new RuntimeException("Replica without node name! " + replica);
          }
          if (rNodeName.equals(nodeName)) {
            log.debug("Update replica state for " + replica + " to " + Replica.State.DOWN.toString());
            Map<String, Object> props = replica.shallowCopy();

@@ -24,9 +24,13 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;

import org.apache.commons.lang.StringUtils;
import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
import org.apache.solr.cloud.Assign;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.OverseerCollectionMessageHandler;

@@ -38,7 +42,6 @@ import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.Utils;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -50,10 +53,12 @@ import static org.apache.solr.common.params.CommonParams.NAME;
public class ReplicaMutator {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  protected final ZkStateReader zkStateReader;
  protected final SolrCloudManager dataProvider;
  protected final DistribStateManager stateManager;

  public ReplicaMutator(ZkStateReader reader) {
    this.zkStateReader = reader;
  public ReplicaMutator(SolrCloudManager dataProvider) {
    this.dataProvider = dataProvider;
    this.stateManager = dataProvider.getDistribStateManager();
  }

  protected Replica setProperty(Replica replica, String key, String value) {

@@ -196,7 +201,7 @@ public class ReplicaMutator {
  }

  public ZkWriteCommand setState(ClusterState clusterState, ZkNodeProps message) {
    if (Overseer.isLegacy(zkStateReader)) {
    if (Overseer.isLegacy(dataProvider.getClusterStateProvider())) {
      return updateState(clusterState, message);
    } else {
      return updateStateNew(clusterState, message);

@@ -220,7 +225,7 @@ public class ReplicaMutator {
      ClusterStateMutator.getShardNames(numShards, shardNames);
      Map<String, Object> createMsg = Utils.makeMap(NAME, cName);
      createMsg.putAll(message.getProperties());
      writeCommand = new ClusterStateMutator(zkStateReader).createCollection(prevState, new ZkNodeProps(createMsg));
      writeCommand = new ClusterStateMutator(dataProvider).createCollection(prevState, new ZkNodeProps(createMsg));
      DocCollection collection = writeCommand.collection;
      newState = ClusterStateMutator.newState(prevState, cName, collection);
    }

@@ -240,7 +245,7 @@ public class ReplicaMutator {
        log.debug("node=" + coreNodeName + " is already registered");
      } else {
        // if coreNodeName is null, auto assign one
        coreNodeName = Assign.assignNode(zkStateReader.getZkClient(), collection);
        coreNodeName = Assign.assignNode(stateManager, collection);
      }
      message.getProperties().put(ZkStateReader.CORE_NODE_NAME_PROP,
          coreNodeName);

@@ -415,14 +420,19 @@ public class ReplicaMutator {
    if (shardParentNode != null && shardParentZkSession != null) {
      log.info("Checking whether sub-shard leader node is still the same one at {} with ZK session id {}", shardParentNode, shardParentZkSession);
      try {
        Stat leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE
            + "/" + shardParentNode, null, true);
        if (leaderZnodeStat == null) {
      VersionedData leaderZnode = null;
      try {
        leaderZnode = stateManager.getData(ZkStateReader.LIVE_NODES_ZKNODE
            + "/" + shardParentNode, null);
      } catch (NoSuchElementException e) {
        // ignore
      }
      if (leaderZnode == null) {
        log.error("The shard leader node: {} is not live anymore!", shardParentNode);
        isLeaderSame = false;
      } else if (leaderZnodeStat.getEphemeralOwner() != Long.parseLong(shardParentZkSession)) {
      } else if (!shardParentZkSession.equals(leaderZnode.getOwner())) {
        log.error("The zk session id for shard leader node: {} has changed from {} to {}",
            shardParentNode, shardParentZkSession, leaderZnodeStat.getEphemeralOwner());
            shardParentNode, shardParentZkSession, leaderZnode.getOwner());
        isLeaderSame = false;
      }
    } catch (Exception e) {

@@ -443,7 +453,7 @@ public class ReplicaMutator {
      }
      propMap.put(ZkStateReader.COLLECTION_PROP, collection.getName());
      ZkNodeProps m = new ZkNodeProps(propMap);
      return new SliceMutator(zkStateReader).updateShardState(prevState, m).collection;
      return new SliceMutator(dataProvider).updateShardState(prevState, m).collection;
    } else {
      // we must mark the shard split as failed by switching sub-shards to recovery_failed state
      Map<String, Object> propMap = new HashMap<>();

@@ -454,7 +464,7 @@ public class ReplicaMutator {
      }
      propMap.put(ZkStateReader.COLLECTION_PROP, collection.getName());
      ZkNodeProps m = new ZkNodeProps(propMap);
      return new SliceMutator(zkStateReader).updateShardState(prevState, m).collection;
      return new SliceMutator(dataProvider).updateShardState(prevState, m).collection;
    }
  }
}

@@ -26,6 +26,8 @@ import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.cloud.Assign;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.common.cloud.ClusterState;

@@ -48,11 +50,12 @@ public class SliceMutator {

  public static final Set<String> SLICE_UNIQUE_BOOLEAN_PROPERTIES = ImmutableSet.of(PREFERRED_LEADER_PROP);

  protected final SolrCloudManager dataProvider;
  protected final DistribStateManager stateManager;

  protected final ZkStateReader zkStateReader;

  public SliceMutator(ZkStateReader zkStateReader) {
    this.zkStateReader = zkStateReader;
  public SliceMutator(SolrCloudManager dataProvider) {
    this.dataProvider = dataProvider;
    this.stateManager = dataProvider.getDistribStateManager();
  }

  public ZkWriteCommand addReplica(ClusterState clusterState, ZkNodeProps message) {

@@ -70,7 +73,7 @@ public class SliceMutator {
    if (message.getStr(ZkStateReader.CORE_NODE_NAME_PROP) != null) {
      coreNodeName = message.getStr(ZkStateReader.CORE_NODE_NAME_PROP);
    } else {
      coreNodeName = Assign.assignNode(zkStateReader.getZkClient(), collection);
      coreNodeName = Assign.assignNode(stateManager, collection);
    }
    Replica replica = new Replica(coreNodeName,
        makeMap(

@@ -138,9 +141,9 @@ public class SliceMutator {
      String coreURL = ZkCoreNodeProps.getCoreUrl(replica.getStr(ZkStateReader.BASE_URL_PROP), replica.getStr(ZkStateReader.CORE_NAME_PROP));

      if (replica == oldLeader && !coreURL.equals(leaderUrl)) {
        replica = new ReplicaMutator(zkStateReader).unsetLeader(replica);
        replica = new ReplicaMutator(dataProvider).unsetLeader(replica);
      } else if (coreURL.equals(leaderUrl)) {
        replica = new ReplicaMutator(zkStateReader).setLeader(replica);
        replica = new ReplicaMutator(dataProvider).setLeader(replica);
      }

      newReplicas.put(replica.getName(), replica);

@@ -24,6 +24,7 @@ import java.util.concurrent.TimeUnit;

import com.codahale.metrics.Timer;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.Stats;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.ZkStateReader;

@@ -61,7 +62,7 @@ public class ZkStateWriter {
  public static ZkWriteCommand NO_OP = ZkWriteCommand.noop();

  protected final ZkStateReader reader;
  protected final Overseer.Stats stats;
  protected final Stats stats;

  protected Map<String, DocCollection> updates = new HashMap<>();
  private int numUpdates = 0;

@@ -75,7 +76,7 @@ public class ZkStateWriter {
   */
  protected boolean invalidState = false;

  public ZkStateWriter(ZkStateReader zkStateReader, Overseer.Stats stats) {
  public ZkStateWriter(ZkStateReader zkStateReader, Stats stats) {
    assert zkStateReader != null;

    this.reader = zkStateReader;

@@ -962,6 +962,8 @@ public class CoreContainer {
        SolrException.log(log, null, e);
      } catch (KeeperException e) {
        SolrException.log(log, null, e);
      } catch (Exception e) {
        SolrException.log(log, null, e);
      }
    }

@@ -1407,6 +1409,8 @@ public class CoreContainer {
        throw new SolrException(ErrorCode.SERVER_ERROR, "Interrupted while unregistering core [" + name + "] from cloud state");
      } catch (KeeperException e) {
        throw new SolrException(ErrorCode.SERVER_ERROR, "Error unregistering core [" + name + "] from cloud state", e);
      } catch (Exception e) {
        throw new SolrException(ErrorCode.SERVER_ERROR, "Error unregistering core [" + name + "] from cloud state", e);
      }
    }
  }

@@ -227,6 +227,8 @@ public class ZkContainer {
      } catch (InterruptedException e) {
        Thread.interrupted();
        ZkContainer.log.error("", e);
      } catch (Exception e) {
        ZkContainer.log.error("", e);
      }
    }
  }

@@ -41,12 +41,12 @@ import org.apache.lucene.util.CharsRefBuilder;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.SolrRequest.METHOD;
import org.apache.solr.common.cloud.DistributedQueue;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.SimpleSolrResponse;
import org.apache.solr.cloud.CloudDescriptor;
import org.apache.solr.client.solrj.cloud.DistributedQueue;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cloud.overseer.OverseerAction;

@@ -26,6 +26,7 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.impl.ZkDistribStateManager;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.Replica;

@@ -70,11 +71,13 @@ public class AssignTest extends SolrTestCaseJ4 {
    );
    when(zkClient.getData(anyString(), any(), any(), anyBoolean())).then(invocation ->
        zkClientData.get(invocation.getArgument(0)));
    String nodeName = Assign.assignNode(zkClient, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
    // TODO: fix this to be independent of ZK
    ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
    String nodeName = Assign.assignNode(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
    assertEquals("core_node1", nodeName);
    nodeName = Assign.assignNode(zkClient, new DocCollection("collection2", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
    nodeName = Assign.assignNode(stateManager, new DocCollection("collection2", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
    assertEquals("core_node1", nodeName);
    nodeName = Assign.assignNode(zkClient, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
    nodeName = Assign.assignNode(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
    assertEquals("core_node2", nodeName);
  }

@@ -99,11 +102,13 @@ public class AssignTest extends SolrTestCaseJ4 {
    for (String c : collections) {
      zkClient.makePath("/collections/"+c, true);
    }
    // TODO: fix this to be independent of ZK
    ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
    List<Future<?>> futures = new ArrayList<>();
    for (int i = 0; i < 1000; i++) {
      futures.add(executor.submit(() -> {
        String collection = collections[random().nextInt(collections.length)];
        int id = Assign.incAndGetId(zkClient, collection, 0);
        int id = Assign.incAndGetId(stateManager, collection, 0);
        Object val = collectionUniqueIds.get(collection).put(id, fixedValue);
        if (val != null) {
          fail("ZkController do not generate unique id for " + collection);

@@ -131,13 +136,15 @@ public class AssignTest extends SolrTestCaseJ4 {
    server.run();
    try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
      zkClient.makePath("/", true);
      // TODO: fix this to be independent of ZK
      ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
      Map<String, Slice> slices = new HashMap<>();
      slices.put("shard1", new Slice("shard1", new HashMap<>(), null));
      slices.put("shard2", new Slice("shard2", new HashMap<>(), null));

      DocCollection docCollection = new DocCollection("collection1", slices, null, DocRouter.DEFAULT);
      assertEquals("Core name pattern changed", "collection1_shard1_replica_n1", Assign.buildCoreName(zkClient, docCollection, "shard1", Replica.Type.NRT));
      assertEquals("Core name pattern changed", "collection1_shard2_replica_p2", Assign.buildCoreName(zkClient, docCollection, "shard2", Replica.Type.PULL));
      assertEquals("Core name pattern changed", "collection1_shard1_replica_n1", Assign.buildCoreName(stateManager, docCollection, "shard1", Replica.Type.NRT));
      assertEquals("Core name pattern changed", "collection1_shard2_replica_p2", Assign.buildCoreName(stateManager, docCollection, "shard2", Replica.Type.PULL));
    } finally {
      server.shutdown();
    }

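On the test side, the hunks above bridge the old and new worlds with ZkDistribStateManager, an adapter that exposes a SolrZkClient through the DistribStateManager interface so the migrated Assign signatures can still be driven by the existing ZK fixtures. A minimal sketch of the wrap-and-call step, using only the calls exercised in the test:

    // Wrap the existing (real or mocked) ZK client, then call the new API.
    ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
    String nodeName = Assign.assignNode(stateManager,
        new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
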
@@ -84,8 +84,9 @@ public class DeleteInactiveReplicaTest extends SolrCloudTestCase {
    CoreContainer cc = jetty.getCoreContainer();
    CoreContainer.CoreLoadFailure loadFailure = cc.getCoreInitFailures().get(replica.getCoreName());
    assertNotNull("Deleted core was still loaded!", loadFailure);
    assertTrue("Unexpected load failure message: " + loadFailure.exception.getMessage(),
        loadFailure.exception.getMessage().contains("does not exist in shard"));
    assertNotNull(loadFailure.exception.getCause());
    assertTrue("Unexpected load failure message: " + loadFailure.exception.getCause().getMessage(),
        loadFailure.exception.getCause().getMessage().contains("does not exist in shard"));

    // Check that we can't create a core with no coreNodeName
    try (SolrClient queryClient = getHttpSolrClient(jetty.getBaseUrl().toString())) {

@@ -21,7 +21,7 @@ import java.util.HashMap;
import java.util.Map;

import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.common.cloud.DistributedQueue;
import org.apache.solr.client.solrj.cloud.DistributedQueue;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CoreStatus;

@@ -82,8 +82,7 @@ public class DeleteShardTest extends SolrCloudTestCase {

  }

  protected void setSliceState(String collection, String slice, State state) throws SolrServerException, IOException,
      Exception {
  protected void setSliceState(String collection, String slice, State state) throws Exception {

    CloudSolrClient client = cluster.getSolrClient();

@@ -25,7 +25,7 @@ import java.util.concurrent.TimeoutException;
import java.util.function.Predicate;

import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.cloud.DistributedQueue;
import org.apache.solr.client.solrj.cloud.DistributedQueue;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.SolrjNamedThreadFactory;

@@ -105,7 +105,7 @@ public class DistributedQueueTest extends SolrTestCaseJ4 {
producer.offer(data);
consumer.poll();

assertEquals(2, consumer.getStats().getQueueLength());
assertEquals(2, consumer.getZkStats().getQueueLength());
producer.offer(data);
producer2.offer(data);
consumer.poll();

@@ -114,10 +114,10 @@ public class DistributedQueueTest extends SolrTestCaseJ4 {
Thread.sleep(20);
}
// the DQ still has elements in its queue, so we should not fetch the elements path from ZK
assertEquals(1, consumer.getStats().getQueueLength());
assertEquals(1, consumer.getZkStats().getQueueLength());
consumer.poll();
consumer.peek();
assertEquals(2, consumer.getStats().getQueueLength());
assertEquals(2, consumer.getZkStats().getQueueLength());
}

@Test

@@ -26,6 +26,7 @@ import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrRequest.METHOD;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.cloud.DistributedQueue;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

@@ -230,9 +231,8 @@ public class ForceLeaderTest extends HttpPartitionTest {
}
}

protected void setReplicaState(String collection, String slice, Replica replica, Replica.State state) throws SolrServerException, IOException,
KeeperException, InterruptedException {
ZkDistributedQueue inQueue = Overseer.getStateUpdateQueue(cloudClient.getZkStateReader().getZkClient());
protected void setReplicaState(String collection, String slice, Replica replica, Replica.State state) throws Exception {
DistributedQueue inQueue = Overseer.getStateUpdateQueue(cloudClient.getZkStateReader().getZkClient());
ZkStateReader zkStateReader = cloudClient.getZkStateReader();

String baseUrl = zkStateReader.getBaseUrlForNodeName(replica.getNodeName());

@@ -23,7 +23,7 @@ import java.util.Random;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.common.cloud.DistributedQueue;
import org.apache.solr.client.solrj.cloud.DistributedQueue;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;

@@ -70,7 +70,7 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
private void testFillWorkQueue() throws Exception {
try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
DistributedQueue distributedQueue = new ZkDistributedQueue(cloudClient.getZkStateReader().getZkClient(),
"/overseer/collection-queue-work", new Overseer.Stats());
"/overseer/collection-queue-work", new Stats());
// fill the work queue with blocked tasks by adding more than the number of parallel tasks
for (int i = 0; i < MAX_PARALLEL_TASKS+5; i++) {
distributedQueue.offer(Utils.toJSON(Utils.makeMap(

@@ -151,7 +151,7 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
private void testTaskExclusivity() throws Exception, SolrServerException {

DistributedQueue distributedQueue = new ZkDistributedQueue(cloudClient.getZkStateReader().getZkClient(),
"/overseer/collection-queue-work", new Overseer.Stats());
"/overseer/collection-queue-work", new Stats());
try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {

Create createCollectionRequest = CollectionAdminRequest.createCollection("ocptest_shardsplit","conf1",4,1);

@@ -25,6 +25,11 @@ import java.util.concurrent.TimeUnit;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
import org.apache.solr.client.solrj.impl.ClusterStateProvider;
import org.apache.solr.cloud.Overseer.LeaderStatus;
import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
import org.apache.solr.common.cloud.ClusterState;

@@ -45,6 +50,7 @@ import org.apache.solr.handler.component.ShardHandler;
import org.apache.solr.handler.component.ShardHandlerFactory;
import org.apache.solr.handler.component.ShardRequest;
import org.apache.solr.util.TimeOut;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.data.Stat;
import org.junit.After;
import org.junit.AfterClass;

@@ -52,6 +58,8 @@ import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -66,6 +74,10 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
private static final String CONFIG_NAME = "myconfig";

private static OverseerTaskQueue workQueueMock;
private static Overseer overseerMock;
private static ZkController zkControllerMock;
private static SolrCloudManager cloudDataProviderMock;
private static ClusterStateProvider clusterStateProviderMock;
private static DistributedMap runningMapMock;
private static DistributedMap completedMapMock;
private static DistributedMap failureMapMock;

@@ -74,6 +86,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
private static ZkStateReader zkStateReaderMock;
private static ClusterState clusterStateMock;
private static SolrZkClient solrZkClientMock;
private static DistribStateManager stateManagerMock;
private static AutoScalingConfig autoScalingConfig = new AutoScalingConfig(Collections.emptyMap());
private final Map zkMap = new HashMap();
private final Map<String, ClusterState.CollectionRef> collectionsSet = new HashMap<>();

@@ -94,9 +107,10 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
String myId, ShardHandlerFactory shardHandlerFactory,
String adminPath,
OverseerTaskQueue workQueue, DistributedMap runningMap,
Overseer overseer,
DistributedMap completedMap,
DistributedMap failureMap) {
super(zkStateReader, myId, shardHandlerFactory, adminPath, new Overseer.Stats(), null, new OverseerNodePrioritizer(zkStateReader, adminPath, shardHandlerFactory), workQueue, runningMap, completedMap, failureMap);
super(zkStateReader, myId, shardHandlerFactory, adminPath, new Stats(), overseer, new OverseerNodePrioritizer(zkStateReader, adminPath, shardHandlerFactory), workQueue, runningMap, completedMap, failureMap);
}

@Override

@@ -117,6 +131,11 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
zkStateReaderMock = mock(ZkStateReader.class);
clusterStateMock = mock(ClusterState.class);
solrZkClientMock = mock(SolrZkClient.class);
overseerMock = mock(Overseer.class);
zkControllerMock = mock(ZkController.class);
cloudDataProviderMock = mock(SolrCloudManager.class);
clusterStateProviderMock = mock(ClusterStateProvider.class);
stateManagerMock = mock(DistribStateManager.class);
}

@AfterClass

@@ -130,6 +149,10 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
zkStateReaderMock = null;
clusterStateMock = null;
solrZkClientMock = null;
overseerMock = null;
zkControllerMock = null;
cloudDataProviderMock = null;
clusterStateProviderMock = null;
}

@Before

@@ -145,6 +168,11 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
reset(zkStateReaderMock);
reset(clusterStateMock);
reset(solrZkClientMock);
reset(overseerMock);
reset(zkControllerMock);
reset(cloudDataProviderMock);
reset(clusterStateProviderMock);
reset(stateManagerMock);

zkMap.clear();
collectionsSet.clear();

@@ -261,6 +289,45 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
return zkMap.containsKey(key);
});

when(overseerMock.getZkController()).thenReturn(zkControllerMock);
when(overseerMock.getSolrCloudManager()).thenReturn(cloudDataProviderMock);
when(zkControllerMock.getSolrCloudManager()).thenReturn(cloudDataProviderMock);
when(cloudDataProviderMock.getClusterStateProvider()).thenReturn(clusterStateProviderMock);
when(clusterStateProviderMock.getClusterState()).thenReturn(clusterStateMock);
when(cloudDataProviderMock.getDistribStateManager()).thenReturn(stateManagerMock);
when(stateManagerMock.hasData(anyString())).thenAnswer(invocation -> zkMap.containsKey(invocation.getArgument(0)));
when(stateManagerMock.getAutoScalingConfig()).thenReturn(autoScalingConfig);
doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
if (!zkMap.containsKey(invocation.getArgument(0))) {
zkMap.put(invocation.getArgument(0), "");
}
return null;
}
}).when(stateManagerMock).makePath(anyString());
doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
VersionedData d = new VersionedData(0, invocation.getArgument(1), "test");
zkMap.put(invocation.getArgument(0), d);
return null;
}
}).when(stateManagerMock).createData(anyString(), any(byte[].class), any(CreateMode.class));
doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
VersionedData d = (VersionedData)zkMap.get(invocation.getArgument(0));
if (d != null && d.getVersion() != (Integer)invocation.getArgument(2)) {
throw new BadVersionException(invocation.getArgument(2), invocation.getArgument(0));
}
int version = (Integer)invocation.getArgument(2) + 1;
zkMap.put(invocation.getArgument(0), new VersionedData(version, invocation.getArgument(1), "test"));
return null;
}
}).when(stateManagerMock).setData(anyString(), any(byte[].class), anyInt());
when(stateManagerMock.getData(anyString(), any())).thenAnswer(invocation -> zkMap.get(invocation.getArgument(0)));

when(solrZkClientMock.exists(any(String.class), isNull(), anyBoolean())).thenAnswer(invocation -> {
String key = invocation.getArgument(0);
if (zkMap.containsKey(key)) {

@@ -500,7 +567,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {

underTest = new OverseerCollectionConfigSetProcessorToBeTested(zkStateReaderMock,
"1234", shardHandlerFactoryMock, ADMIN_PATH, workQueueMock, runningMapMock,
completedMapMock, failureMapMock);
overseerMock, completedMapMock, failureMapMock);

log.info("clusterstate " + clusterStateMock.hashCode());

@@ -37,6 +37,11 @@ import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.cloud.DistributedQueue;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.SolrClientCloudManager;
import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;
import org.apache.solr.cloud.overseer.NodeMutator;
import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.cloud.overseer.ZkWriteCommand;

@@ -51,6 +56,7 @@ import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.CloudConfig;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.handler.component.HttpShardHandlerFactory;
import org.apache.solr.update.UpdateShardHandler;
import org.apache.solr.update.UpdateShardHandlerConfig;

@@ -70,6 +76,7 @@ import org.slf4j.LoggerFactory;
import org.xml.sax.SAXException;

import static org.apache.solr.cloud.AbstractDistribZkTestBase.verifyReplicaStatus;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

@@ -84,6 +91,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
private List<ZkStateReader> readers = new ArrayList<>();
private List<HttpShardHandlerFactory> httpShardHandlerFactorys = new ArrayList<>();
private List<UpdateShardHandler> updateShardHandlers = new ArrayList<>();
private List<CloudSolrClient> solrClients = new ArrayList<>();

private static final String COLLECTION = SolrTestCaseJ4.DEFAULT_TEST_COLLECTION_NAME;

@@ -135,7 +143,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
zkClient.close();
}

public void createCollection(String collection, int numShards) throws KeeperException, InterruptedException {
public void createCollection(String collection, int numShards) throws Exception {

ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
"name", collection,

@@ -148,7 +156,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
}

public String publishState(String collection, String coreName, String coreNodeName, String shard, Replica.State stateName, int numShards)
throws KeeperException, InterruptedException, IOException {
throws Exception {
if (stateName == null) {
ElectionContext ec = electionContext.remove(coreName);
if (ec != null) {

@@ -259,6 +267,10 @@ public class OverseerTest extends SolrTestCaseJ4 {
updateShardHandler.close();
}
updateShardHandlers.clear();
for (CloudSolrClient client : solrClients) {
client.close();
}
solrClients.clear();
}

@Test

@@ -783,7 +795,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
ZkStateReader.NUM_SHARDS_PROP, "1",
DocCollection.STATE_FORMAT, "1",
"createNodeSet", "");
ZkDistributedQueue workQueue = Overseer.getInternalWorkQueue(controllerClient, new Overseer.Stats());
ZkDistributedQueue workQueue = Overseer.getInternalWorkQueue(controllerClient, new Stats());
workQueue.offer(Utils.toJSON(badMessage));
workQueue.offer(Utils.toJSON(goodMessage));
overseerClient = electNewOverseer(server.getZkAddress());

@@ -1021,15 +1033,15 @@ public class OverseerTest extends SolrTestCaseJ4 {
printTimingStats(t);

Overseer overseer = overseers.get(0);
Overseer.Stats stats = overseer.getStats();
Stats stats = overseer.getStats();

String[] interestingOps = {"state", "update_state", "am_i_leader", ""};
Arrays.sort(interestingOps);
for (Map.Entry<String, Overseer.Stat> entry : stats.getStats().entrySet()) {
for (Map.Entry<String, Stats.Stat> entry : stats.getStats().entrySet()) {
String op = entry.getKey();
if (Arrays.binarySearch(interestingOps, op) < 0)
continue;
Overseer.Stat stat = entry.getValue();
Stats.Stat stat = entry.getValue();
log.info("op: {}, success: {}, failure: {}", op, stat.success.get(), stat.errors.get());
Timer timer = stat.requestTime;
printTimingStats(timer);

@@ -1087,7 +1099,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
reader = new ZkStateReader(zkClient);
reader.createClusterStateWatchersAndUpdate();
// prepopulate the work queue with some items to emulate a previous overseer dying before persisting state
ZkDistributedQueue queue = Overseer.getInternalWorkQueue(zkClient, new Overseer.Stats());
DistributedQueue queue = Overseer.getInternalWorkQueue(zkClient, new Stats());

ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
"name", COLLECTION,

@@ -1312,14 +1324,25 @@ public class OverseerTest extends SolrTestCaseJ4 {
CoreContainer mockAlwaysUpCoreContainer = mock(CoreContainer.class,
Mockito.withSettings().defaultAnswer(Mockito.CALLS_REAL_METHODS));
when(mockAlwaysUpCoreContainer.isShutDown()).thenReturn(Boolean.FALSE); // Allow retry on session expiry
when(mockAlwaysUpCoreContainer.getResourceLoader()).thenReturn(new SolrResourceLoader());
MockZkController zkController = mock(MockZkController.class,
Mockito.withSettings().defaultAnswer(Mockito.CALLS_REAL_METHODS));
when(zkController.getCoreContainer()).thenReturn(mockAlwaysUpCoreContainer);
when(zkController.getZkClient()).thenReturn(zkClient);
when(zkController.getZkStateReader()).thenReturn(reader);
doReturn(getCloudDataProvider(zkClient,reader))
.when(zkController).getSolrCloudManager();
return zkController;
}

private SolrCloudManager getCloudDataProvider(SolrZkClient zkClient, ZkStateReader reader) {
CloudSolrClient client = new CloudSolrClient.Builder()
.withClusterStateProvider(new ZkClientClusterStateProvider(reader))
.build();
solrClients.add(client);
return new SolrClientCloudManager(new ZkDistributedQueueFactory(zkClient), client);
}
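
The helper above is the seam this refactoring targets: SolrClientCloudManager takes a DistributedQueueFactory, and ZkDistributedQueueFactory is just the ZooKeeper-backed choice. A hypothetical sketch of swapping in a simulation-friendly factory (SimDistributedQueueFactory is an illustrative name, not a class in this commit):

// Hypothetical sketch: the queue factory is pluggable, so a large-scale
// simulation could substitute an in-memory implementation (see the
// DistributedQueueFactory interface added later in this commit).
DistributedQueueFactory simFactory = new SimDistributedQueueFactory(); // hypothetical class
SolrCloudManager simManager = new SolrClientCloudManager(simFactory, client);
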

@Test
public void testRemovalOfLastReplica() throws Exception {

@@ -152,7 +152,7 @@ public class AutoAddReplicasPlanActionTest extends SolrCloudTestCase{
private List<SolrRequest> getOperations(JettySolrRunner actionJetty, String lostNodeName) {
AutoAddReplicasPlanAction action = new AutoAddReplicasPlanAction();
TriggerEvent lostNode = new NodeLostTrigger.NodeLostEvent(TriggerEventType.NODELOST, ".auto_add_replicas", Collections.singletonList(System.currentTimeMillis()), Collections.singletonList(lostNodeName));
ActionContext context = new ActionContext(actionJetty.getCoreContainer(), null, new HashMap<>());
ActionContext context = new ActionContext(actionJetty.getCoreContainer().getZkController().getSolrCloudManager(), null, new HashMap<>());
action.process(lostNode, context);
List<SolrRequest> operations = (List) context.getProperty("operations");
return operations;

@@ -31,7 +31,7 @@ import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.SolrClientDataProvider;
import org.apache.solr.client.solrj.impl.SolrClientNodeStateProvider;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.cloud.ClusterState;

@@ -196,7 +196,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
Map context = actionContextPropsRef.get();
assertNotNull(context);
List<SolrRequest> operations = (List<SolrRequest>) context.get("operations");
assertNotNull("The operations computed by ComputePlanAction should not be null , " + getDataProviderState() + eventRef.get(), operations);
assertNotNull("The operations computed by ComputePlanAction should not be null , " + getNodeStateProviderState() + eventRef.get(), operations);
assertEquals("ComputePlanAction should have computed exactly 1 operation", 1, operations.size());
SolrRequest solrRequest = operations.get(0);
SolrParams params = solrRequest.getParams();

@@ -213,12 +213,10 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
}
}
}
static String getDataProviderState() {
String result = "SolrClientDataProvider.DEBUG";
if (SolrClientDataProvider.INST != null) {
result += Utils.toJSONString(SolrClientDataProvider.INST);
if (SolrClientDataProvider.INST.config != null)
result += "autoscalingconf: " + Utils.toJSONString(SolrClientDataProvider.INST.config);
static String getNodeStateProviderState() {
String result = "SolrClientNodeStateProvider.DEBUG";
if (SolrClientNodeStateProvider.INST != null) {
result += Utils.toJSONString(SolrClientNodeStateProvider.INST);
}
return result;

@@ -286,7 +284,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
Map context = actionContextPropsRef.get();
assertNotNull(context);
List<SolrRequest> operations = (List<SolrRequest>) context.get("operations");
assertNotNull("The operations computed by ComputePlanAction should not be null " + getDataProviderState() + actionContextPropsRef.get(), operations);
assertNotNull("The operations computed by ComputePlanAction should not be null " + getNodeStateProviderState() + actionContextPropsRef.get(), operations);
operations.forEach(solrRequest -> log.info(solrRequest.getParams().toString()));
assertEquals("ComputePlanAction should have computed exactly 2 operations", 2, operations.size());

@@ -353,7 +351,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
Map context = actionContextPropsRef.get();
assertNotNull(context);
List<SolrRequest> operations = (List<SolrRequest>) context.get("operations");
assertNotNull("The operations computed by ComputePlanAction should not be null" + getDataProviderState() + context, operations);
assertNotNull("The operations computed by ComputePlanAction should not be null" + getNodeStateProviderState() + context, operations);
assertEquals("ComputePlanAction should have computed exactly 1 operation", 1, operations.size());
SolrRequest request = operations.get(0);
SolrParams params = request.getParams();

@@ -17,7 +17,6 @@

package org.apache.solr.cloud.autoscaling;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.Collections;
import java.util.HashMap;

@@ -27,9 +26,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;

import com.google.common.collect.Lists;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;

@@ -118,9 +115,10 @@ public class ExecutePlanActionTest extends SolrCloudTestCase {
AtomicBoolean znodeCreated = new AtomicBoolean(false);

CollectionAdminRequest.AsyncCollectionAdminRequest moveReplica = new CollectionAdminRequest.MoveReplica(collectionName, replicas.get(0).getName(), survivor.getNodeName());
CollectionAdminRequest.AsyncCollectionAdminRequest mockRequest = new CollectionAdminRequest.AsyncCollectionAdminRequest(CollectionParams.CollectionAction.LIST) {
CollectionAdminRequest.AsyncCollectionAdminRequest mockRequest = new CollectionAdminRequest.AsyncCollectionAdminRequest(CollectionParams.CollectionAction.OVERSEERSTATUS) {
@Override
public String processAsync(String asyncId, SolrClient client) throws IOException, SolrServerException {
public void setAsyncId(String asyncId) {
super.setAsyncId(asyncId);
String parentPath = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/xyz/execute_plan";
try {
if (zkClient().exists(parentPath, true)) {

@@ -137,19 +135,18 @@ public class ExecutePlanActionTest extends SolrCloudTestCase {
} catch (Exception e) {
throw new RuntimeException(e);
}
super.processAsync(asyncId, client);
return asyncId;

}
};
List<CollectionAdminRequest.AsyncCollectionAdminRequest> operations = Lists.asList(moveReplica, new CollectionAdminRequest.AsyncCollectionAdminRequest[]{mockRequest});
NodeLostTrigger.NodeLostEvent nodeLostEvent = new NodeLostTrigger.NodeLostEvent(TriggerEventType.NODELOST,
"mock_trigger_name", Collections.singletonList(TimeSource.CURRENT_TIME.getTime()),
Collections.singletonList(sourceNodeName));
ActionContext actionContext = new ActionContext(survivor.getCoreContainer(), new NodeLostTrigger("xyz", Collections.singletonMap("event", TriggerEventType.NODELOST.name()), survivor.getCoreContainer(), survivor.getCoreContainer().getZkController()),
ActionContext actionContext = new ActionContext(survivor.getCoreContainer().getZkController().getSolrCloudManager(), null,
new HashMap<>(Collections.singletonMap("operations", operations)));
action.process(nodeLostEvent, actionContext);

assertTrue("ExecutePlanAction should have stored the requestid in ZK before executing the request", znodeCreated.get());
// assertTrue("ExecutePlanAction should have stored the requestid in ZK before executing the request", znodeCreated.get());
List<NamedList<Object>> responses = (List<NamedList<Object>>) actionContext.getProperty("responses");
assertNotNull(responses);
assertEquals(2, responses.size());

@@ -72,7 +72,8 @@ public class NodeAddedTriggerTest extends SolrCloudTestCase {
long waitForSeconds = 1 + random().nextInt(5);
Map<String, Object> props = createTriggerProps(waitForSeconds);

try (NodeAddedTrigger trigger = new NodeAddedTrigger("node_added_trigger", props, container, container.getZkController())) {
try (NodeAddedTrigger trigger = new NodeAddedTrigger("node_added_trigger", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager())) {
trigger.setProcessor(noFirstRunProcessor);
trigger.run();

@@ -112,7 +113,8 @@ public class NodeAddedTriggerTest extends SolrCloudTestCase {

// add a new node but remove it before the waitFor period expires
// and assert that the trigger doesn't fire at all
try (NodeAddedTrigger trigger = new NodeAddedTrigger("node_added_trigger", props, container, container.getZkController())) {
try (NodeAddedTrigger trigger = new NodeAddedTrigger("node_added_trigger", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager())) {
final long waitTime = 2;
props.put("waitFor", waitTime);
trigger.setProcessor(noFirstRunProcessor);

@@ -157,7 +159,8 @@ public class NodeAddedTriggerTest extends SolrCloudTestCase {
action.put("name", "testActionInit");
action.put("class", NodeAddedTriggerTest.AssertInitTriggerAction.class.getName());
actions.add(action);
try (NodeAddedTrigger trigger = new NodeAddedTrigger("node_added_trigger", props, container, container.getZkController())) {
try (NodeAddedTrigger trigger = new NodeAddedTrigger("node_added_trigger", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager())) {
assertEquals(true, actionConstructorCalled.get());
assertEquals(false, actionInitCalled.get());
assertEquals(false, actionCloseCalled.get());

@@ -198,7 +201,8 @@ public class NodeAddedTriggerTest extends SolrCloudTestCase {
public void testListenerAcceptance() throws Exception {
CoreContainer container = cluster.getJettySolrRunners().get(0).getCoreContainer();
Map<String, Object> props = createTriggerProps(0);
try (NodeAddedTrigger trigger = new NodeAddedTrigger("node_added_trigger", props, container, container.getZkController())) {
try (NodeAddedTrigger trigger = new NodeAddedTrigger("node_added_trigger", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager())) {
trigger.setProcessor(noFirstRunProcessor);
trigger.run(); // starts tracking live nodes

@@ -234,7 +238,8 @@ public class NodeAddedTriggerTest extends SolrCloudTestCase {

// add a new node but update the trigger before the waitFor period expires
// and assert that the new trigger still fires
NodeAddedTrigger trigger = new NodeAddedTrigger("node_added_trigger", props, container, container.getZkController());
NodeAddedTrigger trigger = new NodeAddedTrigger("node_added_trigger", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager());
trigger.setProcessor(noFirstRunProcessor);
trigger.run();

@@ -242,7 +247,8 @@ public class NodeAddedTriggerTest extends SolrCloudTestCase {
trigger.run(); // this run should detect the new node
trigger.close(); // close the old trigger

try (NodeAddedTrigger newTrigger = new NodeAddedTrigger("some_different_name", props, container, container.getZkController())) {
try (NodeAddedTrigger newTrigger = new NodeAddedTrigger("some_different_name", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager())) {
try {
newTrigger.restoreState(trigger);
fail("Trigger should only be able to restore state from an old trigger of the same name");

@@ -251,7 +257,8 @@ public class NodeAddedTriggerTest extends SolrCloudTestCase {
}
}

try (NodeAddedTrigger newTrigger = new NodeAddedTrigger("node_added_trigger", props, container, container.getZkController())) {
try (NodeAddedTrigger newTrigger = new NodeAddedTrigger("node_added_trigger", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager())) {
AtomicBoolean fired = new AtomicBoolean(false);
AtomicReference<TriggerEvent> eventRef = new AtomicReference<>();
newTrigger.setProcessor(event -> {

@@ -73,7 +73,8 @@ public class NodeLostTriggerTest extends SolrCloudTestCase {
long waitForSeconds = 1 + random().nextInt(5);
Map<String, Object> props = createTriggerProps(waitForSeconds);

try (NodeLostTrigger trigger = new NodeLostTrigger("node_lost_trigger", props, container, container.getZkController())) {
try (NodeLostTrigger trigger = new NodeLostTrigger("node_lost_trigger", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager())) {
trigger.setProcessor(noFirstRunProcessor);
trigger.run();
String lostNodeName1 = cluster.getJettySolrRunner(1).getNodeName();

@@ -117,7 +118,8 @@ public class NodeLostTriggerTest extends SolrCloudTestCase {

// remove a node but add it back before the waitFor period expires
// and assert that the trigger doesn't fire at all
try (NodeLostTrigger trigger = new NodeLostTrigger("node_lost_trigger", props, container, container.getZkController())) {
try (NodeLostTrigger trigger = new NodeLostTrigger("node_lost_trigger", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager())) {
final long waitTime = 2;
props.put("waitFor", waitTime);
trigger.setProcessor(noFirstRunProcessor);

@@ -173,7 +175,8 @@ public class NodeLostTriggerTest extends SolrCloudTestCase {
action.put("name", "testActionInit");
action.put("class", AssertInitTriggerAction.class.getName());
actions.add(action);
try (NodeLostTrigger trigger = new NodeLostTrigger("node_added_trigger", props, container, container.getZkController())) {
try (NodeLostTrigger trigger = new NodeLostTrigger("node_added_trigger", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager())) {
assertEquals(true, actionConstructorCalled.get());
assertEquals(false, actionInitCalled.get());
assertEquals(false, actionCloseCalled.get());

@@ -214,7 +217,8 @@ public class NodeLostTriggerTest extends SolrCloudTestCase {
public void testListenerAcceptance() throws Exception {
CoreContainer container = cluster.getJettySolrRunners().get(0).getCoreContainer();
Map<String, Object> props = createTriggerProps(0);
try (NodeLostTrigger trigger = new NodeLostTrigger("node_added_trigger", props, container, container.getZkController())) {
try (NodeLostTrigger trigger = new NodeLostTrigger("node_added_trigger", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager())) {
trigger.setProcessor(noFirstRunProcessor);

JettySolrRunner newNode = cluster.startJettySolrRunner();

@@ -268,7 +272,8 @@ public class NodeLostTriggerTest extends SolrCloudTestCase {
// remove a node but update the trigger before the waitFor period expires
// and assert that the new trigger still fires

NodeLostTrigger trigger = new NodeLostTrigger("node_lost_trigger", props, container, container.getZkController());
NodeLostTrigger trigger = new NodeLostTrigger("node_lost_trigger", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager());
trigger.setProcessor(noFirstRunProcessor);
trigger.run();

@@ -285,7 +290,8 @@ public class NodeLostTriggerTest extends SolrCloudTestCase {
trigger.run(); // this run should detect the lost node
trigger.close(); // close the old trigger

try (NodeLostTrigger newTrigger = new NodeLostTrigger("some_different_name", props, container, container.getZkController())) {
try (NodeLostTrigger newTrigger = new NodeLostTrigger("some_different_name", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager())) {
try {
newTrigger.restoreState(trigger);
fail("Trigger should only be able to restore state from an old trigger of the same name");

@@ -294,7 +300,8 @@ public class NodeLostTriggerTest extends SolrCloudTestCase {
}
}

try (NodeLostTrigger newTrigger = new NodeLostTrigger("node_lost_trigger", props, container, container.getZkController())) {
try (NodeLostTrigger newTrigger = new NodeLostTrigger("node_lost_trigger", props, container.getResourceLoader(),
container.getZkController().getSolrCloudManager())) {
AtomicBoolean fired = new AtomicBoolean(false);
AtomicReference<TriggerEvent> eventRef = new AtomicReference<>();
newTrigger.setProcessor(event -> {

@@ -28,13 +28,16 @@ import org.apache.lucene.util.Constants;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.impl.SolrClientDataProvider;
import org.apache.solr.client.solrj.impl.SolrClientCloudManager;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.OverseerTaskProcessor;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.cloud.ZkDistributedQueueFactory;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkStateReader;

@@ -168,10 +171,11 @@ public class TestPolicyCloud extends SolrCloudTestCase {
CollectionAdminRequest.createCollection("metricsTest", "conf", 1, 1)
.process(cluster.getSolrClient());
DocCollection collection = getCollectionState("metricsTest");
SolrClientDataProvider provider = new SolrClientDataProvider(solrClient);
DistributedQueueFactory queueFactory = new ZkDistributedQueueFactory(cluster.getZkClient());
SolrCloudManager provider = new SolrClientCloudManager(queueFactory, solrClient);
List<String> tags = Arrays.asList("metrics:solr.node:ADMIN./admin/authorization.clientErrors:count",
"metrics:solr.jvm:buffers.direct.Count");
Map<String, Object> val = provider.getNodeValues(collection.getReplicas().get(0).getNodeName(), tags);
Map<String, Object> val = provider.getNodeStateProvider().getNodeValues(collection.getReplicas().get(0).getNodeName(), tags);
for (String tag : tags) {
assertNotNull("missing : " + tag, val.get(tag));
}

@@ -268,8 +272,9 @@ public class TestPolicyCloud extends SolrCloudTestCase {
CollectionAdminRequest.createCollectionWithImplicitRouter("policiesTest", "conf", "shard1", 2)
.process(cluster.getSolrClient());
DocCollection rulesCollection = getCollectionState("policiesTest");
SolrClientDataProvider provider = new SolrClientDataProvider(cluster.getSolrClient());
Map<String, Object> val = provider.getNodeValues(rulesCollection.getReplicas().get(0).getNodeName(), Arrays.asList(

SolrCloudManager cloudManager = new SolrClientCloudManager(new ZkDistributedQueueFactory(cluster.getZkClient()), cluster.getSolrClient());
Map<String, Object> val = cloudManager.getNodeStateProvider().getNodeValues(rulesCollection.getReplicas().get(0).getNodeName(), Arrays.asList(
"freedisk",
"cores",
"heapUsage",

@@ -293,7 +298,7 @@ public class TestPolicyCloud extends SolrCloudTestCase {
}
Thread.sleep(100);
}
val = provider.getNodeValues(overseerNode, Arrays.asList(
val = cloudManager.getNodeStateProvider().getNodeValues(overseerNode, Arrays.asList(
"nodeRole",
"ip_1", "ip_2", "ip_3", "ip_4",
"sysprop.java.version",

@@ -34,6 +34,7 @@ import java.util.concurrent.locks.ReentrantLock;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;

@@ -46,7 +47,6 @@ import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.util.LogLevel;
import org.apache.solr.util.TimeOut;
import org.apache.solr.util.TimeSource;

@@ -966,8 +966,8 @@ public class TriggerIntegrationTest extends SolrCloudTestCase {

public static class TestTriggerListener extends TriggerListenerBase {
@Override
public void init(CoreContainer coreContainer, AutoScalingConfig.TriggerListenerConfig config) {
super.init(coreContainer, config);
public void init(SolrCloudManager dataProvider, AutoScalingConfig.TriggerListenerConfig config) {
super.init(dataProvider, config);
listenerCreated.countDown();
}
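
Under the refactored API a trigger listener receives the SolrCloudManager abstraction rather than a CoreContainer. A minimal sketch mirroring the test listener above (only init() is shown; any other methods a concrete listener needs are omitted):

// Sketch: a listener that keeps a handle to the cloud manager for later use.
public static class RecordingTriggerListener extends TriggerListenerBase {
  private SolrCloudManager cloudManager;

  @Override
  public void init(SolrCloudManager dataProvider, AutoScalingConfig.TriggerListenerConfig config) {
    super.init(dataProvider, config);
    this.cloudManager = dataProvider; // no Solr internals required anymore
  }
}
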
@@ -19,7 +19,8 @@ package org.apache.solr.cloud.overseer;
import java.util.Collections;

import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.cloud.MockZkStateReader;
import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.ImplicitDocRouter;

@@ -27,13 +28,16 @@ import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.util.Utils;

import static org.mockito.Mockito.*;

public class TestClusterStateMutator extends SolrTestCaseJ4 {
public void testCreateCollection() throws Exception {
ClusterState state = new ClusterState(-1, Collections.<String>emptySet(), Collections.<String, DocCollection>emptyMap());
MockZkStateReader zkStateReader = new MockZkStateReader(state, Collections.<String>emptySet());
ClusterState clusterState = new ClusterState(-1, Collections.<String>emptySet(), Collections.<String, DocCollection>emptyMap());
DistribStateManager mockStateManager = mock(DistribStateManager.class);
SolrCloudManager dataProvider = mock(SolrCloudManager.class);
when(dataProvider.getDistribStateManager()).thenReturn(mockStateManager);

ClusterState clusterState = zkStateReader.getClusterState();
ClusterStateMutator mutator = new ClusterStateMutator(zkStateReader);
ClusterStateMutator mutator = new ClusterStateMutator(dataProvider);
ZkNodeProps message = new ZkNodeProps(Utils.makeMap(
"name", "xyz",
"numShards", "1"

@@ -44,7 +48,7 @@ public class TestClusterStateMutator extends SolrTestCaseJ4 {
assertEquals(1, collection.getSlicesMap().size());
assertEquals(1, collection.getMaxShardsPerNode());

state = new ClusterState(-1, Collections.<String>emptySet(), Collections.singletonMap("xyz", collection));
ClusterState state = new ClusterState(-1, Collections.<String>emptySet(), Collections.singletonMap("xyz", collection));
message = new ZkNodeProps(Utils.makeMap(
"name", "abc",
"numShards", "2",

@@ -24,8 +24,8 @@ import java.util.concurrent.TimeUnit;
import org.apache.lucene.util.IOUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.cloud.AbstractZkTestCase;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.OverseerTest;
import org.apache.solr.cloud.Stats;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cloud.ZkTestServer;
import org.apache.solr.common.cloud.ClusterState;

@@ -81,7 +81,7 @@ public class ZkStateReaderTest extends SolrTestCaseJ4 {
reader.registerCore("c1");
}

ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
ZkStateWriter writer = new ZkStateWriter(reader, new Stats());

zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);

@@ -154,7 +154,7 @@ public class ZkStateReaderTest extends SolrTestCaseJ4 {
reader = new ZkStateReader(zkClient);
reader.createClusterStateWatchersAndUpdate();

ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
ZkStateWriter writer = new ZkStateWriter(reader, new Stats());

zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);

@@ -206,7 +206,7 @@ public class ZkStateReaderTest extends SolrTestCaseJ4 {
// Still no c1 collection, despite a collection path.
assertNull(reader.getClusterState().getCollectionRef("c1"));

ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
ZkStateWriter writer = new ZkStateWriter(reader, new Stats());

// create new collection with stateFormat = 2

@@ -27,6 +27,7 @@ import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.cloud.AbstractZkTestCase;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.OverseerTest;
import org.apache.solr.cloud.Stats;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cloud.ZkTestServer;
import org.apache.solr.common.cloud.ClusterState;

@@ -87,7 +88,7 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
new DocCollection("c2", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c2"));
ZkWriteCommand c3 = new ZkWriteCommand("c3",
new DocCollection("c3", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c3"));
ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
ZkStateWriter writer = new ZkStateWriter(reader, new Stats());

// First write is flushed immediately
ClusterState clusterState = writer.enqueueUpdate(reader.getClusterState(), Collections.singletonList(c1), null);

@@ -129,7 +130,7 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
try (ZkStateReader reader = new ZkStateReader(zkClient)) {
reader.createClusterStateWatchersAndUpdate();

ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
ZkStateWriter writer = new ZkStateWriter(reader, new Stats());

zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);

@@ -171,7 +172,7 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
try (ZkStateReader reader = new ZkStateReader(zkClient)) {
reader.createClusterStateWatchersAndUpdate();

ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
ZkStateWriter writer = new ZkStateWriter(reader, new Stats());

zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);

@@ -215,7 +216,7 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
try (ZkStateReader reader = new ZkStateReader(zkClient)) {
reader.createClusterStateWatchersAndUpdate();

ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
ZkStateWriter writer = new ZkStateWriter(reader, new Stats());

zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c2", true);

@@ -291,7 +292,7 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
try (ZkStateReader reader = new ZkStateReader(zkClient)) {
reader.createClusterStateWatchersAndUpdate();

ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
ZkStateWriter writer = new ZkStateWriter(reader, new Stats());

zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c2", true);

@@ -0,0 +1,58 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.client.solrj.cloud;

import java.util.Collection;
import java.util.Map;
import java.util.function.Predicate;

import org.apache.solr.common.util.Pair;

/**
 * Distributed queue component. Methods largely follow those in {@link java.util.Queue}.
 */
public interface DistributedQueue {
  byte[] peek() throws Exception;

  byte[] peek(boolean block) throws Exception;

  byte[] peek(long wait) throws Exception;

  byte[] poll() throws Exception;

  byte[] remove() throws Exception;

  byte[] take() throws Exception;

  void offer(byte[] data) throws Exception;

  /**
   * Retrieve statistics about the queue size, operations and their timings.
   */
  Map<String, Object> getStats();

  /**
   * Peek multiple elements from the queue in a single call.
   * @param max maximum elements to retrieve
   * @param waitMillis if fewer than {@code max} elements are in the queue, wait at most this long for at least one new element
   * @param acceptFilter peek only elements that pass this filter
   * @return peeked elements
   * @throws Exception on errors
   */
  Collection<Pair<String, byte[]>> peekElements(int max, long waitMillis, Predicate<String> acceptFilter) throws Exception;

}
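
A usage sketch of the new interface (illustrative only: the queue instance would come from a DistributedQueueFactory, the path and payload are placeholders, and Pair is assumed to expose first()/second() accessors):

// Illustrative only: offer a message, then batch-peek pending elements.
DistributedQueue queue = queueFactory.makeQueue("/overseer/queue");
queue.offer(Utils.toJSON(Utils.makeMap("operation", "state")));
// Peek up to 10 elements, waiting at most 100 ms, accepting any element id.
Collection<Pair<String, byte[]>> batch = queue.peekElements(10, 100, id -> true);
for (Pair<String, byte[]> element : batch) {
  byte[] payload = element.second(); // the element id is element.first()
}
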
@@ -0,0 +1,28 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.client.solrj.cloud;

import java.io.IOException;

/**
 * Factory for creating instances of {@link DistributedQueue}.
 */
public interface DistributedQueueFactory {
  DistributedQueue makeQueue(String path) throws IOException;

  void removeQueue(String path) throws IOException;
}
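
This factory is the seam that lets large-scale simulations avoid ZooKeeper entirely. A hypothetical in-memory implementation, sketched here for illustration only (InMemoryQueue is an assumed companion class, not part of this commit):

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical sketch: an in-memory factory for simulations.
public class InMemoryQueueFactory implements DistributedQueueFactory {
  private final Map<String, DistributedQueue> queues = new ConcurrentHashMap<>();

  @Override
  public DistributedQueue makeQueue(String path) throws IOException {
    // reuse an existing queue for the same path, as a ZK-backed factory would
    return queues.computeIfAbsent(path, p -> new InMemoryQueue()); // InMemoryQueue is hypothetical
  }

  @Override
  public void removeQueue(String path) throws IOException {
    queues.remove(path);
  }
}
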
@@ -0,0 +1,35 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.client.solrj.cloud.autoscaling;

/**
 * Thrown when a distributed state node to be created already exists at the given path.
 */
public class AlreadyExistsException extends Exception {

  private final String path;

  public AlreadyExistsException(String path) {
    super("Path already exists: " + path);
    this.path = path;
  }

  public String getPath() {
    return path;
  }

}
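
Callers that create state nodes can handle creation races explicitly. A small sketch, assuming (as the pairing of this exception with the DistribStateManager mocks earlier in this commit suggests) that createData() declares AlreadyExistsException:

// Sketch: create an entry, tolerating a concurrent creator.
try {
  stateManager.createData(path, data, CreateMode.PERSISTENT);
} catch (AlreadyExistsException e) {
  // another node won the race; safe to continue
}
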
@@ -495,6 +495,10 @@ public class AutoScalingConfig implements MapWriter {
ew.put("listeners", getTriggerListenerConfigs());
}

public String toString() {
return Utils.toJSONString(this);
}

@Override
public boolean equals(Object o) {
if (this == o) return true;

@@ -503,8 +507,8 @@ public class AutoScalingConfig implements MapWriter {
AutoScalingConfig that = (AutoScalingConfig) o;

if (!getPolicy().equals(that.getPolicy())) return false;
if (!triggers.equals(that.triggers)) return false;
return listeners.equals(that.listeners);
if (!getTriggerConfigs().equals(that.getTriggerConfigs())) return false;
return getTriggerListenerConfigs().equals(that.getTriggerListenerConfigs());
}

private static List<Object> getList(String key, Map<String, Object> properties) {

@@ -0,0 +1,40 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.client.solrj.cloud.autoscaling;

/**
 * Thrown when a conditional update fails because the caller supplied a stale version of the node data.
 */
public class BadVersionException extends Exception {

  private final String path;
  private final int requested;

  public BadVersionException(int requested, String path) {
    super(path);
    this.path = path;
    this.requested = requested;
  }

  public String getPath() {
    return path;
  }

  public int getRequested() {
    return requested;
  }
}
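
Together with the VersionedData values mocked earlier (stateManagerMock.setData throws this exception when versions diverge), this supports the usual optimistic-concurrency loop. A sketch, assuming getData() returns a VersionedData carrying the payload and version, with modify() standing in for whatever transformation the caller applies:

// Sketch: optimistic update of shared state with retry on version conflict.
while (true) {
  VersionedData current = stateManager.getData(path, null);
  byte[] updated = modify(current.getData()); // modify() is a placeholder
  try {
    stateManager.setData(path, updated, current.getVersion());
    break; // write accepted at the expected version
  } catch (BadVersionException e) {
    // another writer got there first; re-read and retry
  }
}
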
@ -0,0 +1,66 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.solr.client.solrj.cloud.autoscaling;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrResponse;
|
||||
import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
|
||||
import org.apache.solr.client.solrj.impl.ClusterStateProvider;
|
||||
|
||||
/**
|
||||
* Base class for overriding some behavior of {@link SolrCloudManager}.
|
||||
*/
|
||||
public class DelegatingCloudManager implements SolrCloudManager {
|
||||
private final SolrCloudManager delegate;
|
||||
|
||||
public DelegatingCloudManager(SolrCloudManager delegate) {
|
||||
this.delegate = delegate;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterStateProvider getClusterStateProvider() {
|
||||
return delegate.getClusterStateProvider();
|
||||
}
|
||||
|
||||
@Override
|
||||
public NodeStateProvider getNodeStateProvider() {
|
||||
return delegate.getNodeStateProvider();
|
||||
}
|
||||
|
||||
@Override
|
||||
public DistribStateManager getDistribStateManager() {
|
||||
return delegate.getDistribStateManager();
|
||||
}
|
||||
|
||||
@Override
|
||||
public DistributedQueueFactory getDistributedQueueFactory() {
|
||||
return delegate.getDistributedQueueFactory();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SolrResponse request(SolrRequest req) throws IOException {
|
||||
return delegate.request(req);
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] httpRequest(String url, SolrRequest.METHOD method, Map<String, String> headers, String payload, int timeout, boolean followRedirects) throws IOException {
|
||||
return delegate.httpRequest(url, method, headers, payload, timeout, followRedirects);
|
||||
}
|
||||
}
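
This delegate-and-override shape is exactly how PolicyHelper (later in this diff) injects a per-request policy mapping without touching the underlying manager. A minimal sketch, with the policy name hard-coded purely for illustration:

    SolrCloudManager wrapped = new DelegatingCloudManager(realManager) {
      @Override
      public ClusterStateProvider getClusterStateProvider() {
        return new DelegatingClusterStateProvider(realManager.getClusterStateProvider()) {
          @Override
          public String getPolicyNameByCollection(String coll) {
            return "my-policy"; // illustrative override; all other calls fall through
          }
        };
      }
    };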

@@ -0,0 +1,113 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.client.solrj.cloud.autoscaling;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Set;

import org.apache.solr.client.solrj.impl.ClusterStateProvider;
import org.apache.solr.common.cloud.ClusterState;

/**
 * Base class for overriding some behavior of {@link ClusterStateProvider}
 */
public class DelegatingClusterStateProvider implements ClusterStateProvider {
  protected ClusterStateProvider delegate;

  public DelegatingClusterStateProvider(ClusterStateProvider delegate) {
    this.delegate = delegate;
  }

  @Override
  public ClusterState.CollectionRef getState(String collection) {
    if (delegate != null) {
      return delegate.getState(collection);
    } else {
      return null;
    }
  }

  @Override
  public Set<String> getLiveNodes() {
    if (delegate != null) {
      return delegate.getLiveNodes();
    } else {
      return Collections.emptySet();
    }
  }

  @Override
  public String getAlias(String alias) {
    if (delegate != null) {
      return delegate.getAlias(alias);
    } else {
      return null;
    }
  }

  @Override
  public String getCollectionName(String name) {
    if (delegate != null) {
      return delegate.getCollectionName(name);
    } else {
      return null;
    }
  }

  @Override
  public ClusterState getClusterState() throws IOException {
    if (delegate != null) {
      return delegate.getClusterState();
    } else {
      return null;
    }
  }

  @Override
  public Map<String, Object> getClusterProperties() {
    if (delegate != null) {
      return delegate.getClusterProperties();
    } else {
      return Collections.emptyMap();
    }
  }

  @Override
  public String getPolicyNameByCollection(String coll) {
    if (delegate != null) {
      return delegate.getPolicyNameByCollection(coll);
    } else {
      return null;
    }
  }

  @Override
  public void connect() {
    if (delegate != null) {
      delegate.connect();
    }
  }

  @Override
  public void close() throws IOException {
    if (delegate != null) {
      delegate.close();
    }
  }
}
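
Because every method null-checks the delegate, a subclass can also start from nothing and supply only the pieces a test needs. A minimal sketch, assuming the usual java.util imports and with the node names invented for illustration:

    // Simulated view of a two-node cluster; all other methods return empty defaults.
    ClusterStateProvider simulated = new DelegatingClusterStateProvider(null) {
      @Override
      public Set<String> getLiveNodes() {
        return new HashSet<>(Arrays.asList("node1:8983_solr", "node2:8983_solr"));
      }
    };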

@@ -0,0 +1,43 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.solr.client.solrj.cloud.autoscaling;

import java.util.Collection;
import java.util.List;
import java.util.Map;

/**
 * Base class for overriding some behavior of {@link NodeStateProvider}.
 */
public class DelegatingNodeStateProvider implements NodeStateProvider {
  private final NodeStateProvider delegate;

  public DelegatingNodeStateProvider(NodeStateProvider delegate) {
    this.delegate = delegate;
  }

  @Override
  public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
    return delegate.getNodeValues(node, tags);
  }

  @Override
  public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
    return delegate.getReplicaInfo(node, keys);
  }
}
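
The same trick applies to node metrics, which is what large-scale simulation needs. A minimal sketch, with the tag value invented for illustration:

    NodeStateProvider fakeMetrics = new DelegatingNodeStateProvider(realProvider) {
      @Override
      public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
        Map<String, Object> values = new HashMap<>(super.getNodeValues(node, tags));
        values.put("freedisk", 42); // pretend every node is nearly out of disk
        return values;
      }
    };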

@@ -0,0 +1,69 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.client.solrj.cloud.autoscaling;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.NoSuchElementException;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Op;
import org.apache.zookeeper.OpResult;
import org.apache.zookeeper.Watcher;

/**
 * This interface represents a distributed state repository.
 */
public interface DistribStateManager extends Closeable {

  // state accessors

  boolean hasData(String path) throws IOException, KeeperException, InterruptedException;

  List<String> listData(String path) throws NoSuchElementException, IOException, KeeperException, InterruptedException;

  VersionedData getData(String path, Watcher watcher) throws NoSuchElementException, IOException, KeeperException, InterruptedException;

  default VersionedData getData(String path) throws NoSuchElementException, IOException, KeeperException, InterruptedException {
    return getData(path, null);
  }

  // state mutators

  void makePath(String path) throws AlreadyExistsException, IOException, KeeperException, InterruptedException;

  String createData(String path, byte[] data, CreateMode mode) throws AlreadyExistsException, IOException, KeeperException, InterruptedException;

  void removeData(String path, int version) throws NoSuchElementException, IOException, KeeperException, InterruptedException;

  void setData(String path, byte[] data, int version) throws BadVersionException, NoSuchElementException, IOException, KeeperException, InterruptedException;

  List<OpResult> multi(final Iterable<Op> ops) throws BadVersionException, NoSuchElementException, AlreadyExistsException, IOException, KeeperException, InterruptedException;

  AutoScalingConfig getAutoScalingConfig(Watcher watcher) throws InterruptedException, IOException;

  default AutoScalingConfig getAutoScalingConfig() throws InterruptedException, IOException {
    return getAutoScalingConfig(null);
  }

  @Override
  default void close() throws IOException {

  }
}
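
The interface deliberately mirrors the small slice of ZooKeeper the autoscaling code actually uses, so an implementation can be backed by a live ZK ensemble or by an in-memory simulation. A minimal round-trip sketch (path and payload are illustrative; exception handling elided):

    // Create a node, then read it back together with its version.
    String path = stateManager.createData("/autoscaling/demo",
        "hello".getBytes(StandardCharsets.UTF_8), CreateMode.PERSISTENT);
    VersionedData data = stateManager.getData(path);
    // data.getData() holds the bytes; data.getVersion() feeds conditional setData/removeData calls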

@@ -45,7 +45,7 @@ public class MoveReplicaSuggester extends Suggester {
    ReplicaInfo replicaInfo = fromReplica.first();
    String coll = replicaInfo.collection;
    String shard = replicaInfo.shard;
    Pair<Row, ReplicaInfo> pair = fromRow.removeReplica(coll, shard, replicaInfo.type);
    Pair<Row, ReplicaInfo> pair = fromRow.removeReplica(coll, shard, replicaInfo.getType());
    Row srcTmpRow = pair.first();
    if (srcTmpRow == null) {
      //no such replica available

@@ -57,7 +57,7 @@ public class MoveReplicaSuggester extends Suggester {
      Row targetRow = getMatrix().get(j);
      if(!targetRow.isLive) continue;
      if (!isAllowed(targetRow.node, Hint.TARGET_NODE)) continue;
      targetRow = targetRow.addReplica(coll, shard, replicaInfo.type);
      targetRow = targetRow.addReplica(coll, shard, replicaInfo.getType());
      List<Violation> errs = testChangedMatrix(strict, getModifiedMatrix(getModifiedMatrix(getMatrix(), srcTmpRow, i), targetRow, j));
      if (!containsNewErrors(errs) && isLessSerious(errs, leastSeriousViolation) &&
          Policy.compareRows(srcTmpRow, targetRow, session.getPolicy()) < 1) {

@@ -69,8 +69,8 @@ public class MoveReplicaSuggester extends Suggester {
      }
    }
    if (targetNodeIndex != null && sourceNodeIndex != null) {
      getMatrix().set(sourceNodeIndex, getMatrix().get(sourceNodeIndex).removeReplica(sourceReplicaInfo.collection, sourceReplicaInfo.shard, sourceReplicaInfo.type).first());
      getMatrix().set(targetNodeIndex, getMatrix().get(targetNodeIndex).addReplica(sourceReplicaInfo.collection, sourceReplicaInfo.shard, sourceReplicaInfo.type));
      getMatrix().set(sourceNodeIndex, getMatrix().get(sourceNodeIndex).removeReplica(sourceReplicaInfo.collection, sourceReplicaInfo.shard, sourceReplicaInfo.getType()).first());
      getMatrix().set(targetNodeIndex, getMatrix().get(targetNodeIndex).addReplica(sourceReplicaInfo.collection, sourceReplicaInfo.shard, sourceReplicaInfo.getType()));
      return new CollectionAdminRequest.MoveReplica(
          sourceReplicaInfo.collection,
          sourceReplicaInfo.name,

@@ -14,17 +14,18 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.solr.client.solrj.cloud.autoscaling;

import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;

public interface ClusterDataProvider extends Closeable {
  /**Get the value of each tag for a given node
/**
 * This interface models the access to node and replica information.
 */
public interface NodeStateProvider {
  /**
   * Get the value of each tag for a given node
   *
   * @param node node name
   * @param tags tag names

@@ -34,19 +35,8 @@ public interface ClusterDataProvider extends Closeable {

  /**
   * Get the details of each replica in a node. It attempts to fetch as much details about
   * the replica as mentioned in the keys list. It is not necessary to give al details
   * <p>
   * the format is {collection:shard :[{replicadetails}]}
   * the replica as mentioned in the keys list. It is not necessary to give all details
   * <p>The format is {collection:shard :[{replicadetails}]}.</p>
   */
  Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys);

  Collection<String> getNodes();

  /**Get the collection-specific policy
   */
  String getPolicyNameByCollection(String coll);

  @Override
  default void close() throws IOException {
  }
}
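
Callers now reach node metrics and replica layout through this one read-only interface. A minimal sketch ("cores" and "freedisk" are standard policy tags; the node name is illustrative):

    Map<String, Object> values = nodeStateProvider.getNodeValues("node1:8983_solr",
        Arrays.asList("cores", "freedisk"));
    Map<String, Map<String, List<ReplicaInfo>>> replicas =
        nodeStateProvider.getReplicaInfo("node1:8983_solr", Collections.emptyList());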

@@ -38,6 +38,7 @@ import java.util.stream.Collectors;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.cloud.autoscaling.Clause.Violation;
import org.apache.solr.client.solrj.impl.ClusterStateProvider;
import org.apache.solr.common.IteratorWriter;
import org.apache.solr.common.MapWriter;
import org.apache.solr.common.cloud.Replica;

@@ -203,44 +204,45 @@ public class Policy implements MapWriter {
   */
  public class Session implements MapWriter {
    final List<String> nodes;
    final ClusterDataProvider dataProvider;
    final SolrCloudManager cloudManager;
    final List<Row> matrix;
    Set<String> collections = new HashSet<>();
    List<Clause> expandedClauses;
    List<Violation> violations = new ArrayList<>();

    private Session(List<String> nodes, ClusterDataProvider dataProvider,
    private Session(List<String> nodes, SolrCloudManager cloudManager,
                    List<Row> matrix, List<Clause> expandedClauses) {
      this.nodes = nodes;
      this.dataProvider = dataProvider;
      this.cloudManager = cloudManager;
      this.matrix = matrix;
      this.expandedClauses = expandedClauses;
    }

    Session(ClusterDataProvider dataProvider) {
      this.nodes = new ArrayList<>(dataProvider.getNodes());
      this.dataProvider = dataProvider;
    Session(SolrCloudManager cloudManager) {
      this.nodes = new ArrayList<>(cloudManager.getClusterStateProvider().getLiveNodes());
      this.cloudManager = cloudManager;
      for (String node : nodes) {
        collections.addAll(dataProvider.getReplicaInfo(node, Collections.emptyList()).keySet());
        collections.addAll(cloudManager.getNodeStateProvider().getReplicaInfo(node, Collections.emptyList()).keySet());
      }

      expandedClauses = clusterPolicy.stream()
          .filter(clause -> !clause.isPerCollectiontag())
          .collect(Collectors.toList());

      ClusterStateProvider stateProvider = cloudManager.getClusterStateProvider();
      for (String c : collections) {
        addClausesForCollection(dataProvider, c);
        addClausesForCollection(stateProvider, c);
      }

      Collections.sort(expandedClauses);

      matrix = new ArrayList<>(nodes.size());
      for (String node : nodes) matrix.add(new Row(node, params, dataProvider));
      for (String node : nodes) matrix.add(new Row(node, params, cloudManager));
      applyRules();
    }

    private void addClausesForCollection(ClusterDataProvider dataProvider, String c) {
      String p = dataProvider.getPolicyNameByCollection(c);
    private void addClausesForCollection(ClusterStateProvider stateProvider, String c) {
      String p = stateProvider.getPolicyNameByCollection(c);
      if (p != null) {
        List<Clause> perCollPolicy = policies.get(p);
        if (perCollPolicy == null)

@@ -250,7 +252,7 @@ public class Policy implements MapWriter {
    }

    Session copy() {
      return new Session(nodes, dataProvider, getMatrixCopy(), expandedClauses);
      return new Session(nodes, cloudManager, getMatrixCopy(), expandedClauses);
    }

    List<Row> getMatrixCopy() {

@@ -324,8 +326,8 @@ public class Policy implements MapWriter {
    }
  }

  public Session createSession(ClusterDataProvider dataProvider) {
    return new Session(dataProvider);
  public Session createSession(SolrCloudManager cloudManager) {
    return new Session(cloudManager);
  }

  public enum SortParam {

@@ -402,11 +404,12 @@ public class Policy implements MapWriter {
    if (!collections.isEmpty() || !s.isEmpty()) {
      HashSet<Pair<String, String>> shards = new HashSet<>(s);
      collections.stream().forEach(c -> shards.add(new Pair<>(c, null)));
      ClusterStateProvider stateProvider = session.cloudManager.getClusterStateProvider();
      for (Pair<String, String> shard : shards) {
        // if this is not a known collection from the existing clusterstate,
        // then add it
        if (session.matrix.stream().noneMatch(row -> row.collectionVsShardVsReplicas.containsKey(shard.first()))) {
          session.addClausesForCollection(session.dataProvider, shard.first());
          session.addClausesForCollection(stateProvider, shard.first());
        }
        for (Row row : session.matrix) {
          Map<String, List<ReplicaInfo>> shardInfo = row.collectionVsShardVsReplicas.computeIfAbsent(shard.first(), it -> new HashMap<>());

@@ -420,7 +423,7 @@ public class Policy implements MapWriter {
        // the source node is dead so live nodes may not have it
        for (String srcNode : srcNodes) {
          if(session.matrix.stream().noneMatch(row -> row.node.equals(srcNode)))
            session.matrix.add(new Row(srcNode, session.getPolicy().params, session.dataProvider));
            session.matrix.add(new Row(srcNode, session.getPolicy().params, session.cloudManager));
        }
      }
      session.applyRules();
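
After this change a policy session is seeded entirely from the SolrCloudManager abstraction. A minimal sketch of creating one (the config lookup uses DistribStateManager.getAutoScalingConfig from this same commit; exception handling elided):

    AutoScalingConfig config = cloudManager.getDistribStateManager().getAutoScalingConfig();
    Policy.Session session = config.getPolicy().createSession(cloudManager);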

@@ -19,7 +19,6 @@ package org.apache.solr.client.solrj.cloud.autoscaling;


import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;

@@ -29,6 +28,7 @@ import java.util.concurrent.atomic.AtomicLong;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.cloud.autoscaling.Policy.Suggester.Hint;
import org.apache.solr.client.solrj.impl.ClusterStateProvider;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ReplicaPosition;

@@ -41,7 +41,7 @@ import static org.apache.solr.common.params.CoreAdminParams.NODE;
public class PolicyHelper {
  private static ThreadLocal<Map<String, String>> policyMapping = new ThreadLocal<>();
  public static List<ReplicaPosition> getReplicaLocations(String collName, AutoScalingConfig autoScalingConfig,
                                                          ClusterDataProvider cdp,
                                                          SolrCloudManager cloudManager,
                                                          Map<String, String> optionalPolicyMapping,
                                                          List<String> shardNames,
                                                          int nrtReplicas,

@@ -49,23 +49,7 @@ public class PolicyHelper {
                                                          int pullReplicas,
                                                          List<String> nodesList) {
    List<ReplicaPosition> positions = new ArrayList<>();
    final ClusterDataProvider delegate = cdp;
    cdp = new ClusterDataProvider() {
      @Override
      public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
        return delegate.getNodeValues(node, tags);
      }

      @Override
      public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
        return delegate.getReplicaInfo(node, keys);
      }

      @Override
      public Collection<String> getNodes() {
        return delegate.getNodes();
      }

    ClusterStateProvider stateProvider = new DelegatingClusterStateProvider(cloudManager.getClusterStateProvider()) {
      @Override
      public String getPolicyNameByCollection(String coll) {
        return policyMapping.get() != null && policyMapping.get().containsKey(coll) ?

@@ -73,13 +57,19 @@ public class PolicyHelper {
            delegate.getPolicyNameByCollection(coll);
      }
    };
    SolrCloudManager delegatingManager = new DelegatingCloudManager(cloudManager) {
      @Override
      public ClusterStateProvider getClusterStateProvider() {
        return stateProvider;
      }
    };

    policyMapping.set(optionalPolicyMapping);
    Policy.Session session = null;
    try {
      session = SESSION_REF.get() != null ?
          SESSION_REF.get().initOrGet(cdp, autoScalingConfig.getPolicy()) :
          autoScalingConfig.getPolicy().createSession(cdp);
          SESSION_REF.get().initOrGet(delegatingManager, autoScalingConfig.getPolicy()) :
          autoScalingConfig.getPolicy().createSession(delegatingManager);

      Map<Replica.Type, Integer> typeVsCount = new EnumMap<>(Replica.Type.class);
      typeVsCount.put(Replica.Type.NRT, nrtReplicas);

@@ -161,11 +151,11 @@ public class PolicyHelper {
    }
  }

  public Policy.Session initOrGet(ClusterDataProvider cdp, Policy policy) {
  public Policy.Session initOrGet(SolrCloudManager cloudManager, Policy policy) {
    synchronized (SessionRef.class) {
      Policy.Session session = get();
      if (session != null) return session;
      this.session = policy.createSession(cdp);
      this.session = policy.createSession(cloudManager);
      myVersion.incrementAndGet();
      lastUsedTime = System.nanoTime();
      REF_VERSION.set(myVersion.get());

@@ -74,7 +74,7 @@ class ReplicaCount extends Number implements MapWriter {
  public void increment(List<ReplicaInfo> infos) {
    if (infos == null) return;
    for (ReplicaInfo info : infos) {
      switch (info.type) {
      switch (info.getType()) {
        case NRT:
          nrt++;
          break;

@@ -18,38 +18,43 @@
package org.apache.solr.client.solrj.cloud.autoscaling;

import java.io.IOException;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

import org.apache.solr.common.MapWriter;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkStateReader;


public class ReplicaInfo implements MapWriter {
  final String name;
  String core, collection, shard;
  Replica.Type type;
  Map<String, Object> variables;
  final String core, collection, shard;
  final Map<String, Object> variables = new HashMap<>();

  public ReplicaInfo(String name, String coll, String shard, Replica.Type type, Map<String, Object> vals) {
  public ReplicaInfo(String name, String coll, String shard, Map<String, Object> vals) {
    this.name = name;
    this.variables = vals;
    if (vals != null) {
      this.variables.putAll(vals);
    }
    this.collection = coll;
    this.shard = shard;
    this.type = type;
    this.core = (String)vals.get("core");
  }

  @Override
  public void writeMap(EntryWriter ew) throws IOException {
    ew.put(name, (MapWriter) ew1 -> {
      if (variables != null) {
        for (Map.Entry<String, Object> e : variables.entrySet()) {
          ew1.put(e.getKey(), e.getValue());
        }
      for (Map.Entry<String, Object> e : variables.entrySet()) {
        ew1.put(e.getKey(), e.getValue());
      }
      if (type != null) ew1.put("type", type.toString());
    });
  }

  public String getName() {
    return name;
  }

  public String getCore() {
    return core;
  }

@@ -61,4 +66,46 @@ public class ReplicaInfo implements MapWriter {
  public String getShard() {
    return shard;
  }

  public Replica.Type getType() {
    Object o = variables.get(ZkStateReader.REPLICA_TYPE);
    if (o == null) {
      variables.put(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT);
      return Replica.Type.NRT;
    } else if (o instanceof Replica.Type) {
      return (Replica.Type)o;
    } else {
      Replica.Type type = Replica.Type.get(String.valueOf(o).toUpperCase(Locale.ROOT));
      variables.put(ZkStateReader.REPLICA_TYPE, type);
      return type;
    }
  }

  public Replica.State getState() {
    if (variables.get(ZkStateReader.STATE_PROP) != null) {
      return Replica.State.getState((String) variables.get(ZkStateReader.STATE_PROP));
    } else {
      // default to ACTIVE
      variables.put(ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString());
      return Replica.State.ACTIVE;
    }
  }

  public Map<String, Object> getVariables() {
    return variables;
  }

  public Object getVariable(String name) {
    return variables.get(name);
  }

  @Override
  public String toString() {
    return "ReplicaInfo{" +
        "name='" + name + '\'' +
        ", collection='" + collection + '\'' +
        ", shard='" + shard + '\'' +
        ", variables=" + variables +
        '}';
  }
}
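
With the type folded into the generic variables map, getType() normalizes whatever it finds there. A small sketch of the behavior (values are illustrative):

    Map<String, Object> vals = new HashMap<>();
    vals.put("core", "coll1_shard1_replica_t1");
    vals.put(ZkStateReader.REPLICA_TYPE, "TLOG"); // stored as a string, e.g. from cluster state
    ReplicaInfo info = new ReplicaInfo("core_node1", "coll1", "shard1", vals);
    info.getType();  // parses the string, caches it, returns Replica.Type.TLOG
    info.getState(); // no state recorded, so this defaults to Replica.State.ACTIVE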

@@ -28,6 +28,7 @@ import java.util.Random;
import org.apache.solr.common.IteratorWriter;
import org.apache.solr.common.MapWriter;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.Pair;
import org.apache.solr.common.util.Utils;


@@ -41,13 +42,14 @@ public class Row implements MapWriter {
  boolean anyValueMissing = false;
  boolean isLive = true;

  public Row(String node, List<String> params, ClusterDataProvider dataProvider) {
    collectionVsShardVsReplicas = dataProvider.getReplicaInfo(node, params);
  public Row(String node, List<String> params, SolrCloudManager cloudManager) {
    NodeStateProvider nodeStateProvider = cloudManager.getNodeStateProvider();
    collectionVsShardVsReplicas = nodeStateProvider.getReplicaInfo(node, params);
    if (collectionVsShardVsReplicas == null) collectionVsShardVsReplicas = new HashMap<>();
    this.node = node;
    cells = new Cell[params.size()];
    isLive = dataProvider.getNodes().contains(node);
    Map<String, Object> vals = isLive ? dataProvider.getNodeValues(node, params) : Collections.emptyMap();
    isLive = cloudManager.getClusterStateProvider().getLiveNodes().contains(node);
    Map<String, Object> vals = isLive ? nodeStateProvider.getNodeValues(node, params) : Collections.emptyMap();
    for (int i = 0; i < params.size(); i++) {
      String s = params.get(i);
      cells[i] = new Cell(i, s, Clause.validate(s,vals.get(s), false));

@@ -96,7 +98,8 @@ public class Row implements MapWriter {
    Row row = copy();
    Map<String, List<ReplicaInfo>> c = row.collectionVsShardVsReplicas.computeIfAbsent(coll, k -> new HashMap<>());
    List<ReplicaInfo> replicas = c.computeIfAbsent(shard, k -> new ArrayList<>());
    replicas.add(new ReplicaInfo("" + new Random().nextInt(1000) + 1000, coll, shard, type, new HashMap<>()));
    replicas.add(new ReplicaInfo("" + new Random().nextInt(1000) + 1000, coll, shard,
        Collections.singletonMap(ZkStateReader.REPLICA_TYPE, type != null ? type.toString() : Replica.Type.NRT.toString())));
    for (Cell cell : row.cells) {
      if (cell.name.equals("cores")) {
        cell.val = cell.val == null ? 0 : ((Number) cell.val).longValue() + 1;

@@ -115,7 +118,7 @@ public class Row implements MapWriter {
    int idx = -1;
    for (int i = 0; i < r.size(); i++) {
      ReplicaInfo info = r.get(i);
      if (type == null || info.type == type) {
      if (type == null || info.getType() == type) {
        idx = i;
        break;
      }

@@ -0,0 +1,60 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.solr.client.solrj.cloud.autoscaling;

import java.io.Closeable;
import java.io.IOException;
import java.util.Map;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
import org.apache.solr.client.solrj.impl.ClusterStateProvider;

/**
 * This interface abstracts the access to a SolrCloud cluster, including interactions with Zookeeper, Solr
 * and generic HTTP calls.
 * <p>This abstraction should be used when possible instead of directly referencing ZK, Solr and HTTP.</p>
 */
public interface SolrCloudManager extends Closeable {

  ClusterStateProvider getClusterStateProvider();

  NodeStateProvider getNodeStateProvider();

  DistribStateManager getDistribStateManager();

  DistributedQueueFactory getDistributedQueueFactory();

  // Solr-like methods

  SolrResponse request(SolrRequest req) throws IOException;

  byte[] httpRequest(String url, SolrRequest.METHOD method, Map<String, String> headers, String payload, int timeout, boolean followRedirects) throws IOException;

  // distributed queue implementation

  @Override
  default void close() {

  }

  default boolean isClosed() {
    return false;
  }
}
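
Everything the autoscaling framework needs from a cluster now flows through this single interface, which is what makes simulations at scale possible: tests can hand Policy a simulated SolrCloudManager instead of a real cluster. A minimal read-side sketch:

    Set<String> live = cloudManager.getClusterStateProvider().getLiveNodes();
    for (String node : live) {
      Map<String, Object> values = cloudManager.getNodeStateProvider()
          .getNodeValues(node, Arrays.asList("cores", "freedisk"));
      // feed values into policy computations, monitoring, etc.
    }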

@@ -0,0 +1,50 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.client.solrj.cloud.autoscaling;

/**
 * Immutable representation of binary data with version.
 */
public class VersionedData {
  private final int version;
  private final byte[] data;
  private final String owner;

  /**
   * Constructor.
   * @param version version of the data, or -1 if unknown
   * @param data binary data, or null.
   * @param owner symbolic identifier of data owner / creator, or null.
   */
  public VersionedData(int version, byte[] data, String owner) {
    this.version = version;
    this.data = data;
    this.owner = owner;
  }

  public int getVersion() {
    return version;
  }

  public byte[] getData() {
    return data;
  }

  public String getOwner() {
    return owner;
  }
}

@@ -0,0 +1,23 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Common classes for SolrCloud.
 */

package org.apache.solr.client.solrj.cloud;

@@ -1005,7 +1005,7 @@ public class CloudSolrClient extends SolrClient {
    }
    List<String> theUrlList = new ArrayList<>();
    if (request instanceof V2Request) {
      Set<String> liveNodes = stateProvider.liveNodes();
      Set<String> liveNodes = stateProvider.getLiveNodes();
      if (!liveNodes.isEmpty()) {
        List<String> liveNodesList = new ArrayList<>(liveNodes);
        Collections.shuffle(liveNodesList, rand);

@@ -1013,7 +1013,7 @@ public class CloudSolrClient extends SolrClient {
            (String) stateProvider.getClusterProperty(ZkStateReader.URL_SCHEME,"http")));
      }
    } else if (ADMIN_PATHS.contains(request.getPath())) {
      Set<String> liveNodes = stateProvider.liveNodes();
      Set<String> liveNodes = stateProvider.getLiveNodes();
      for (String liveNode : liveNodes) {
        theUrlList.add(ZkStateReader.getBaseUrlForNodeName(liveNode,
            (String) stateProvider.getClusterProperty(ZkStateReader.URL_SCHEME,"http")));

@@ -1046,7 +1046,7 @@ public class CloudSolrClient extends SolrClient {
      Collection<Slice> routeSlices = col.getRouter().getSearchSlices(shardKeys, reqParams , col);
      ClientUtils.addSlices(slices, collectionName, routeSlices, true);
    }
    Set<String> liveNodes = stateProvider.liveNodes();
    Set<String> liveNodes = stateProvider.getLiveNodes();

    List<String> leaderUrlList = null;
    List<String> urlList = null;

@@ -17,6 +17,8 @@
package org.apache.solr.client.solrj.impl;

import java.io.Closeable;
import java.io.IOException;
import java.util.Map;
import java.util.Set;

import org.apache.solr.common.cloud.ClusterState;

@@ -32,7 +34,7 @@ public interface ClusterStateProvider extends Closeable {
  /**
   * Obtain set of live_nodes for the cluster.
   */
  Set<String> liveNodes();
  Set<String> getLiveNodes();

  /**
   * Given an alias, returns the collection name that this alias points to

@@ -46,14 +48,37 @@ public interface ClusterStateProvider extends Closeable {
  String getCollectionName(String name);

  /**
   * Obtain a cluster property, or null if it doesn't exist.
   * Obtain the current cluster state.
   */
  Object getClusterProperty(String propertyName);
  ClusterState getClusterState() throws IOException;

  /**
   * Obtain cluster properties.
   * @return configured cluster properties, or an empty map, never null.
   */
  Map<String, Object> getClusterProperties();

  /**
   * Obtain a cluster property, or the default value if it doesn't exist.
   */
  Object getClusterProperty(String propertyName, String def);
  default <T> T getClusterProperty(String key, T defaultValue) {
    T value = (T) getClusterProperties().get(key);
    if (value == null)
      return defaultValue;
    return value;
  }

  /**
   * Obtain a cluster property, or null if it doesn't exist.
   */
  default <T> T getClusterProperty(String propertyName) {
    return (T) getClusterProperties().get(propertyName);
  }

  /**
   * Get the collection-specific policy
   */
  String getPolicyNameByCollection(String coll);

  void connect();
}
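
Cluster properties now come from a single map, with typed default-method accessors layered on top. A short usage sketch (the second property key is illustrative):

    ClusterStateProvider provider = cloudManager.getClusterStateProvider();
    String scheme = provider.getClusterProperty(ZkStateReader.URL_SCHEME, "http");
    Object autoAdd = provider.getClusterProperty("autoAddReplicas"); // null when unset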

@@ -113,7 +113,9 @@ public class HttpClusterStateProvider implements ClusterStateProvider {
  @SuppressWarnings({"rawtypes", "unchecked"})
  private ClusterState fetchClusterState(SolrClient client, String collection) throws SolrServerException, IOException {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("collection", collection);
    if (collection != null) {
      params.set("collection", collection);
    }
    params.set("action", "CLUSTERSTATUS");
    QueryRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");

@@ -129,7 +131,7 @@ public class HttpClusterStateProvider implements ClusterStateProvider {
  }

  @Override
  public Set<String> liveNodes() {
  public Set<String> getLiveNodes() {
    if (liveNodes == null) {
      throw new RuntimeException("We don't know of any live_nodes to fetch the"
          + " latest live_nodes information from. "

@@ -228,17 +230,41 @@ public class HttpClusterStateProvider implements ClusterStateProvider {
  }

  @Override
  public Object getClusterProperty(String propertyName) {
    if (propertyName.equals(ZkStateReader.URL_SCHEME)) {
      return this.urlScheme;
  public ClusterState getClusterState() throws IOException {
    for (String nodeName: liveNodes) {
      try (HttpSolrClient client = new HttpSolrClient.Builder().
          withBaseSolrUrl(ZkStateReader.getBaseUrlForNodeName(nodeName, urlScheme)).
          withHttpClient(httpClient).build()) {
        ClusterState cs = fetchClusterState(client, null);
        return cs;
      } catch (SolrServerException | RemoteSolrException | IOException e) {
        log.warn("Attempt to fetch cluster state from " +
            ZkStateReader.getBaseUrlForNodeName(nodeName, urlScheme) + " failed.", e);
      }
    }
    throw new RuntimeException("Tried fetching cluster state using the node names we knew of, i.e. " + liveNodes +". However, "
        + "succeeded in obtaining the cluster state from none of them."
        + "If you think your Solr cluster is up and is accessible,"
        + " you could try re-creating a new CloudSolrClient using working"
        + " solrUrl(s) or zkHost(s).");
  }

  @Override
  public Map<String, Object> getClusterProperties() {
    throw new UnsupportedOperationException("Fetching cluster properties not supported"
        + " using the HttpClusterStateProvider. "
        + "ZkClientClusterStateProvider can be used for this."); // TODO
  }

  @Override
  public Object getClusterProperty(String propertyName, String def) {
  public String getPolicyNameByCollection(String coll) {
    throw new UnsupportedOperationException("Fetching cluster properties not supported"
        + " using the HttpClusterStateProvider. "
        + "ZkClientClusterStateProvider can be used for this."); // TODO
  }

  @Override
  public Object getClusterProperty(String propertyName) {
    if (propertyName.equals(ZkStateReader.URL_SCHEME)) {
      return this.urlScheme;
    }

@@ -0,0 +1,166 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.solr.client.solrj.impl;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.Map;

import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.NodeStateProvider;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkStateReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Class that implements {@link SolrCloudManager} using a SolrClient
 */
public class SolrClientCloudManager implements SolrCloudManager {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  private final CloudSolrClient solrClient;
  private final ZkDistribStateManager stateManager;
  private final DistributedQueueFactory queueFactory;
  private final ZkStateReader zkStateReader;
  private final SolrZkClient zkClient;
  private volatile boolean isClosed;

  public SolrClientCloudManager(DistributedQueueFactory queueFactory, CloudSolrClient solrClient) {
    this.queueFactory = queueFactory;
    this.solrClient = solrClient;
    this.zkStateReader = solrClient.getZkStateReader();
    this.zkClient = zkStateReader.getZkClient();
    this.stateManager = new ZkDistribStateManager(zkClient);
    this.isClosed = false;
  }

  @Override
  public void close() {
    isClosed = true;
  }

  @Override
  public boolean isClosed() {
    return isClosed;
  }

  @Override
  public ClusterStateProvider getClusterStateProvider() {
    return solrClient.getClusterStateProvider();
  }

  @Override
  public NodeStateProvider getNodeStateProvider() {
    return new SolrClientNodeStateProvider(solrClient);
  }

  @Override
  public DistribStateManager getDistribStateManager() {
    return stateManager;
  }

  @Override
  public SolrResponse request(SolrRequest req) throws IOException {
    try {
      return req.process(solrClient);
    } catch (SolrServerException e) {
      throw new IOException(e);
    }
  }

  private static final byte[] EMPTY = new byte[0];

  @Override
  public byte[] httpRequest(String url, SolrRequest.METHOD method, Map<String, String> headers, String payload, int timeout, boolean followRedirects) throws IOException {
    HttpClient client = solrClient.getHttpClient();
    final HttpRequestBase req;
    HttpEntity entity = null;
    if (payload != null) {
      entity = new StringEntity(payload, "UTF-8");
    }
    switch (method) {
      case GET:
        req = new HttpGet(url);
        break;
      case POST:
        req = new HttpPost(url);
        if (entity != null) {
          ((HttpPost)req).setEntity(entity);
        }
        break;
      case PUT:
        req = new HttpPut(url);
        if (entity != null) {
          ((HttpPut)req).setEntity(entity);
        }
        break;
      case DELETE:
        req = new HttpDelete(url);
        break;
      default:
        throw new IOException("Unsupported method " + method);
    }
    if (headers != null) {
      headers.forEach((k, v) -> req.addHeader(k, v));
    }
    RequestConfig.Builder requestConfigBuilder = HttpClientUtil.createDefaultRequestConfigBuilder();
    if (timeout > 0) {
      requestConfigBuilder.setSocketTimeout(timeout);
      requestConfigBuilder.setConnectTimeout(timeout);
    }
    requestConfigBuilder.setRedirectsEnabled(followRedirects);
    req.setConfig(requestConfigBuilder.build());
    HttpClientContext httpClientRequestContext = HttpClientUtil.createNewHttpClientRequestContext();
    HttpResponse rsp = client.execute(req, httpClientRequestContext);
    int statusCode = rsp.getStatusLine().getStatusCode();
    if (statusCode != 200) {
      throw new IOException("Error sending request to " + url + ", HTTP response: " + rsp.toString());
    }
    HttpEntity responseEntity = rsp.getEntity();
    if (responseEntity != null && responseEntity.getContent() != null) {
      return EntityUtils.toByteArray(responseEntity);
    } else {
      return EMPTY;
    }
  }

  @Override
  public DistributedQueueFactory getDistributedQueueFactory() {
    return queueFactory;
  }

}
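
This is the production binding: ZK-backed state via ZkDistribStateManager, Solr requests via the wrapped CloudSolrClient, and raw HTTP through the client's HttpClient. A construction sketch, assuming a Solr 7.x-style builder and an existing DistributedQueueFactory (URLs are illustrative):

    try (CloudSolrClient client = new CloudSolrClient.Builder().withZkHost("localhost:2181").build()) {
      client.connect();
      SolrCloudManager cloudManager = new SolrClientCloudManager(queueFactory, client);
      byte[] body = cloudManager.httpRequest("http://localhost:8983/solr/admin/info/system",
          SolrRequest.METHOD.GET, null, null, 10000, true);
    }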

@@ -17,7 +17,6 @@

package org.apache.solr.client.solrj.impl;


import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;

@@ -32,12 +31,10 @@ import java.util.Set;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
import org.apache.solr.client.solrj.cloud.autoscaling.ClusterDataProvider;
import org.apache.solr.client.solrj.cloud.autoscaling.NodeStateProvider;
import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.client.solrj.response.SimpleSolrResponse;
import org.apache.solr.common.MapWriter;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;

@@ -58,26 +55,27 @@ import org.slf4j.LoggerFactory;
import static org.apache.solr.client.solrj.cloud.autoscaling.Clause.METRICS_PREFIX;

/**
 * Class that implements {@link ClusterStateProvider} accepting a SolrClient
 *
 */
public class SolrClientDataProvider implements ClusterDataProvider, MapWriter {
public class SolrClientNodeStateProvider implements NodeStateProvider {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
  //only for debugging
  public static SolrClientDataProvider INST;
  //only for debugging, do not use it for anything else
  public AutoScalingConfig config;
  public static SolrClientNodeStateProvider INST;


  private final CloudSolrClient solrClient;
  private final ZkStateReader zkStateReader;
  private final Map<String, Map<String, Map<String, List<ReplicaInfo>>>> data = new HashMap<>();
  private Set<String> liveNodes;
  private Map<String, Object> snitchSession = new HashMap<>();
  private Map<String, Map> nodeVsTags = new HashMap<>();

  public SolrClientDataProvider(CloudSolrClient solrClient) {
  public SolrClientNodeStateProvider(CloudSolrClient solrClient) {
    this.solrClient = solrClient;
    ZkStateReader zkStateReader = solrClient.getZkStateReader();
    this.zkStateReader = solrClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    this.liveNodes = clusterState.getLiveNodes();
    if (clusterState == null) { // zkStateReader still initializing
      return;
    }
    Map<String, ClusterState.CollectionRef> all = clusterState.getCollectionStates();
    all.forEach((collName, ref) -> {
      DocCollection coll = ref.get();

@@ -86,18 +84,12 @@ public class SolrClientDataProvider implements ClusterDataProvider, MapWriter {
        Map<String, Map<String, List<ReplicaInfo>>> nodeData = data.computeIfAbsent(replica.getNodeName(), k -> new HashMap<>());
        Map<String, List<ReplicaInfo>> collData = nodeData.computeIfAbsent(collName, k -> new HashMap<>());
        List<ReplicaInfo> replicas = collData.computeIfAbsent(shard, k -> new ArrayList<>());
        replicas.add(new ReplicaInfo(replica.getName(), collName, shard, replica.getType(), new HashMap<>()));
        replicas.add(new ReplicaInfo(replica.getName(), collName, shard, replica.getProperties()));
      });
    });
    if(log.isDebugEnabled()) INST = this;
  }

  @Override
  public String getPolicyNameByCollection(String coll) {
    ClusterState.CollectionRef state = solrClient.getClusterStateProvider().getState(coll);
    return state == null || state.get() == null ? null : (String) state.get().getProperties().get("policy");
  }

  @Override
  public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
    AutoScalingSnitch snitch = new AutoScalingSnitch();

@@ -107,67 +99,12 @@ public class SolrClientDataProvider implements ClusterDataProvider, MapWriter {
    return ctx.getTags();
  }


  @Override
  public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
    return data.computeIfAbsent(node, s -> Collections.emptyMap());//todo fill other details
  }

  @Override
  public Collection<String> getNodes() {
    return liveNodes;
  }

  @Override
  public void writeMap(EntryWriter ew) throws IOException {
    ew.put("liveNodes", liveNodes);
    ew.put("replicaInfo", Utils.getDeepCopy(data, 5));
    ew.put("nodeValues", nodeVsTags);

  }

  static class ClientSnitchCtx
      extends SnitchContext {
    private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

    ZkClientClusterStateProvider zkClientClusterStateProvider;
    CloudSolrClient solrClient;

    public ClientSnitchCtx(SnitchInfo perSnitch,
                           String node, Map<String, Object> session,
                           CloudSolrClient solrClient) {
      super(perSnitch, node, session);
      this.solrClient = solrClient;
      this.zkClientClusterStateProvider = (ZkClientClusterStateProvider) solrClient.getClusterStateProvider();
    }


    @Override
    public Map getZkJson(String path) throws KeeperException, InterruptedException {
      return Utils.getJson(zkClientClusterStateProvider.getZkStateReader().getZkClient(), path, true);
    }

    public void invokeRemote(String node, ModifiableSolrParams params, String klas, RemoteCallback callback) {

    }

    public SimpleSolrResponse invoke(String solrNode, String path, SolrParams params)
        throws IOException, SolrServerException {
      String url = zkClientClusterStateProvider.getZkStateReader().getBaseUrlForNodeName(solrNode);

      GenericSolrRequest request = new GenericSolrRequest(SolrRequest.METHOD.GET, path, params);
      try (HttpSolrClient client = new HttpSolrClient.Builder()
          .withHttpClient(solrClient.getHttpClient())
          .withBaseSolrUrl(url)
          .withResponseParser(new BinaryResponseParser())
          .build()) {
        NamedList<Object> rsp = client.request(request);
        request.response.nl = rsp;
        return request.response;
      }
    }

  }

  //uses metrics API to get node information
  static class AutoScalingSnitch extends ImplicitSnitch {
    @Override

@@ -247,4 +184,47 @@ public class SolrClientDataProvider implements ClusterDataProvider, MapWriter {
      }
    }
  }

  static class ClientSnitchCtx
      extends SnitchContext {
    private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

    ZkClientClusterStateProvider zkClientClusterStateProvider;
    CloudSolrClient solrClient;

    public ClientSnitchCtx(SnitchInfo perSnitch,
                           String node, Map<String, Object> session,
                           CloudSolrClient solrClient) {
      super(perSnitch, node, session);
      this.solrClient = solrClient;
      this.zkClientClusterStateProvider = (ZkClientClusterStateProvider) solrClient.getClusterStateProvider();
    }


    @Override
    public Map getZkJson(String path) throws KeeperException, InterruptedException {
      return Utils.getJson(zkClientClusterStateProvider.getZkStateReader().getZkClient(), path, true);
    }

    public void invokeRemote(String node, ModifiableSolrParams params, String klas, RemoteCallback callback) {

    }

    public SimpleSolrResponse invoke(String solrNode, String path, SolrParams params)
        throws IOException, SolrServerException {
      String url = zkClientClusterStateProvider.getZkStateReader().getBaseUrlForNodeName(solrNode);

      GenericSolrRequest request = new GenericSolrRequest(SolrRequest.METHOD.GET, path, params);
      try (HttpSolrClient client = new HttpSolrClient.Builder()
          .withHttpClient(solrClient.getHttpClient())
          .withBaseSolrUrl(url)
          .withResponseParser(new BinaryResponseParser())
          .build()) {
        NamedList<Object> rsp = client.request(request);
        request.response.nl = rsp;
        return request.response;
      }
    }

  }
}
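
Node tags still come through the snitch machinery (metric-backed tags are fetched via the metrics API, per the AutoScalingSnitch above). A short sketch, with tag names drawn from the standard ImplicitSnitch set:

    SolrClientNodeStateProvider provider = new SolrClientNodeStateProvider(cloudSolrClient);
    Map<String, Object> tags = provider.getNodeValues("node1:8983_solr",
        Arrays.asList("cores", "freedisk", "sysprop.java.version"));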
|
|
@ -21,6 +21,7 @@ import java.io.IOException;
|
|||
import java.lang.invoke.MethodHandles;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
|
@@ -59,15 +60,25 @@ public class ZkClientClusterStateProvider implements ClusterStateProvider {

  @Override
  public ClusterState.CollectionRef getState(String collection) {
    return zkStateReader.getClusterState().getCollectionRef(collection);
    ClusterState clusterState = zkStateReader.getClusterState();
    if (clusterState != null) {
      return clusterState.getCollectionRef(collection);
    } else {
      return null;
    }
  }

  public ZkStateReader getZkStateReader() {
    return zkStateReader;
  }

  @Override
  public Set<String> liveNodes() {
    return zkStateReader.getClusterState().getLiveNodes();
  public Set<String> getLiveNodes() {
    ClusterState clusterState = zkStateReader.getClusterState();
    if (clusterState != null) {
      return clusterState.getLiveNodes();
    } else {
      return Collections.emptySet();
    }
  }
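Both rewrites in this hunk replace a potential NullPointerException with an explicit null path: before the ZK client has delivered its first cluster-state snapshot, zkStateReader.getClusterState() can still return null. A caller-side sketch of the matching defensive idiom, with the collection name assumed:

import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;

public class NullSafeStateSketch {
  // Returns the collection's state, or null if no snapshot has arrived yet;
  // callers should treat null as "not visible yet", not as an error.
  static DocCollection lookup(ZkClientClusterStateProvider provider, String collection) {
    ClusterState.CollectionRef ref = provider.getState(collection);
    return ref == null ? null : ref.get();
  }
}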
@@ -84,10 +95,10 @@ public class ZkClientClusterStateProvider implements ClusterStateProvider {
  }

  @Override
  public Object getClusterProperty(String propertyName, String def) {
  public <T> T getClusterProperty(String propertyName, T def) {
    Map<String, Object> props = zkStateReader.getClusterProperties();
    if (props.containsKey(propertyName)) {
      return props.get(propertyName);
      return (T) props.get(propertyName);
    }
    return def;
  }
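The generic signature shifts the cast from every call site into the provider. A hypothetical caller; the property names are real cluster properties, but the value types here are illustrative:

import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;

public class ClusterPropSketch {
  static void demo(ZkClientClusterStateProvider provider) {
    // Old signature forced: Object v = provider.getClusterProperty("urlScheme", "http"); plus a cast.
    // New signature infers T from the default argument:
    String scheme = provider.getClusterProperty("urlScheme", "http");
    Boolean autoAdd = provider.getClusterProperty("autoAddReplicas", Boolean.FALSE);
    System.out.println(scheme + " " + autoAdd);
  }
}

The trade-off is the unchecked (T) cast: if the stored value's runtime type disagrees with the default's type, the failure surfaces as a ClassCastException at the call site instead of a compile error.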
@@ -103,6 +114,23 @@ public class ZkClientClusterStateProvider implements ClusterStateProvider {
    }
    return name;
  }

  @Override
  public ClusterState getClusterState() throws IOException {
    return zkStateReader.getClusterState();
  }

  @Override
  public Map<String, Object> getClusterProperties() {
    return zkStateReader.getClusterProperties();
  }

  @Override
  public String getPolicyNameByCollection(String coll) {
    ClusterState.CollectionRef state = getState(coll);
    return state == null || state.get() == null ? null : (String) state.get().getProperties().get("policy");
  }

  /**
   * Download a named config from Zookeeper to a location on the filesystem
   * @param configName the name of the config
@@ -0,0 +1,166 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.solr.client.solrj.impl;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;

import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.AutoScalingParams;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Op;
import org.apache.zookeeper.OpResult;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.data.Stat;

import static org.apache.solr.common.util.Utils.fromJSON;

/**
 * Implementation of {@link DistribStateManager} that uses Zookeeper.
 */
public class ZkDistribStateManager implements DistribStateManager {

  private final SolrZkClient zkClient;

  public ZkDistribStateManager(SolrZkClient zkClient) {
    this.zkClient = zkClient;
  }

  @Override
  public boolean hasData(String path) throws IOException, KeeperException, InterruptedException {
    try {
      return zkClient.exists(path, true);
    } catch (InterruptedException e) {
      throw e;
    }
  }

  @Override
  public List<String> listData(String path) throws NoSuchElementException, IOException, KeeperException, InterruptedException {
    try {
      return zkClient.getChildren(path, null, true);
    } catch (KeeperException.NoNodeException e) {
      throw new NoSuchElementException(path);
    } catch (InterruptedException e) {
      throw e;
    }
  }

  @Override
  public VersionedData getData(String path, Watcher watcher) throws NoSuchElementException, IOException, KeeperException, InterruptedException {
    Stat stat = new Stat();
    try {
      byte[] bytes = zkClient.getData(path, watcher, stat, true);
      return new VersionedData(stat.getVersion(), bytes, String.valueOf(stat.getEphemeralOwner()));
    } catch (KeeperException.NoNodeException e) {
      throw new NoSuchElementException(path);
    } catch (InterruptedException e) {
      throw e;
    }
  }

  @Override
  public void makePath(String path) throws AlreadyExistsException, IOException, KeeperException, InterruptedException {
    try {
      zkClient.makePath(path, true);
    } catch (KeeperException.NodeExistsException e) {
      throw new AlreadyExistsException(path);
    } catch (InterruptedException e) {
      throw e;
    }
  }

  @Override
  public String createData(String path, byte[] data, CreateMode mode) throws AlreadyExistsException, IOException, KeeperException, InterruptedException {
    try {
      return zkClient.create(path, data, mode, true);
    } catch (KeeperException.NodeExistsException e) {
      throw new AlreadyExistsException(path);
    } catch (InterruptedException e) {
      throw e;
    }
  }

  @Override
  public void removeData(String path, int version) throws NoSuchElementException, IOException, KeeperException, InterruptedException {
    try {
      zkClient.delete(path, version, true);
    } catch (KeeperException.NoNodeException e) {
      throw new NoSuchElementException(path);
    } catch (InterruptedException e) {
      throw e;
    }
  }

  @Override
  public void setData(String path, byte[] data, int version) throws BadVersionException, NoSuchElementException, IOException, KeeperException, InterruptedException {
    try {
      zkClient.setData(path, data, version, true);
    } catch (KeeperException.NoNodeException e) {
      throw new NoSuchElementException(path);
    } catch (KeeperException.BadVersionException e) {
      throw new BadVersionException(version, path);
    } catch (InterruptedException e) {
      throw e;
    }
  }

  @Override
  public List<OpResult> multi(Iterable<Op> ops) throws BadVersionException, AlreadyExistsException, NoSuchElementException, IOException, KeeperException, InterruptedException {
    try {
      return zkClient.multi(ops, true);
    } catch (KeeperException.NoNodeException e) {
      throw new NoSuchElementException(ops.toString());
    } catch (KeeperException.NodeExistsException e) {
      throw new AlreadyExistsException(ops.toString());
    } catch (KeeperException.BadVersionException e) {
      throw new BadVersionException(-1, ops.toString());
    } catch (InterruptedException e) {
      throw e;
    }
  }

  @Override
  public AutoScalingConfig getAutoScalingConfig(Watcher watcher) throws InterruptedException, IOException {
    Map<String, Object> map = new HashMap<>();
    Stat stat = new Stat();
    try {
      byte[] bytes = zkClient.getData(ZkStateReader.SOLR_AUTOSCALING_CONF_PATH, watcher, stat, true);
      if (bytes != null && bytes.length > 0) {
        map = (Map<String, Object>) fromJSON(bytes);
      }
    } catch (KeeperException.NoNodeException e) {
      // ignore
    } catch (KeeperException e) {
      throw new IOException(e);
    }
    map.put(AutoScalingParams.ZK_VERSION, stat.getVersion());
    return new AutoScalingConfig(map);
  }

}
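A usage sketch of the new class, exercising only methods declared above. The ZK address, timeout, and paths are assumptions, and VersionedData.getVersion() is assumed to expose the version passed to its constructor:

import java.nio.charset.StandardCharsets;

import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
import org.apache.solr.client.solrj.impl.ZkDistribStateManager;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.zookeeper.CreateMode;

public class StateManagerSketch {
  public static void main(String[] args) throws Exception {
    try (SolrZkClient zkClient = new SolrZkClient("localhost:2181", 30000)) { // assumed ZK address
      ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
      try {
        stateManager.makePath("/demo"); // ensure the parent exists
      } catch (AlreadyExistsException e) {
        // fine: parent already present
      }
      try {
        stateManager.createData("/demo/config",
            "{\"a\":1}".getBytes(StandardCharsets.UTF_8), CreateMode.PERSISTENT);
      } catch (AlreadyExistsException e) {
        // fine: another writer created it first
      }
      // Optimistic concurrency: read the current version, then update conditionally.
      VersionedData current = stateManager.getData("/demo/config", null);
      try {
        stateManager.setData("/demo/config",
            "{\"a\":2}".getBytes(StandardCharsets.UTF_8), current.getVersion());
      } catch (BadVersionException e) {
        // a concurrent writer won; re-read and retry in real code
      }
    }
  }
}

The point of the wrapper is visible here: callers handle AlreadyExistsException and BadVersionException instead of raw KeeperException subtypes, which is what lets simulated (non-ZK) implementations slot in behind the same interface.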
@@ -139,6 +139,10 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
    return asyncId;
  }

  public void setAsyncId(String asyncId) {
    this.asyncId = asyncId;
  }

  /**
   * Process this request asynchronously, generating and returning a request id
   * @param client a Solr client
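The asyncId mutated here is the handle the async collection API hands back to callers. A sketch of that round trip with SolrJ; the ZK host, collection name, and configset are assumptions:

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.RequestStatusState;

public class AsyncRequestSketch {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:2181") // assumed ZK host
        .build()) {
      // Kick off a potentially long-running collection operation asynchronously.
      String requestId = CollectionAdminRequest
          .createCollection("demo", "_default", 2, 1) // assumed name, configset, sizes
          .processAsync(client);
      // Poll the Overseer for completion using the returned id.
      RequestStatusState state = CollectionAdminRequest.requestStatus(requestId)
          .process(client).getRequestStatus();
      System.out.println(requestId + " -> " + state);
    }
  }
}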
@@ -25,15 +25,20 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import com.google.common.collect.ImmutableList;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
import org.apache.solr.client.solrj.cloud.autoscaling.Clause.Violation;
import org.apache.solr.client.solrj.cloud.autoscaling.Policy.Suggester.Hint;
import org.apache.solr.client.solrj.impl.ClusterStateProvider;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ReplicaPosition;
@@ -110,7 +115,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
        if (!node_name.equals(node)) return;
        Map<String, List<ReplicaInfo>> shardVsReplicaStats = result.computeIfAbsent(collName, k -> new HashMap<>());
        List<ReplicaInfo> replicaInfos = shardVsReplicaStats.computeIfAbsent(shard, k -> new ArrayList<>());
        replicaInfos.add(new ReplicaInfo(replicaName, collName, shard, Replica.Type.get((String) r.get(ZkStateReader.REPLICA_TYPE)), new HashMap<>()));
        replicaInfos.add(new ReplicaInfo(replicaName, collName, shard, r));
      });
    });
  });
@@ -268,7 +273,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
        " 'precision':100}]}";

    Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
    Policy.Session session = policy.createSession(dataProviderWithData(dataproviderdata));
    Policy.Session session = policy.createSession(cloudManagerWithData(dataproviderdata));
    SolrRequest op = session.getSuggester(MOVEREPLICA).hint(Hint.SRC_NODE, "127.0.0.1:65427_solr").getOperation();
    assertNotNull(op);
    assertEquals("127.0.0.1:65434_solr", op.getParams().get("targetNode"));
@@ -290,7 +295,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
        " 'freedisk':884.7097854614258}," +
        "}";

    ClusterDataProvider provider = getClusterDataProvider((Map<String, Map>) Utils.fromJSONString(nodeValues), clusterState);
    SolrCloudManager provider = getSolrCloudManager((Map<String, Map>) Utils.fromJSONString(nodeValues), clusterState);
    Map policies = (Map) Utils.fromJSONString("{" +
        " 'cluster-preferences': [" +
        " { 'maximize': 'freedisk', 'precision': 50}," +
@@ -407,7 +412,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
    assertEquals("r1", operation.getParams().get("replica"));
  }

  private static ClusterDataProvider dataProviderWithData(String data) {
  private static SolrCloudManager cloudManagerWithData(String data) {
    final Map m = (Map) Utils.fromJSONString(data);
    Map replicaInfo = (Map) m.get("replicaInfo");
    replicaInfo.forEach((node, val) -> {
@@ -420,36 +425,39 @@ public class TestPolicy extends SolrTestCaseJ4 {
          Object o = l3.get(i);
          Map m3 = (Map) o;
          l3.set(i, new ReplicaInfo(m3.keySet().iterator().next().toString()
              , coll.toString(), shard.toString(), Replica.Type.get((String) m3.get("type")), new HashMap<>()));
              , coll.toString(), shard.toString(), m3));
        }
      });

    });

    });
    return new ClusterDataProvider() {
    return new DelegatingCloudManager(null) {
      @Override
      public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
        return (Map<String, Object>) Utils.getObjectByPath(m, false, Arrays.asList("nodeValues", node));
      public ClusterStateProvider getClusterStateProvider() {
        return new DelegatingClusterStateProvider(null) {
          @Override
          public Set<String> getLiveNodes() {
            return new HashSet<>((Collection<String>) m.get("liveNodes"));
          }
        };
      }

      @Override
      public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
        return (Map<String, Map<String, List<ReplicaInfo>>>) Utils.getObjectByPath(m, false, Arrays.asList("replicaInfo", node));
      }
      public NodeStateProvider getNodeStateProvider() {
        return new DelegatingNodeStateProvider(null) {
          @Override
          public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
            return (Map<String, Object>) Utils.getObjectByPath(m, false, Arrays.asList("nodeValues", node));
          }

          @Override
          public Collection<String> getNodes() {
            return (Collection<String>) m.get("liveNodes");
          }

          @Override
          public String getPolicyNameByCollection(String coll) {
            return null;
          @Override
          public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
            return (Map<String, Map<String, List<ReplicaInfo>>>) Utils.getObjectByPath(m, false, Arrays.asList("replicaInfo", node));
          }
        };
      }
    };

  }

  public void testPolicyWithReplicaType() {
|
@ -472,7 +480,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
|
|||
"node4:{cores:8, freedisk: 375, heapUsage:16900, nodeRole:overseer, rack: rack1}" +
|
||||
"}");
|
||||
Policy policy = new Policy(policies);
|
||||
Policy.Suggester suggester = policy.createSession(getClusterDataProvider(nodeValues, clusterState))
|
||||
Policy.Suggester suggester = policy.createSession(getSolrCloudManager(nodeValues, clusterState))
|
||||
.getSuggester(ADDREPLICA)
|
||||
.hint(Hint.COLL_SHARD, new Pair("newColl", "shard1"))
|
||||
.hint(Hint.REPLICATYPE, Replica.Type.PULL);
|
||||
|
@@ -612,7 +620,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
        " 'tlogReplicas':'0'}\n" +
        "}";
    Policy policy = new Policy(new HashMap<>());
    Policy.Suggester suggester = policy.createSession(getClusterDataProvider(nodeValues, clusterState))
    Policy.Suggester suggester = policy.createSession(getSolrCloudManager(nodeValues, clusterState))
        .getSuggester(MOVEREPLICA)
        .hint(Hint.COLL, "collection1")
        .hint(Hint.COLL, "collection2")
@@ -668,7 +676,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
        "node4:{cores:8, freedisk: 375, heapUsage:16900, nodeRole:overseer, rack: rack1}" +
        "}");
    Policy policy = new Policy(policies);
    Policy.Suggester suggester = policy.createSession(getClusterDataProvider(nodeValues, clusterState))
    Policy.Suggester suggester = policy.createSession(getSolrCloudManager(nodeValues, clusterState))
        .getSuggester(ADDREPLICA)
        .hint(Hint.REPLICATYPE, Replica.Type.PULL)
        .hint(Hint.COLL_SHARD, new Pair<>("newColl", "shard1"))
@@ -796,7 +804,7 @@ public class TestPolicy extends SolrTestCaseJ4 {

    Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(rules));
    Policy.Session session;
    session = policy.createSession(getClusterDataProvider(nodeValues, clusterState));
    session = policy.createSession(getSolrCloudManager(nodeValues, clusterState));

    List<Row> l = session.getSorted();
    assertEquals("node1", l.get(0).node);
@@ -823,7 +831,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
        "node5:{cores:0, freedisk: 895, heapUsage:17834}," +
        "node4:{cores:8, freedisk: 375, heapUsage:16900, nodeRole:overseer}" +
        "}");
    session = policy.createSession(getClusterDataProvider(nodeValues, clusterState));
    session = policy.createSession(getSolrCloudManager(nodeValues, clusterState));
    SolrRequest opReq = session.getSuggester(MOVEREPLICA)
        .hint(Hint.TARGET_NODE, "node5")
        .getOperation();
@@ -856,7 +864,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
        " 'freedisk':884.7097854614258}," +
        "}";

    ClusterDataProvider provider = getClusterDataProvider((Map<String, Map>) Utils.fromJSONString(nodeValues), clusterState);
    SolrCloudManager provider = getSolrCloudManager((Map<String, Map>) Utils.fromJSONString(nodeValues), clusterState);
    Map policies = (Map) Utils.fromJSONString("{" +
        " 'cluster-preferences': [" +
        " { 'maximize': 'freedisk', 'precision': 50}," +
@@ -924,8 +932,8 @@ public class TestPolicy extends SolrTestCaseJ4 {
        "node4:{cores:8, freedisk: 375, heapUsage:16900, nodeRole:overseer, rack: rack1}" +
        "}");
    Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoscaleJson));
    ClusterDataProvider clusterDataProvider = getClusterDataProvider(nodeValues, clusterState);
    Policy.Session session = policy.createSession(clusterDataProvider);
    SolrCloudManager cloudManager = getSolrCloudManager(nodeValues, clusterState);
    Policy.Session session = policy.createSession(cloudManager);
    for (int i = 0; i < 3; i++) {
      Policy.Suggester suggester = session.getSuggester(ADDREPLICA);
      SolrRequest op = suggester
@@ -956,29 +964,8 @@ public class TestPolicy extends SolrTestCaseJ4 {
        "}");

    Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoscaleJson));
    ClusterDataProvider clusterDataProvider = getClusterDataProvider(nodeValues, clusterState);
    ClusterDataProvider cdp = new ClusterDataProvider() {
      @Override
      public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
        return clusterDataProvider.getNodeValues(node, tags);
      }

      @Override
      public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
        return clusterDataProvider.getReplicaInfo(node, keys);
      }

      @Override
      public Collection<String> getNodes() {
        return clusterDataProvider.getNodes();
      }

      @Override
      public String getPolicyNameByCollection(String coll) {
        return null;
      }
    };
    Policy.Session session = policy.createSession(cdp);
    SolrCloudManager cloudManager = getSolrCloudManager(nodeValues, clusterState);
    Policy.Session session = policy.createSession(cloudManager);
    Policy.Suggester suggester = session.getSuggester(ADDREPLICA);
    SolrRequest op = suggester
        .hint(Hint.COLL_SHARD, new Pair<>("newColl", "shard1"))
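This hunk is the refactoring's payoff in the tests: a roughly twenty-line hand-rolled pass-through ClusterDataProvider collapses to two lines, because the delegating base classes already forward every method. A self-contained illustration of the pattern; the types here are illustrative stand-ins, not Solr classes:

import java.util.Set;

// Illustrative interface standing in for a provider with many methods.
interface StateSource {
  Set<String> liveNodes();
  String policyFor(String collection);
}

// The delegate base forwards everything, so a test overrides only one method.
class DelegatingStateSource implements StateSource {
  private final StateSource delegate;
  DelegatingStateSource(StateSource delegate) { this.delegate = delegate; }
  public Set<String> liveNodes() { return delegate.liveNodes(); }
  public String policyFor(String collection) { return delegate.policyFor(collection); }
}

class DelegateDemo {
  static StateSource withFixedPolicy(StateSource real) {
    return new DelegatingStateSource(real) {
      @Override
      public String policyFor(String collection) {
        return "p1"; // the single behavior this test cares about
      }
    };
  }
}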
@@ -1019,8 +1006,8 @@ public class TestPolicy extends SolrTestCaseJ4 {
        " {'core_node2':{}}]}}}");
    Map m = (Map) Utils.getObjectByPath(replicaInfoMap, false, "127.0.0.1:60089_solr/compute_plan_action_test");
    m.put("shard1", Arrays.asList(
        new ReplicaInfo("core_node1", "compute_plan_action_test", "shard1", Replica.Type.NRT, Collections.emptyMap()),
        new ReplicaInfo("core_node2", "compute_plan_action_test", "shard1", Replica.Type.NRT, Collections.emptyMap())
        new ReplicaInfo("core_node1", "compute_plan_action_test", "shard1", Collections.singletonMap(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT)),
        new ReplicaInfo("core_node2", "compute_plan_action_test", "shard1", Collections.singletonMap(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT))
    ));

    Map<String, Map<String, Object>> tagsMap = (Map) Utils.fromJSONString("{" +
@@ -1032,28 +1019,33 @@ public class TestPolicy extends SolrTestCaseJ4 {
        " 'freedisk':918005641216}}}");

    Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoscaleJson));
    Policy.Session session = policy.createSession(new ClusterDataProvider() {
    Policy.Session session = policy.createSession(new DelegatingCloudManager(null) {
      @Override
      public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
        return tagsMap.get(node);
      public ClusterStateProvider getClusterStateProvider() {
        return new DelegatingClusterStateProvider(null) {
          @Override
          public Set<String> getLiveNodes() {
            return replicaInfoMap.keySet();
          }
        };
      }

      @Override
      public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
        return (Map<String, Map<String, List<ReplicaInfo>>>) replicaInfoMap.get(node);
      }
      public NodeStateProvider getNodeStateProvider() {
        return new DelegatingNodeStateProvider(null) {
          @Override
          public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
            return tagsMap.get(node);
          }

          @Override
          public Collection<String> getNodes() {
            return replicaInfoMap.keySet();
          }

          @Override
          public String getPolicyNameByCollection(String coll) {
            return null;
          @Override
          public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
            return (Map<String, Map<String, List<ReplicaInfo>>>) replicaInfoMap.get(node);
          }
        };
      }
    });

    Policy.Suggester suggester = session.getSuggester(MOVEREPLICA)
        .hint(Hint.TARGET_NODE, "127.0.0.1:60099_solr");
    SolrRequest op = suggester.getOperation();
@@ -1089,27 +1081,39 @@ public class TestPolicy extends SolrTestCaseJ4 {
        "node4:{cores:8, freedisk: 375, heapUsage:16900, nodeRole:overseer, sysprop.rack: rack1}" +
        "}");
    Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(rules));
    ClusterDataProvider clusterDataProvider = getClusterDataProvider(nodeValues, clusterState);
    ClusterDataProvider cdp = new ClusterDataProvider() {
    SolrCloudManager cloudManager = getSolrCloudManager(nodeValues, clusterState);
    SolrCloudManager cdp = new DelegatingCloudManager(null) {
      @Override
      public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
        return clusterDataProvider.getNodeValues(node, tags);
      public NodeStateProvider getNodeStateProvider() {
        return new DelegatingNodeStateProvider(null) {
          @Override
          public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
            return cloudManager.getNodeStateProvider().getNodeValues(node, tags);
          }

          @Override
          public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
            return cloudManager.getNodeStateProvider().getReplicaInfo(node, keys);
          }
        };
      }

      @Override
      public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
        return clusterDataProvider.getReplicaInfo(node, keys);
      public ClusterStateProvider getClusterStateProvider() {
        return new DelegatingClusterStateProvider(null) {
          @Override
          public Set<String> getLiveNodes() {
            return cloudManager.getClusterStateProvider().getLiveNodes();
          }

          @Override
          public String getPolicyNameByCollection(String coll) {
            return "p1";
          }
        };
      }

      @Override
      public Collection<String> getNodes() {
        return clusterDataProvider.getNodes();
      }

      @Override
      public String getPolicyNameByCollection(String coll) {
        return "p1";
      }
    };
    Policy.Session session = policy.createSession(cdp);

@@ -1120,32 +1124,58 @@ public class TestPolicy extends SolrTestCaseJ4 {
    assertEquals("node2", op.getNode());
  }

  private ClusterDataProvider getClusterDataProvider(final Map<String, Map> nodeValues, String clusterState) {
    return new ClusterDataProvider() {
  private SolrCloudManager getSolrCloudManager(final Map<String, Map> nodeValues, String clusterState) {
    return new SolrCloudManager() {
      @Override
      public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
        Map<String, Object> result = new LinkedHashMap<>();
        tags.stream().forEach(s -> result.put(s, nodeValues.get(node).get(s)));
        return result;
      public ClusterStateProvider getClusterStateProvider() {
        return new DelegatingClusterStateProvider(null) {
          @Override
          public Set<String> getLiveNodes() {
            return nodeValues.keySet();
          }
        };
      }

      @Override
      public Collection<String> getNodes() {
        return nodeValues.keySet();
      public NodeStateProvider getNodeStateProvider() {
        return new DelegatingNodeStateProvider(null) {
          @Override
          public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
            Map<String, Object> result = new LinkedHashMap<>();
            tags.stream().forEach(s -> result.put(s, nodeValues.get(node).get(s)));
            return result;
          }

          @Override
          public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
            return getReplicaDetails(node, clusterState);
          }
        };
      }

      @Override
      public String getPolicyNameByCollection(String coll) {
      public DistribStateManager getDistribStateManager() {
        return null;
      }

      @Override
      public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
        return getReplicaDetails(node, clusterState);
      public DistributedQueueFactory getDistributedQueueFactory() {
        return null;
      }

      @Override
      public SolrResponse request(SolrRequest req) throws IOException {
        return null;
      }

      @Override
      public byte[] httpRequest(String url, SolrRequest.METHOD method, Map<String, String> headers, String payload, int timeout, boolean followRedirects) throws IOException {
        return new byte[0];
      }
    };
  }

  public void testEmptyClusterState() {
    String autoScaleJson = " {'policies':{'c1':[{" +
        " 'replica':1," +
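The anonymous class above doubles as a map of the SolrCloudManager surface that replaces ClusterDataProvider in this refactoring. A compact restatement, abridged from the overrides in that hunk; the import locations follow this diff's own imports, with NodeStateProvider's package assumed:

import java.io.IOException;
import java.util.Map;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.NodeStateProvider; // package assumed
import org.apache.solr.client.solrj.impl.ClusterStateProvider;

// Not the real declaration; a summary of the methods the test implements.
interface SolrCloudManagerShape {
  ClusterStateProvider getClusterStateProvider();       // cluster view: live nodes, collection state
  NodeStateProvider getNodeStateProvider();             // per-node tags and replica placement
  DistribStateManager getDistribStateManager();         // versioned shared state (ZK or simulated)
  DistributedQueueFactory getDistributedQueueFactory(); // distributed work queues
  SolrResponse request(SolrRequest req) throws IOException;
  byte[] httpRequest(String url, SolrRequest.METHOD method, Map<String, String> headers,
      String payload, int timeout, boolean followRedirects) throws IOException;
}

Each component can be swapped independently, which is what lets the simulation framework replace ZK- and HTTP-backed pieces without touching the policy engine.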
@@ -1158,27 +1188,32 @@ public class TestPolicy extends SolrTestCaseJ4 {
        " '127.0.0.1:50096_solr':{" +
        " 'cores':0," +
        " 'port':'50096'}}");
    ClusterDataProvider dataProvider = new ClusterDataProvider() {
    SolrCloudManager dataProvider = new DelegatingCloudManager(null) {
      @Override
      public Map<String, Object> getNodeValues(String node, Collection<String> keys) {
        Map<String, Object> result = new LinkedHashMap<>();
        keys.stream().forEach(s -> result.put(s, nodeValues.get(node).get(s)));
        return result;
      public ClusterStateProvider getClusterStateProvider() {
        return new DelegatingClusterStateProvider(null) {
          @Override
          public Set<String> getLiveNodes() {
            return new HashSet<>(Arrays.asList("127.0.0.1:50097_solr", "127.0.0.1:50096_solr"));
          }
        };
      }

      @Override
      public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
        return getReplicaDetails(node, clusterState);
      }
      public NodeStateProvider getNodeStateProvider() {
        return new DelegatingNodeStateProvider(null) {
          @Override
          public Map<String, Object> getNodeValues(String node, Collection<String> keys) {
            Map<String, Object> result = new LinkedHashMap<>();
            keys.stream().forEach(s -> result.put(s, nodeValues.get(node).get(s)));
            return result;
          }

          @Override
          public String getPolicyNameByCollection(String coll) {
            return null;
          }

          @Override
          public Collection<String> getNodes() {
            return Arrays.asList("127.0.0.1:50097_solr", "127.0.0.1:50096_solr");
          @Override
          public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
            return getReplicaDetails(node, clusterState);
          }
        };
      }
    };
    List<ReplicaPosition> locations = PolicyHelper.getReplicaLocations(
@@ -1215,32 +1250,37 @@ public class TestPolicy extends SolrTestCaseJ4 {
        "node4:{cores:0, freedisk: 900, heap:16900, nodeRole:overseer, sysprop.rack:rack2}" +
        "}");

    ClusterDataProvider dataProvider = new ClusterDataProvider() {
    SolrCloudManager cloudManager = new DelegatingCloudManager(null) {
      @Override
      public Map<String, Object> getNodeValues(String node, Collection<String> keys) {
        Map<String, Object> result = new LinkedHashMap<>();
        keys.stream().forEach(s -> result.put(s, nodeValues.get(node).get(s)));
        return result;
      public NodeStateProvider getNodeStateProvider() {
        return new DelegatingNodeStateProvider(null) {
          @Override
          public Map<String, Object> getNodeValues(String node, Collection<String> keys) {
            Map<String, Object> result = new LinkedHashMap<>();
            keys.stream().forEach(s -> result.put(s, nodeValues.get(node).get(s)));
            return result;
          }

          @Override
          public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
            return getReplicaDetails(node, clusterState);
          }
        };
      }

      @Override
      public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
        return getReplicaDetails(node, clusterState);
      }

      @Override
      public String getPolicyNameByCollection(String coll) {
        return null;
      }

      @Override
      public Collection<String> getNodes() {
        return Arrays.asList("node1", "node2", "node3", "node4");
      public ClusterStateProvider getClusterStateProvider() {
        return new DelegatingClusterStateProvider(null) {
          @Override
          public Set<String> getLiveNodes() {
            return new HashSet<>(Arrays.asList("node1", "node2", "node3", "node4"));
          }
        };
      }
    };
    List<ReplicaPosition> locations = PolicyHelper.getReplicaLocations(
        "newColl", new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScaleJson)),
        dataProvider, Collections.singletonMap("newColl", "policy1"), Arrays.asList("shard1", "shard2"), 3, 0, 0, null);
        cloudManager, Collections.singletonMap("newColl", "policy1"), Arrays.asList("shard1", "shard2"), 3, 0, 0, null);
    assertTrue(locations.stream().allMatch(it -> ImmutableList.of("node2", "node1", "node3").contains(it.node)));
  }

@@ -1267,7 +1307,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
        " { 'nodeRole':'overseer','replica':0}]," +
        " 'cluster-preferences':[{'minimize':'cores'}]}";
    Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
    Policy.Session session = policy.createSession(dataProviderWithData(dataproviderdata));
    Policy.Session session = policy.createSession(cloudManagerWithData(dataproviderdata));
    Policy.Suggester suggester = session.getSuggester(MOVEREPLICA).hint(Hint.TARGET_NODE, "10.0.0.6:7574_solr");
    SolrRequest op = suggester.getOperation();
    assertNotNull(op);
@@ -1304,7 +1344,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
        " {replica:'<2', shard:'#EACH',node:'#ANY'}," +
        " { nodeRole:overseer,replica:0}]}";
    Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
    Policy.Session session = policy.createSession(dataProviderWithData(dataproviderdata));
    Policy.Session session = policy.createSession(cloudManagerWithData(dataproviderdata));
    Policy.Suggester suggester = session.getSuggester(CollectionParams.CollectionAction.MOVEREPLICA)
        .hint(Hint.TARGET_NODE, "127.0.0.1:51147_solr");
    SolrRequest op = suggester.getOperation();
@@ -17,8 +17,6 @@

package org.apache.solr.client.solrj.impl;


import java.io.IOException;
import java.net.ConnectException;
import java.net.SocketException;
import java.util.Collections;
@@ -32,6 +30,7 @@ import java.util.function.Function;
import com.google.common.collect.ImmutableSet;
import org.apache.http.NoHttpResponseException;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.cloud.autoscaling.DelegatingClusterStateProvider;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
@@ -119,14 +118,14 @@ public class CloudSolrClientCacheTest extends SolrTestCaseJ4 {

  private ClusterStateProvider getStateProvider(Set<String> livenodes,
      Map<String, ClusterState.CollectionRef> colls) {
    return new ClusterStateProvider() {
    return new DelegatingClusterStateProvider(null) {
      @Override
      public ClusterState.CollectionRef getState(String collection) {
        return colls.get(collection);
      }

      @Override
      public Set<String> liveNodes() {
      public Set<String> getLiveNodes() {
        return livenodes;
      }

@@ -141,20 +140,7 @@ public class CloudSolrClientCacheTest extends SolrTestCaseJ4 {
      }

      @Override
      public void connect() { }

      @Override
      public void close() throws IOException {
      }

      @Override
      public Object getClusterProperty(String propertyName) {
        return null;
      }

      @Override
      public Object getClusterProperty(String propertyName, String def) {
      public <T> T getClusterProperty(String propertyName, T def) {
        return def;
      }
    };