SOLR-7325: Change Slice state into enum

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1670566 13f79535-47bb-0310-9956-ffa450edef68
Shai Erera 2015-04-01 05:42:47 +00:00
parent 5211c25c73
commit 208d0c79eb
24 changed files with 188 additions and 145 deletions

View File

@ -94,6 +94,10 @@ Upgrading from Solr 5.0
contain some LGPL-only code. Until that's resolved by Tika, you can download the
.jar yourself and place it under contrib/extraction/lib.
* SOLR-7325: Slice.getState() now returns a State enum instead of a String. This helps
clarify the states a Slice can be in, as well as comparing the state of a Slice.
(Shai Erera)
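In practice the change swaps string comparisons against the old Slice constants for direct enum comparisons, as the hunks below show. A minimal sketch of the new pattern (the helper class and method names are illustrative, not part of this commit):

import org.apache.solr.common.cloud.Slice;

class SliceStateCheck {
  // Before SOLR-7325: if (Slice.ACTIVE.equals(slice.getState())) { ... }
  // After SOLR-7325: getState() returns Slice.State, so identity comparison is safe.
  static boolean isActive(Slice slice) {
    return slice.getState() == Slice.State.ACTIVE;
  }
}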
Detailed Change List
----------------------

View File

@ -39,7 +39,7 @@ public class CloudDescriptor {
/* shardRange and shardState are used once-only during sub shard creation for shard splits
* Use the values from {@link Slice} instead */
volatile String shardRange = null;
volatile String shardState = Slice.ACTIVE;
volatile Slice.State shardState = Slice.State.ACTIVE;
volatile String shardParent = null;
volatile boolean isLeader = false;

View File

@ -175,7 +175,7 @@ public class OverseerAutoReplicaFailoverThread implements Runnable, Closeable {
Collection<Slice> slices = docCollection.getSlices();
for (Slice slice : slices) {
if (slice.getState().equals(Slice.ACTIVE)) {
if (slice.getState() == Slice.State.ACTIVE) {
final Collection<DownReplica> downReplicas = new ArrayList<DownReplica>();
@ -318,7 +318,7 @@ public class OverseerAutoReplicaFailoverThread implements Runnable, Closeable {
Collection<Slice> slices = docCollection.getSlices();
for (Slice slice : slices) {
// only look at active shards
if (slice.getState().equals(Slice.ACTIVE)) {
if (slice.getState() == Slice.State.ACTIVE) {
log.debug("look at slice {} as possible create candidate", slice.getName());
Collection<Replica> replicas = slice.getReplicas();

View File

@ -18,6 +18,7 @@ package org.apache.solr.cloud;
*/
import com.google.common.collect.ImmutableSet;
import org.apache.commons.lang.StringUtils;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.SolrServerException;
@ -43,6 +44,7 @@ import org.apache.solr.common.cloud.PlainIdRouter;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.RoutingRule;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.Slice.State;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkConfigManager;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
@ -1072,7 +1074,7 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
log.warn("Exception trying to unload core " + sreq, e);
}
collectShardResponses(!Slice.ACTIVE.equals(replica.getStr(Slice.STATE)) ? new NamedList() : results,
collectShardResponses(!ZkStateReader.ACTIVE.equals(replica.getStr(ZkStateReader.STATE_PROP)) ? new NamedList() : results,
false, null, shardHandler);
if (waitForCoreNodeGone(collectionName, shard, replicaName, 5000))
@ -1495,9 +1497,10 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
Slice oSlice = clusterState.getSlice(collectionName, subSlice);
if (oSlice != null) {
if (Slice.ACTIVE.equals(oSlice.getState())) {
final Slice.State state = oSlice.getState();
if (state == Slice.State.ACTIVE) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
} else if (Slice.CONSTRUCTION.equals(oSlice.getState()) || Slice.RECOVERY.equals(oSlice.getState())) {
} else if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
// delete the shards
for (String sub : subSlices) {
log.info("Sub-shard: {} already exists therefore requesting its deletion", sub);
@ -1538,7 +1541,7 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
propMap.put(ZkStateReader.SHARD_ID_PROP, subSlice);
propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
propMap.put(ZkStateReader.SHARD_RANGE_PROP, subRange.toString());
propMap.put(ZkStateReader.SHARD_STATE_PROP, Slice.CONSTRUCTION);
propMap.put(ZkStateReader.SHARD_STATE_PROP, Slice.State.CONSTRUCTION.toString());
propMap.put(ZkStateReader.SHARD_PARENT_PROP, parentSlice.getName());
DistributedQueue inQueue = Overseer.getInQueue(zkStateReader.getZkClient());
inQueue.offer(ZkStateReader.toJSON(new ZkNodeProps(propMap)));
@ -1733,9 +1736,9 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
DistributedQueue inQueue = Overseer.getInQueue(zkStateReader.getZkClient());
Map<String, Object> propMap = new HashMap<>();
propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
propMap.put(slice, Slice.INACTIVE);
propMap.put(slice, Slice.State.INACTIVE.toString());
for (String subSlice : subSlices) {
propMap.put(subSlice, Slice.ACTIVE);
propMap.put(subSlice, Slice.State.ACTIVE.toString());
}
propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
ZkNodeProps m = new ZkNodeProps(propMap);
@ -1746,7 +1749,7 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
Map<String, Object> propMap = new HashMap<>();
propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
for (String subSlice : subSlices) {
propMap.put(subSlice, Slice.RECOVERY);
propMap.put(subSlice, Slice.State.RECOVERY.toString());
}
propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
ZkNodeProps m = new ZkNodeProps(propMap);
@ -1887,11 +1890,12 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
}
// For now, only allow for deletions of Inactive slices or custom hashes (range==null).
// TODO: Add check for range gaps on Slice deletion
if (!(slice.getRange() == null || slice.getState().equals(Slice.INACTIVE)
|| slice.getState().equals(Slice.RECOVERY) || slice.getState().equals(Slice.CONSTRUCTION))) {
final Slice.State state = slice.getState();
if (!(slice.getRange() == null || state == Slice.State.INACTIVE
|| state == Slice.State.RECOVERY || state == Slice.State.CONSTRUCTION)) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"The slice: " + slice.getName() + " is currently "
+ slice.getState() + ". Only non-active (or custom-hashed) slices can be deleted.");
+ state + ". Only non-active (or custom-hashed) slices can be deleted.");
}
ShardHandler shardHandler = shardHandlerFactory.getShardHandler();

View File

@ -17,6 +17,14 @@ package org.apache.solr.cloud;
* limitations under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.Directory;
@ -57,14 +65,6 @@ import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
public class RecoveryStrategy extends Thread implements ClosableThread {
private static final int WAIT_FOR_UPDATES_WITH_STALE_STATE_PAUSE = Integer.getInteger("solr.cloud.wait-for-updates-with-stale-state-pause", 7000);
private static final int MAX_RETRIES = 500;
@ -580,7 +580,8 @@ public class RecoveryStrategy extends Thread implements ClosableThread {
prepCmd.setState(ZkStateReader.RECOVERING);
prepCmd.setCheckLive(true);
prepCmd.setOnlyIfLeader(true);
if (!Slice.CONSTRUCTION.equals(slice.getState()) && !Slice.RECOVERY.equals(slice.getState())) {
final Slice.State state = slice.getState();
if (state != Slice.State.CONSTRUCTION && state != Slice.State.RECOVERY) {
prepCmd.setOnlyIfLeaderActive(true);
}
HttpUriRequestResponse mrr = client.httpUriRequest(prepCmd);

View File

@ -17,6 +17,33 @@ package org.apache.solr.cloud;
* limitations under the License.
*/
import static org.apache.solr.common.cloud.ZkStateReader.*;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.URLEncoder;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.commons.lang.StringUtils;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest.WaitForState;
@ -69,40 +96,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.URLEncoder;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
/**
* Handle ZooKeeper interactions.
* <p>
@ -895,9 +888,8 @@ public final class ZkController {
if (!core.isReloaded() && ulog != null) {
// disable recovery in case shard is in construction state (for shard splits)
Slice slice = getClusterState().getSlice(collection, shardId);
if (!Slice.CONSTRUCTION.equals(slice.getState()) || !isLeader) {
Future<UpdateLog.RecoveryInfo> recoveryFuture = core.getUpdateHandler()
.getUpdateLog().recoverFromLog();
if (slice.getState() != Slice.State.CONSTRUCTION || !isLeader) {
Future<UpdateLog.RecoveryInfo> recoveryFuture = core.getUpdateHandler().getUpdateLog().recoverFromLog();
if (recoveryFuture != null) {
log.info("Replaying tlog for " + ourUrl + " during startup... NOTE: This can take a while.");
recoveryFuture.get(); // NOTE: this could potentially block for

View File

@ -53,7 +53,7 @@ public class CollectionMutator {
String shardState = message.getStr(ZkStateReader.SHARD_STATE_PROP);
String shardParent = message.getStr(ZkStateReader.SHARD_PARENT_PROP);
sliceProps.put(Slice.RANGE, shardRange);
sliceProps.put(Slice.STATE, shardState);
sliceProps.put(ZkStateReader.STATE_PROP, shardState);
if (shardParent != null) {
sliceProps.put(Slice.PARENT, shardParent);
}

View File

@ -17,6 +17,9 @@ package org.apache.solr.cloud.overseer;
* limitations under the License.
*/
import static org.apache.solr.cloud.OverseerCollectionProcessor.*;
import static org.apache.solr.cloud.overseer.CollectionMutator.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
@ -39,10 +42,6 @@ import org.apache.solr.common.cloud.ZkStateReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.cloud.OverseerCollectionProcessor.COLL_PROP_PREFIX;
import static org.apache.solr.cloud.overseer.CollectionMutator.checkCollectionKeyExistence;
import static org.apache.solr.cloud.overseer.CollectionMutator.checkKeyExistence;
public class ReplicaMutator {
private static Logger log = LoggerFactory.getLogger(ReplicaMutator.class);
@ -322,7 +321,7 @@ public class ReplicaMutator {
replicas = new HashMap<>(1);
sliceProps = new HashMap<>();
sliceProps.put(Slice.RANGE, shardRange);
sliceProps.put(Slice.STATE, shardState);
sliceProps.put(ZkStateReader.STATE_PROP, shardState);
sliceProps.put(Slice.PARENT, shardParent);
}
@ -357,8 +356,7 @@ public class ReplicaMutator {
private DocCollection checkAndCompleteShardSplit(ClusterState prevState, DocCollection collection, String coreNodeName, String sliceName, Map<String, Object> replicaProps) {
Slice slice = collection.getSlice(sliceName);
Map<String, Object> sliceProps = slice.getProperties();
String sliceState = slice.getState();
if (Slice.RECOVERY.equals(sliceState)) {
if (slice.getState() == Slice.State.RECOVERY) {
log.info("Shard: {} is in recovery state", sliceName);
// is this replica active?
if (ZkStateReader.ACTIVE.equals(replicaProps.get(ZkStateReader.STATE_PROP))) {
@ -367,7 +365,7 @@ public class ReplicaMutator {
boolean allActive = true;
for (Map.Entry<String, Replica> entry : slice.getReplicasMap().entrySet()) {
if (coreNodeName.equals(entry.getKey())) continue;
if (!Slice.ACTIVE.equals(entry.getValue().getStr(Slice.STATE))) {
if (!ZkStateReader.ACTIVE.equals(entry.getValue().getStr(ZkStateReader.STATE_PROP))) {
allActive = false;
break;
}
@ -382,7 +380,7 @@ public class ReplicaMutator {
if (sliceName.equals(entry.getKey()))
continue;
Slice otherSlice = entry.getValue();
if (Slice.RECOVERY.equals(otherSlice.getState())) {
if (otherSlice.getState() == Slice.State.RECOVERY) {
if (slice.getParent() != null && slice.getParent().equals(otherSlice.getParent())) {
log.info("Shard: {} - Fellow sub-shard: {} found", sliceName, otherSlice.getName());
// this is a fellow sub shard so check if all replicas are active
@ -404,10 +402,10 @@ public class ReplicaMutator {
Map<String, Object> propMap = new HashMap<>();
propMap.put(Overseer.QUEUE_OPERATION, "updateshardstate");
propMap.put(parentSliceName, Slice.INACTIVE);
propMap.put(sliceName, Slice.ACTIVE);
propMap.put(parentSliceName, Slice.State.INACTIVE.toString());
propMap.put(sliceName, Slice.State.ACTIVE.toString());
for (Slice subShardSlice : subShardSlices) {
propMap.put(subShardSlice.getName(), Slice.ACTIVE);
propMap.put(subShardSlice.getName(), Slice.State.ACTIVE.toString());
}
propMap.put(ZkStateReader.COLLECTION_PROP, collection.getName());
ZkNodeProps m = new ZkNodeProps(propMap);

View File

@ -23,6 +23,7 @@ import java.util.Map;
import java.util.Set;
import com.google.common.collect.ImmutableSet;
import org.apache.solr.cloud.Assign;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.common.cloud.ClusterState;
@ -187,10 +188,12 @@ public class SliceMutator {
}
log.info("Update shard state " + key + " to " + message.getStr(key));
Map<String, Object> props = slice.shallowCopy();
if (Slice.RECOVERY.equals(props.get(Slice.STATE)) && Slice.ACTIVE.equals(message.getStr(key))) {
if (Slice.State.getState((String) props.get(ZkStateReader.STATE_PROP)) == Slice.State.RECOVERY
&& Slice.State.getState(message.getStr(key)) == Slice.State.ACTIVE) {
props.remove(Slice.PARENT);
}
props.put(Slice.STATE, message.getStr(key));
props.put(ZkStateReader.STATE_PROP, message.getStr(key));
Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props);
slicesCopy.put(slice.getName(), newSlice);
}

View File

@ -913,9 +913,9 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
// ZK pre-Register would have already happened so we read slice properties now
ClusterState clusterState = cc.getZkController().getClusterState();
Slice slice = clusterState.getSlice(cd.getCloudDescriptor().getCollectionName(),
cd.getCloudDescriptor().getShardId());
if (Slice.CONSTRUCTION.equals(slice.getState())) {
if (slice.getState() == Slice.State.CONSTRUCTION) {
// set update log to buffer before publishing the core
getUpdateHandler().getUpdateLog().bufferUpdates();
}

View File

@ -46,6 +46,7 @@ import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.OnReconnect;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.util.FastWriter;
@ -166,12 +167,12 @@ public final class ZookeeperInfoServlet extends BaseSolrServlet {
// state can lie to you if the node is offline, so need to reconcile with live_nodes too
if (!liveNodes.contains(nodeName))
coreState = "down"; // not on a live node, so must be down
coreState = ZkStateReader.DOWN; // not on a live node, so must be down
if ("active".equals(coreState)) {
if (ZkStateReader.ACTIVE.equals(coreState)) {
hasActive = true; // assumed no replicas active and found one that is for this shard
} else {
if ("recovering".equals(coreState)) {
if (ZkStateReader.RECOVERING.equals(coreState)) {
replicaInRecovery = true;
}
isHealthy = false; // assumed healthy and found one replica that is not
@ -188,7 +189,7 @@ public final class ZookeeperInfoServlet extends BaseSolrServlet {
return !hasDownedShard && !isHealthy; // means no shards offline but not 100% healthy either
} else if ("downed_shard".equals(filter)) {
return hasDownedShard;
} else if ("recovering".equals(filter)) {
} else if (ZkStateReader.RECOVERING.equals(filter)) {
return !isHealthy && replicaInRecovery;
}

View File

@ -37,6 +37,7 @@ import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.RoutingRule;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.Slice.State;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
@ -436,19 +437,18 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
private boolean couldIbeSubShardLeader(DocCollection coll) {
// Could I be the leader of a shard in "construction/recovery" state?
String myShardId = req.getCore().getCoreDescriptor().getCloudDescriptor()
.getShardId();
String myShardId = req.getCore().getCoreDescriptor().getCloudDescriptor().getShardId();
Slice mySlice = coll.getSlice(myShardId);
String state = mySlice.getState();
return (Slice.CONSTRUCTION.equals(state) || Slice.RECOVERY.equals(state));
State state = mySlice.getState();
return state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY;
}
private boolean amISubShardLeader(DocCollection coll, Slice parentSlice, String id, SolrInputDocument doc) throws InterruptedException {
// Am I the leader of a shard in "construction/recovery" state?
String myShardId = req.getCore().getCoreDescriptor().getCloudDescriptor().getShardId();
Slice mySlice = coll.getSlice(myShardId);
String state = mySlice.getState();
if (Slice.CONSTRUCTION.equals(state) || Slice.RECOVERY.equals(state)) {
final State state = mySlice.getState();
if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
Replica myLeader = zkController.getZkStateReader().getLeaderRetry(collection, myShardId);
boolean amILeader = myLeader.getName().equals(
req.getCore().getCoreDescriptor().getCloudDescriptor()
@ -473,7 +473,8 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
Collection<Slice> allSlices = coll.getSlices();
List<Node> nodes = null;
for (Slice aslice : allSlices) {
if (Slice.CONSTRUCTION.equals(aslice.getState()) || Slice.RECOVERY.equals(aslice.getState())) {
final Slice.State state = aslice.getState();
if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
DocRouter.Range myRange = coll.getSlice(shardId).getRange();
if (myRange == null) myRange = new DocRouter.Range(Integer.MIN_VALUE, Integer.MAX_VALUE);
boolean isSubset = aslice.getRange() != null && aslice.getRange().isSubsetOf(myRange);
@ -588,7 +589,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
if (DistribPhase.FROMLEADER == phase && localIsLeader && from != null) { // from will be null on log replay
String fromShard = req.getParams().get(DISTRIB_FROM_PARENT);
if (fromShard != null) {
if (Slice.ACTIVE.equals(mySlice.getState())) {
if (mySlice.getState() == Slice.State.ACTIVE) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"Request says it is coming from parent shard leader but we are in active state");
}

View File

@ -864,7 +864,7 @@ public class SolrCLI {
if (replicaHealth.isLeader)
hasLeader = true;
if (!"active".equals(replicaHealth.status)) {
if (!ZkStateReader.ACTIVE.equals(replicaHealth.status)) {
healthy = false;
} else {
atLeastOneActive = true;

View File

@ -282,7 +282,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
attempts++;
int activeReplicaCount = 0;
for (Replica x : zkStateReader.getClusterState().getCollection(collectionName).getSlice("x").getReplicas()) {
if("active".equals(x.getStr("state"))) activeReplicaCount++;
if(ZkStateReader.ACTIVE.equals(x.getStr(ZkStateReader.STATE_PROP))) activeReplicaCount++;
}
Thread.sleep(500);
if(activeReplicaCount >= replicationFactor) break;

View File

@ -17,6 +17,14 @@ package org.apache.solr.cloud;
* limitations under the License.
*/
import static org.apache.solr.cloud.CollectionsAPIDistributedZkTest.*;
import static org.apache.solr.common.cloud.ZkNodeProps.*;
import java.net.URL;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
@ -31,16 +39,6 @@ import org.apache.solr.common.params.MapSolrParams;
import org.apache.solr.common.util.NamedList;
import org.junit.Test;
import java.net.URL;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.apache.solr.cloud.CollectionsAPIDistributedZkTest.setClusterProp;
import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
public class DeleteInactiveReplicaTest extends AbstractFullDistribZkTestBase{
@Test
@ -109,8 +107,8 @@ public class DeleteInactiveReplicaTest extends AbstractFullDistribZkTestBase{
while (System.currentTimeMillis() < endAt) {
testcoll = client.getZkStateReader()
.getClusterState().getCollection(collectionName);
if (!"active".equals(testcoll.getSlice(shard1.getName())
.getReplica(replica1.getName()).getStr(Slice.STATE))) {
if (!ZkStateReader.ACTIVE.equals(testcoll.getSlice(shard1.getName())
.getReplica(replica1.getName()).getStr(ZkStateReader.STATE_PROP))) {
success = true;
}
if (success) break;

View File

@ -92,10 +92,10 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
for (Slice slice : testcoll.getSlices()) {
if(replica1 != null)
break;
if ("active".equals(slice.getStr("state"))) {
if (slice.getState() == Slice.State.ACTIVE) {
shard1 = slice;
for (Replica replica : shard1.getReplicas()) {
if ("active".equals(replica.getStr("state"))) {
if (ZkStateReader.ACTIVE.equals(replica.getStr(ZkStateReader.STATE_PROP))) {
replica1 = replica;
break;
}

View File

@ -25,6 +25,7 @@ import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.Slice.State;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionParams;
@ -69,8 +70,8 @@ public class DeleteShardTest extends AbstractFullDistribZkTestBase {
assertNotNull("Shard1 not found", slice1);
assertNotNull("Shard2 not found", slice2);
assertEquals("Shard1 is not active", Slice.ACTIVE, slice1.getState());
assertEquals("Shard2 is not active", Slice.ACTIVE, slice2.getState());
assertSame("Shard1 is not active", Slice.State.ACTIVE, slice1.getState());
assertSame("Shard2 is not active", Slice.State.ACTIVE, slice2.getState());
try {
deleteShard(SHARD1);
@ -79,19 +80,19 @@ public class DeleteShardTest extends AbstractFullDistribZkTestBase {
// expected
}
setSliceState(SHARD1, Slice.INACTIVE);
setSliceState(SHARD1, Slice.State.INACTIVE);
clusterState = cloudClient.getZkStateReader().getClusterState();
slice1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
assertEquals("Shard1 is not inactive yet.", Slice.INACTIVE, slice1.getState());
assertSame("Shard1 is not inactive yet.", Slice.State.INACTIVE, slice1.getState());
deleteShard(SHARD1);
confirmShardDeletion(SHARD1);
setSliceState(SHARD2, Slice.CONSTRUCTION);
setSliceState(SHARD2, Slice.State.CONSTRUCTION);
deleteShard(SHARD2);
confirmShardDeletion(SHARD2);
}
@ -135,12 +136,12 @@ public class DeleteShardTest extends AbstractFullDistribZkTestBase {
}
}
protected void setSliceState(String slice, String state) throws SolrServerException, IOException,
protected void setSliceState(String slice, State state) throws SolrServerException, IOException,
KeeperException, InterruptedException {
DistributedQueue inQueue = Overseer.getInQueue(cloudClient.getZkStateReader().getZkClient());
Map<String, Object> propMap = new HashMap<>();
propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
propMap.put(slice, state);
propMap.put(slice, state.toString());
propMap.put(ZkStateReader.COLLECTION_PROP, "collection1");
ZkNodeProps m = new ZkNodeProps(propMap);
ZkStateReader zkStateReader = cloudClient.getZkStateReader();
@ -150,8 +151,8 @@ public class DeleteShardTest extends AbstractFullDistribZkTestBase {
for (int counter = 10; counter > 0; counter--) {
zkStateReader.updateClusterState(true);
ClusterState clusterState = zkStateReader.getClusterState();
String sliceState = clusterState.getSlice("collection1", slice).getState();
if (sliceState.equals(state)) {
State sliceState = clusterState.getSlice("collection1", slice).getState();
if (sliceState == state) {
transition = true;
break;
}
@ -164,4 +165,3 @@ public class DeleteShardTest extends AbstractFullDistribZkTestBase {
}
}

View File

@ -22,6 +22,7 @@ import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.junit.Test;
import java.io.File;
@ -57,6 +58,7 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest
}
}
@Override
@Test
public void test() throws Exception {
oneShardTest();
@ -91,7 +93,7 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest
cloudClient.getZkStateReader().updateClusterState(true); // get the latest state
leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
assertEquals("Leader was not active", "active", leader.getStr("state"));
assertEquals("Leader was not active", ZkStateReader.ACTIVE, leader.getStr(ZkStateReader.STATE_PROP));
leaderProxy.reopen();
Thread.sleep(sleepMsBeforeHealPartition);
@ -134,7 +136,7 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest
cloudClient.getZkStateReader().updateClusterState(true); // get the latest state
leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
assertEquals("Leader was not active", "active", leader.getStr("state"));
assertEquals("Leader was not active", ZkStateReader.ACTIVE, leader.getStr(ZkStateReader.STATE_PROP));
leaderProxy.reopen();
Thread.sleep(sleepMsBeforeHealPartition);

View File

@ -396,8 +396,9 @@ public class ShardSplitTest extends BasicDistributedZkTest {
clusterState = zkStateReader.getClusterState();
slice1_0 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_0");
slice1_1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_1");
if (Slice.ACTIVE.equals(slice1_0.getState()) && Slice.ACTIVE.equals(slice1_1.getState()))
if (slice1_0.getState() == Slice.State.ACTIVE && slice1_1.getState() == Slice.State.ACTIVE) {
break;
}
Thread.sleep(500);
}
@ -405,8 +406,8 @@ public class ShardSplitTest extends BasicDistributedZkTest {
assertNotNull("Cluster state does not contain shard1_0", slice1_0);
assertNotNull("Cluster state does not contain shard1_0", slice1_1);
assertEquals("shard1_0 is not active", Slice.ACTIVE, slice1_0.getState());
assertEquals("shard1_1 is not active", Slice.ACTIVE, slice1_1.getState());
assertSame("shard1_0 is not active", Slice.State.ACTIVE, slice1_0.getState());
assertSame("shard1_1 is not active", Slice.State.ACTIVE, slice1_1.getState());
assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size());
assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size());

View File

@ -32,8 +32,9 @@ import java.util.Map;
import java.util.Set;
public class SliceStateTest extends SolrTestCaseJ4 {
@Test
public void testDefaultSliceState() throws Exception {
public void testDefaultSliceState() {
Map<String, DocCollection> collectionStates = new HashMap<>();
Set<String> liveNodes = new HashSet<>();
liveNodes.add("node1");
@ -45,15 +46,14 @@ public class SliceStateTest extends SolrTestCaseJ4 {
Replica replica = new Replica("node1", props);
sliceToProps.put("node1", replica);
Slice slice = new Slice("shard1", sliceToProps, null);
assertEquals("Default state not set to active", Slice.ACTIVE, slice.getState());
assertSame("Default state not set to active", Slice.State.ACTIVE, slice.getState());
slices.put("shard1", slice);
collectionStates.put("collection1", new DocCollection("collection1", slices, null, DocRouter.DEFAULT));
ZkStateReader mockZkStateReader = ClusterStateTest.getMockZkStateReader(collectionStates.keySet());
ClusterState clusterState = new ClusterState(-1,liveNodes, collectionStates);
byte[] bytes = ZkStateReader.toJSON(clusterState);
ClusterState loadedClusterState = ClusterState.load(-1, bytes, liveNodes);
assertEquals("Default state not set to active", "active", loadedClusterState.getSlice("collection1", "shard1").getState());
assertSame("Default state not set to active", Slice.State.ACTIVE, loadedClusterState.getSlice("collection1", "shard1").getState());
}
}

View File

@ -24,6 +24,7 @@ import org.apache.solr.cloud.MockZkStateReader;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.ImplicitDocRouter;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
public class TestClusterStateMutator extends SolrTestCaseJ4 {
@ -60,8 +61,8 @@ public class TestClusterStateMutator extends SolrTestCaseJ4 {
assertNotNull(collection.getSlicesMap().get("y"));
assertNull(collection.getSlicesMap().get("x").getRange());
assertNull(collection.getSlicesMap().get("y").getRange());
assertEquals("active", collection.getSlicesMap().get("x").getState());
assertEquals("active", collection.getSlicesMap().get("y").getState());
assertSame(Slice.State.ACTIVE, collection.getSlicesMap().get("x").getState());
assertSame(Slice.State.ACTIVE, collection.getSlicesMap().get("y").getState());
assertEquals(4, collection.getMaxShardsPerNode());
assertEquals(ImplicitDocRouter.class, collection.getRouter().getClass());
assertNotNull(state.getCollectionOrNull("xyz")); // we still have the old collection

View File

@ -75,7 +75,7 @@ public class ClusterStateUtil {
Collection<Slice> slices = docCollection.getSlices();
for (Slice slice : slices) {
// only look at active shards
if (slice.getState().equals(Slice.ACTIVE)) {
if (slice.getState() == Slice.State.ACTIVE) {
Collection<Replica> replicas = slice.getReplicas();
for (Replica replica : replicas) {
// on a live node?
@ -135,7 +135,7 @@ public class ClusterStateUtil {
Collection<Slice> slices = docCollection.getSlices();
for (Slice slice : slices) {
// only look at active shards
if (slice.getState().equals(Slice.ACTIVE)) {
if (slice.getState() == Slice.State.ACTIVE) {
Collection<Replica> replicas = slice.getReplicas();
for (Replica replica : replicas) {
// on a live node?
@ -188,7 +188,7 @@ public class ClusterStateUtil {
Collection<Slice> slices = docCollection.getSlices();
for (Slice slice : slices) {
// only look at active shards
if (slice.getState().equals(Slice.ACTIVE)) {
if (slice.getState() == Slice.State.ACTIVE) {
Collection<Replica> replicas = slice.getReplicas();
for (Replica replica : replicas) {
// on a live node?

View File

@ -87,7 +87,7 @@ public class DocCollection extends ZkNodeProps {
while (iter.hasNext()) {
Map.Entry<String, Slice> slice = iter.next();
if (slice.getValue().getState().equals(Slice.ACTIVE))
if (slice.getValue().getState() == Slice.State.ACTIVE)
this.activeSlices.put(slice.getKey(), slice.getValue());
}
this.router = router;

View File

@ -23,28 +23,65 @@ import org.noggit.JSONWriter;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Locale;
import java.util.Map;
/**
* A Slice contains immutable information about a logical shard (all replicas that share the same shard id).
*/
public class Slice extends ZkNodeProps {
public static String REPLICAS = "replicas";
public static String RANGE = "range";
public static String STATE = "state";
public static String LEADER = "leader"; // FUTURE: do we want to record the leader as a slice property in the JSON (as opposed to isLeader as a replica property?)
public static String ACTIVE = "active";
public static String INACTIVE = "inactive";
public static String CONSTRUCTION = "construction";
public static String RECOVERY = "recovery";
public static String PARENT = "parent";
/** The slice's state. */
public enum State {
/** The default state of a slice. */
ACTIVE,
/**
* A slice is put in that state after it has been successfully split. See
* <a href="https://cwiki.apache.org/confluence/display/solr/Collections+API#CollectionsAPI-api3">
* the reference guide</a> for more details.
*/
INACTIVE,
/**
* When a shard is split, the new sub-shards are put in that state while the
* split operation is in progress. A shard in that state still receives
* update requests from the parent shard leader, however does not participate
* in distributed search.
*/
CONSTRUCTION,
/**
* Sub-shards of a split shard are put in that state, when they need to
* create replicas in order to meet the collection's replication factor. A
* shard in that state still receives update requests from the parent shard
* leader, however does not participate in distributed search.
*/
RECOVERY;
@Override
public String toString() {
return super.toString().toLowerCase(Locale.ROOT);
}
/** Converts the state string to a State instance. */
public static State getState(String stateStr) {
return State.valueOf(stateStr.toUpperCase(Locale.ROOT));
}
}
public static final String REPLICAS = "replicas";
public static final String RANGE = "range";
public static final String LEADER = "leader"; // FUTURE: do we want to record the leader as a slice property in the JSON (as opposed to isLeader as a replica property?)
public static final String PARENT = "parent";
private final String name;
private final DocRouter.Range range;
private final Integer replicationFactor; // FUTURE: optional per-slice override of the collection replicationFactor
private final Map<String,Replica> replicas;
private final Replica leader;
private final String state;
private final State state;
private final String parent;
private final Map<String, RoutingRule> routingRules;
@ -58,11 +95,11 @@ public class Slice extends ZkNodeProps {
this.name = name;
Object rangeObj = propMap.get(RANGE);
if (propMap.containsKey(STATE) && propMap.get(STATE) != null)
this.state = (String) propMap.get(STATE);
else {
this.state = ACTIVE; //Default to ACTIVE
propMap.put(STATE, this.state);
if (propMap.get(ZkStateReader.STATE_PROP) != null) {
this.state = State.getState((String) propMap.get(ZkStateReader.STATE_PROP));
} else {
this.state = State.ACTIVE; //Default to ACTIVE
propMap.put(ZkStateReader.STATE_PROP, state.toString());
}
DocRouter.Range tmpRange = null;
if (rangeObj instanceof DocRouter.Range) {
@ -172,7 +209,7 @@ public class Slice extends ZkNodeProps {
return range;
}
public String getState() {
public State getState() {
return state;
}
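
For reference, the enum keeps the lowercase strings that ZooKeeper already stores: toString() lowercases the constant name, getState() parses it back, and a missing state property still defaults to ACTIVE (see the constructor hunk above). A small sketch of that round trip (class name illustrative only; run with -ea to enable the assertions):

import org.apache.solr.common.cloud.Slice;

class SliceStateRoundTrip {
  public static void main(String[] args) {
    // The serialized form is the lowercase constant name, e.g. "construction".
    assert Slice.State.CONSTRUCTION.toString().equals("construction");
    // getState() upper-cases the stored string back into the enum constant.
    assert Slice.State.getState("construction") == Slice.State.CONSTRUCTION;
    assert Slice.State.getState("active") == Slice.State.ACTIVE;
  }
}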