diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java
index 52a85eb29b8..84665357f1c 100644
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java
+++ b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java
@@ -194,7 +194,7 @@ public class CarrotClusteringEngine extends SearchClusteringEngine {
try {
configDir = "configDir=" + new File(resourceLoader.getConfigDir()).getAbsolutePath() + ", ";
} catch (Exception ignored) {
- // If we get the exception, the resource loader implementation
+ // If we get the exception, the resource loader implementation
// probably does not support getConfigDir(). Not a big problem.
}
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDIHEndToEnd.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDIHEndToEnd.java
index 874fbf555f8..f8087c82a68 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDIHEndToEnd.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDIHEndToEnd.java
@@ -39,7 +39,7 @@ public class TestDIHEndToEnd extends AbstractDIHJdbcTestCase {
assertQ(req("COUNTRY_NAME:zealand"), "//*[@numFound='2']");
assertQ(req("COUNTRY_NAME:niue"), "//*[@numFound='3']");
- //It would be nice if there was a way to get it to run transformers before putting
+ //It would be nice if there was a way to get it to run transformers before putting
//data in the cache, then id=2 (person=Ethan, country=NU,NA,NE) could join...)
//assertQ(req("COUNTRY_NAME:Netherlands"), "//*[@numFound='3']");
diff --git a/solr/core/src/java/org/apache/solr/SolrLogFormatter.java b/solr/core/src/java/org/apache/solr/SolrLogFormatter.java
index 565d55b41b5..424c60d1fc8 100644
--- a/solr/core/src/java/org/apache/solr/SolrLogFormatter.java
+++ b/solr/core/src/java/org/apache/solr/SolrLogFormatter.java
@@ -19,7 +19,7 @@ package org.apache.solr;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.SolrQueryRequest;
@@ -99,7 +99,7 @@ public class SolrLogFormatter extends Formatter {
static int maxCoreNum;
String shortId;
String url;
- Map coreProps;
+ Map coreProps;
}
Map coreInfoMap = new WeakHashMap(); // TODO: use something that survives across a core reload?
@@ -200,7 +200,7 @@ sb.append("(group_name=").append(tg.getName()).append(")");
info.coreProps = getCoreProps(zkController, core);
}
- Map coreProps = getCoreProps(zkController, core);
+ Map coreProps = getCoreProps(zkController, core);
if(!coreProps.equals(info.coreProps)) {
info.coreProps = coreProps;
final String corePropsString = "coll:" + core.getCoreDescriptor().getCloudDescriptor().getCollectionName() + " core:" + core.getName() + " props:" + coreProps;
@@ -261,9 +261,9 @@ sb.append("(group_name=").append(tg.getName()).append(")");
return sb.toString();
}
- private Map getCoreProps(ZkController zkController, SolrCore core) {
+ private Map getCoreProps(ZkController zkController, SolrCore core) {
final String collection = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
- ZkNodeProps props = zkController.getClusterState().getShardProps(collection, ZkStateReader.getCoreNodeName(zkController.getNodeName(), core.getName()));
+ Replica props = zkController.getClusterState().getShardProps(collection, ZkStateReader.getCoreNodeName(zkController.getNodeName(), core.getName()));
if(props!=null) {
return props.getProperties();
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/AssignShard.java b/solr/core/src/java/org/apache/solr/cloud/AssignShard.java
index 13dc8f4dc09..62dcaba516d 100644
--- a/solr/core/src/java/org/apache/solr/cloud/AssignShard.java
+++ b/solr/core/src/java/org/apache/solr/cloud/AssignShard.java
@@ -56,7 +56,7 @@ public class AssignShard {
// else figure out which shard needs more replicas
final Map map = new HashMap();
for (String shardId : shardIdNames) {
- int cnt = sliceMap.get(shardId).getShards().size();
+ int cnt = sliceMap.get(shardId).getReplicasMap().size();
map.put(shardId, cnt);
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
index 82dbb4e16ce..43a8d10e79b 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
@@ -8,6 +8,7 @@ import java.util.Set;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
@@ -89,10 +90,10 @@ class ShardLeaderElectionContextBase extends ElectionContext {
zkClient.makePath(leaderPath, ZkStateReader.toJSON(leaderProps),
CreateMode.EPHEMERAL, true);
- ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "leader",
+ ZkNodeProps m = ZkNodeProps.fromKeyVals(Overseer.QUEUE_OPERATION, "leader",
ZkStateReader.SHARD_ID_PROP, shardId, ZkStateReader.COLLECTION_PROP,
collection, ZkStateReader.BASE_URL_PROP, leaderProps.getProperties()
- .get(ZkStateReader.BASE_URL_PROP), ZkStateReader.CORE_NAME_PROP,
+ .get(ZkStateReader.BASE_URL_PROP), ZkStateReader.CORE_NAME_PROP,
leaderProps.getProperties().get(ZkStateReader.CORE_NAME_PROP),
ZkStateReader.STATE_PROP, ZkStateReader.ACTIVE);
Overseer.getInQueue(zkClient).offer(ZkStateReader.toJSON(m));
@@ -133,7 +134,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
InterruptedException, IOException {
log.info("Running the leader process. afterExpiration=" + afterExpiration);
- String coreName = leaderProps.get(ZkStateReader.CORE_NAME_PROP);
+ String coreName = leaderProps.getStr(ZkStateReader.CORE_NAME_PROP);
// clear the leader in clusterstate
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "leader",
@@ -244,11 +245,11 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
while (true && !isClosed) {
// wait for everyone to be up
if (slices != null) {
- Map shards = slices.getShards();
- Set> entrySet = shards.entrySet();
+ Map shards = slices.getReplicasMap();
+ Set> entrySet = shards.entrySet();
int found = 0;
tryAgain = false;
- for (Entry entry : entrySet) {
+ for (Entry entry : entrySet) {
ZkCoreNodeProps props = new ZkCoreNodeProps(entry.getValue());
if (props.getState().equals(ZkStateReader.ACTIVE)
&& zkController.getClusterState().liveNodesContain(
@@ -259,16 +260,16 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
// on startup and after connection timeout, wait for all known shards
if ((afterExpiration || !weAreReplacement)
- && found >= slices.getShards().size()) {
+ && found >= slices.getReplicasMap().size()) {
log.info("Enough replicas found to continue.");
break;
- } else if (!afterExpiration && found >= slices.getShards().size() - 1) {
+ } else if (!afterExpiration && found >= slices.getReplicasMap().size() - 1) {
// a previous leader went down - wait for one less than the total
// known shards
log.info("Enough replicas found to continue.");
break;
} else {
- log.info("Waiting until we see more replicas up: total=" + slices.getShards().size() + " found=" + found + " timeoutin=" + (timeoutAt - System.currentTimeMillis()));
+ log.info("Waiting until we see more replicas up: total=" + slices.getReplicasMap().size() + " found=" + found + " timeoutin=" + (timeoutAt - System.currentTimeMillis()));
}
if (System.currentTimeMillis() > timeoutAt) {
@@ -310,16 +311,16 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
ClusterState clusterState = zkController.getZkStateReader().getClusterState();
Map slices = clusterState.getSlices(this.collection);
Slice slice = slices.get(shardId);
- Map shards = slice.getShards();
+ Map shards = slice.getReplicasMap();
boolean foundSomeoneElseActive = false;
- for (Map.Entry shard : shards.entrySet()) {
- String state = shard.getValue().get(ZkStateReader.STATE_PROP);
+ for (Map.Entry shard : shards.entrySet()) {
+ String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
if (new ZkCoreNodeProps(shard.getValue()).getCoreUrl().equals(
new ZkCoreNodeProps(leaderProps).getCoreUrl())) {
if (state.equals(ZkStateReader.ACTIVE)
- && clusterState.liveNodesContain(shard.getValue().get(
- ZkStateReader.NODE_NAME_PROP))) {
+ && clusterState.liveNodesContain(shard.getValue().getStr(
+ ZkStateReader.NODE_NAME_PROP))) {
// we are alive
log.info("I am Active and live, it's okay to be the leader.");
return true;
@@ -327,8 +328,8 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
}
if ((state.equals(ZkStateReader.ACTIVE))
- && clusterState.liveNodesContain(shard.getValue().get(
- ZkStateReader.NODE_NAME_PROP))
+ && clusterState.liveNodesContain(shard.getValue().getStr(
+ ZkStateReader.NODE_NAME_PROP))
&& !new ZkCoreNodeProps(shard.getValue()).getCoreUrl().equals(
new ZkCoreNodeProps(leaderProps).getCoreUrl())) {
foundSomeoneElseActive = true;
@@ -354,15 +355,15 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
ClusterState clusterState = zkController.getZkStateReader().getClusterState();
Map slices = clusterState.getSlices(this.collection);
Slice slice = slices.get(shardId);
- Map shards = slice.getShards();
+ Map shards = slice.getReplicasMap();
- for (Map.Entry shard : shards.entrySet()) {
- String state = shard.getValue().get(ZkStateReader.STATE_PROP);
+ for (Map.Entry shard : shards.entrySet()) {
+ String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
if ((state.equals(ZkStateReader.ACTIVE))
- && clusterState.liveNodesContain(shard.getValue().get(
- ZkStateReader.NODE_NAME_PROP))) {
+ && clusterState.liveNodesContain(shard.getValue().getStr(
+ ZkStateReader.NODE_NAME_PROP))) {
return true;
}
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index b8b3654511a..a77fc2dcc3e 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -26,6 +26,7 @@ import java.util.Map.Entry;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ClosableThread;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
@@ -87,7 +88,7 @@ public class Overseer {
while (head != null && amILeader()) {
final ZkNodeProps message = ZkNodeProps.load(head);
final String operation = message
- .get(QUEUE_OPERATION);
+ .getStr(QUEUE_OPERATION);
clusterState = processMessage(clusterState, message, operation);
zkClient.setData(ZkStateReader.CLUSTER_STATE,
ZkStateReader.toJSON(clusterState), true);
@@ -123,7 +124,7 @@ public class Overseer {
while (head != null) {
final ZkNodeProps message = ZkNodeProps.load(head);
- final String operation = message.get(QUEUE_OPERATION);
+ final String operation = message.getStr(QUEUE_OPERATION);
clusterState = processMessage(clusterState, message, operation);
workQueue.offer(head);
@@ -168,15 +169,15 @@ public class Overseer {
} else if (ZkStateReader.LEADER_PROP.equals(operation)) {
StringBuilder sb = new StringBuilder();
- String baseUrl = message.get(ZkStateReader.BASE_URL_PROP);
- String coreName = message.get(ZkStateReader.CORE_NAME_PROP);
+ String baseUrl = message.getStr(ZkStateReader.BASE_URL_PROP);
+ String coreName = message.getStr(ZkStateReader.CORE_NAME_PROP);
sb.append(baseUrl);
if (baseUrl != null && !baseUrl.endsWith("/")) sb.append("/");
sb.append(coreName == null ? "" : coreName);
if (!(sb.substring(sb.length() - 1).equals("/"))) sb.append("/");
clusterState = setShardLeader(clusterState,
- message.get(ZkStateReader.COLLECTION_PROP),
- message.get(ZkStateReader.SHARD_ID_PROP),
+ message.getStr(ZkStateReader.COLLECTION_PROP),
+ message.getStr(ZkStateReader.SHARD_ID_PROP),
sb.length() > 0 ? sb.toString() : null);
} else {
@@ -189,7 +190,7 @@ public class Overseer {
private boolean amILeader() {
try {
ZkNodeProps props = ZkNodeProps.load(zkClient.getData("/overseer_elect/leader", null, null, true));
- if(myId.equals(props.get("id"))) {
+ if(myId.equals(props.getStr("id"))) {
return true;
}
} catch (KeeperException e) {
@@ -204,9 +205,9 @@ public class Overseer {
* Try to assign core to the cluster.
*/
private ClusterState updateState(ClusterState state, final ZkNodeProps message) {
- final String collection = message.get(ZkStateReader.COLLECTION_PROP);
- final String zkCoreNodeName = message.get(ZkStateReader.NODE_NAME_PROP) + "_" + message.get(ZkStateReader.CORE_NAME_PROP);
- final Integer numShards = message.get(ZkStateReader.NUM_SHARDS_PROP)!=null?Integer.parseInt(message.get(ZkStateReader.NUM_SHARDS_PROP)):null;
+ final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
+ final String zkCoreNodeName = message.getStr(ZkStateReader.NODE_NAME_PROP) + "_" + message.getStr(ZkStateReader.CORE_NAME_PROP);
+ final Integer numShards = message.getStr(ZkStateReader.NUM_SHARDS_PROP)!=null?Integer.parseInt(message.getStr(ZkStateReader.NUM_SHARDS_PROP)):null;
//collection does not yet exist, create placeholders if num shards is specified
if (!state.getCollections().contains(collection)
@@ -215,9 +216,9 @@ public class Overseer {
}
// use the provided non null shardId
- String shardId = message.get(ZkStateReader.SHARD_ID_PROP);
+ String shardId = message.getStr(ZkStateReader.SHARD_ID_PROP);
if (shardId == null) {
- String nodeName = message.get(ZkStateReader.NODE_NAME_PROP);
+ String nodeName = message.getStr(ZkStateReader.NODE_NAME_PROP);
//get shardId from ClusterState
shardId = getAssignedId(state, nodeName, message);
}
@@ -226,22 +227,22 @@ public class Overseer {
shardId = AssignShard.assignShard(collection, state, numShards);
}
- Map props = new HashMap();
- Map coreProps = new HashMap(message.getProperties().size());
+ Map props = new HashMap();
+ Map coreProps = new HashMap(message.getProperties().size());
coreProps.putAll(message.getProperties());
// we don't put num_shards in the clusterstate
coreProps.remove(ZkStateReader.NUM_SHARDS_PROP);
coreProps.remove(QUEUE_OPERATION);
- for (Entry entry : coreProps.entrySet()) {
+ for (Entry entry : coreProps.entrySet()) {
props.put(entry.getKey(), entry.getValue());
}
- ZkNodeProps zkProps = new ZkNodeProps(props);
+ Replica zkProps = new Replica(zkCoreNodeName, props);
Slice slice = state.getSlice(collection, shardId);
- Map shardProps;
+ Map shardProps;
if (slice == null) {
- shardProps = new HashMap();
+ shardProps = new HashMap();
} else {
- shardProps = state.getSlice(collection, shardId).getShardsCopy();
+ shardProps = state.getSlice(collection, shardId).getReplicasCopy();
}
shardProps.put(zkCoreNodeName, zkProps);
@@ -268,11 +269,11 @@ public class Overseer {
*/
private String getAssignedId(final ClusterState state, final String nodeName,
final ZkNodeProps coreState) {
- final String key = coreState.get(ZkStateReader.NODE_NAME_PROP) + "_" + coreState.get(ZkStateReader.CORE_NAME_PROP);
- Map slices = state.getSlices(coreState.get(ZkStateReader.COLLECTION_PROP));
+ final String key = coreState.getStr(ZkStateReader.NODE_NAME_PROP) + "_" + coreState.getStr(ZkStateReader.CORE_NAME_PROP);
+ Map slices = state.getSlices(coreState.getStr(ZkStateReader.COLLECTION_PROP));
if (slices != null) {
for (Slice slice : slices.values()) {
- if (slice.getShards().get(key) != null) {
+ if (slice.getReplicasMap().get(key) != null) {
return slice.getName();
}
}
@@ -293,16 +294,16 @@ public class Overseer {
if (!slices.containsKey(slice.getName())) {
slices.put(slice.getName(), slice);
} else {
- final Map shards = new LinkedHashMap();
+ final Map shards = new LinkedHashMap();
final Slice existingSlice = slices.get(slice.getName());
- shards.putAll(existingSlice.getShards());
+ shards.putAll(existingSlice.getReplicasMap());
//XXX preserve existing leader
- for(Entry edit: slice.getShards().entrySet()) {
- if(existingSlice.getShards().get(edit.getKey())!=null && existingSlice.getShards().get(edit.getKey()).containsKey(ZkStateReader.LEADER_PROP)) {
- HashMap newProps = new HashMap();
+ for(Entry edit: slice.getReplicasMap().entrySet()) {
+ if(existingSlice.getReplicasMap().get(edit.getKey())!=null && existingSlice.getReplicasMap().get(edit.getKey()).containsKey(ZkStateReader.LEADER_PROP)) {
+ HashMap newProps = new HashMap();
newProps.putAll(edit.getValue().getProperties());
- newProps.put(ZkStateReader.LEADER_PROP, existingSlice.getShards().get(edit.getKey()).get(ZkStateReader.LEADER_PROP));
- shards.put(edit.getKey(), new ZkNodeProps(newProps));
+ newProps.put(ZkStateReader.LEADER_PROP, existingSlice.getReplicasMap().get(edit.getKey()).getStr(ZkStateReader.LEADER_PROP));
+ shards.put(edit.getKey(), new Replica(edit.getKey(), newProps));
} else {
shards.put(edit.getKey(), edit.getValue());
}
@@ -329,9 +330,9 @@ public class Overseer {
log.error("Could not mark leader for non existing slice:" + sliceName);
return state;
} else {
- final Map newShards = new LinkedHashMap();
- for(Entry shard: slices.get(sliceName).getShards().entrySet()) {
- Map newShardProps = new LinkedHashMap();
+ final Map newShards = new LinkedHashMap();
+ for(Entry shard: slices.get(sliceName).getReplicasMap().entrySet()) {
+ Map newShardProps = new LinkedHashMap();
newShardProps.putAll(shard.getValue().getProperties());
newShardProps.remove(ZkStateReader.LEADER_PROP); //clean any previously existed flag
@@ -340,7 +341,7 @@ public class Overseer {
if(leaderUrl!=null && leaderUrl.equals(zkCoreNodeProps.getCoreUrl())) {
newShardProps.put(ZkStateReader.LEADER_PROP,"true");
}
- newShards.put(shard.getKey(), new ZkNodeProps(newShardProps));
+ newShards.put(shard.getKey(), new Replica(shard.getKey(), newShardProps));
}
Slice slice = new Slice(sliceName, newShards);
slices.put(sliceName, slice);
@@ -353,8 +354,8 @@ public class Overseer {
*/
private ClusterState removeCore(final ClusterState clusterState, ZkNodeProps message) {
- final String coreNodeName = message.get(ZkStateReader.NODE_NAME_PROP) + "_" + message.get(ZkStateReader.CORE_NAME_PROP);
- final String collection = message.get(ZkStateReader.COLLECTION_PROP);
+ final String coreNodeName = message.getStr(ZkStateReader.NODE_NAME_PROP) + "_" + message.getStr(ZkStateReader.CORE_NAME_PROP);
+ final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
final LinkedHashMap> newStates = new LinkedHashMap>();
for(String collectionName: clusterState.getCollections()) {
@@ -362,9 +363,9 @@ public class Overseer {
Map slices = clusterState.getSlices(collection);
LinkedHashMap newSlices = new LinkedHashMap();
for(Slice slice: slices.values()) {
- if(slice.getShards().containsKey(coreNodeName)) {
- LinkedHashMap newShards = new LinkedHashMap();
- newShards.putAll(slice.getShards());
+ if(slice.getReplicasMap().containsKey(coreNodeName)) {
+ LinkedHashMap newShards = new LinkedHashMap();
+ newShards.putAll(slice.getReplicasMap());
newShards.remove(coreNodeName);
Slice newSlice = new Slice(slice.getName(), newShards);
@@ -376,7 +377,7 @@ public class Overseer {
}
int cnt = 0;
for (Slice slice : newSlices.values()) {
- cnt+=slice.getShards().size();
+ cnt+=slice.getReplicasMap().size();
}
// TODO: if no nodes are left after this unload
// remove from zk - do we have a race where Overseer
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
index d4cacf1aef3..aa1f9f19332 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
@@ -26,6 +26,7 @@ import java.util.Set;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
@@ -84,13 +85,13 @@ public class OverseerCollectionProcessor implements Runnable {
//if (head != null) { // should not happen since we block above
final ZkNodeProps message = ZkNodeProps.load(head);
- final String operation = message.get(QUEUE_OPERATION);
+ final String operation = message.getStr(QUEUE_OPERATION);
boolean success = processMessage(message, operation);
if (!success) {
// TODO: what to do on failure / partial failure
// if we fail, do we clean up then ?
- SolrException.log(log, "Collection creation of " + message.get("name") + " failed");
+ SolrException.log(log, "Collection creation of " + message.getStr("name") + " failed");
}
//}
workQueue.remove();
@@ -118,7 +119,7 @@ public class OverseerCollectionProcessor implements Runnable {
try {
ZkNodeProps props = ZkNodeProps.load(zkStateReader.getZkClient().getData(
"/overseer_elect/leader", null, null, true));
- if (myId.equals(props.get("id"))) {
+ if (myId.equals(props.getStr("id"))) {
return true;
}
} catch (KeeperException e) {
@@ -152,7 +153,7 @@ public class OverseerCollectionProcessor implements Runnable {
// look at the replication factor and see if it matches reality
// if it does not, find best nodes to create more cores
- String numReplicasString = message.get("numReplicas");
+ String numReplicasString = message.getStr("numReplicas");
int numReplicas;
try {
numReplicas = numReplicasString == null ? 0 : Integer.parseInt(numReplicasString);
@@ -160,7 +161,7 @@ public class OverseerCollectionProcessor implements Runnable {
SolrException.log(log, "Could not parse numReplicas", ex);
return false;
}
- String numShardsString = message.get("numShards");
+ String numShardsString = message.getStr("numShards");
int numShards;
try {
numShards = numShardsString == null ? 0 : Integer.parseInt(numShardsString);
@@ -169,8 +170,8 @@ public class OverseerCollectionProcessor implements Runnable {
return false;
}
- String name = message.get("name");
- String configName = message.get("collection.configName");
+ String name = message.getStr("name");
+ String configName = message.getStr("collection.configName");
// we need to look at every node and see how many cores it serves
// add our new cores to existing nodes serving the least number of cores
@@ -237,7 +238,7 @@ public class OverseerCollectionProcessor implements Runnable {
private boolean collectionCmd(ClusterState clusterState, ZkNodeProps message, ModifiableSolrParams params) {
log.info("Executing Collection Cmd : " + params);
- String name = message.get("name");
+ String name = message.getStr("name");
Map slices = clusterState.getCollectionStates().get(name);
@@ -247,14 +248,14 @@ public class OverseerCollectionProcessor implements Runnable {
for (Map.Entry entry : slices.entrySet()) {
Slice slice = entry.getValue();
- Map shards = slice.getShards();
- Set> shardEntries = shards.entrySet();
- for (Map.Entry shardEntry : shardEntries) {
+ Map shards = slice.getReplicasMap();
+ Set> shardEntries = shards.entrySet();
+ for (Map.Entry shardEntry : shardEntries) {
final ZkNodeProps node = shardEntry.getValue();
- if (clusterState.liveNodesContain(node.get(ZkStateReader.NODE_NAME_PROP))) {
- params.set(CoreAdminParams.CORE, node.get(ZkStateReader.CORE_NAME_PROP));
+ if (clusterState.liveNodesContain(node.getStr(ZkStateReader.NODE_NAME_PROP))) {
+ params.set(CoreAdminParams.CORE, node.getStr(ZkStateReader.CORE_NAME_PROP));
- String replica = node.get(ZkStateReader.BASE_URL_PROP);
+ String replica = node.getStr(ZkStateReader.BASE_URL_PROP);
ShardRequest sreq = new ShardRequest();
// yes, they must use same admin handler path everywhere...
params.set("qt", adminPath);
diff --git a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
index 737c8d92783..02c777fbbaa 100644
--- a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
+++ b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
@@ -119,7 +119,7 @@ public class RecoveryStrategy extends Thread implements ClosableThread {
private void replicate(String nodeName, SolrCore core, ZkNodeProps leaderprops, String baseUrl)
throws SolrServerException, IOException {
- String leaderBaseUrl = leaderprops.get(ZkStateReader.BASE_URL_PROP);
+ String leaderBaseUrl = leaderprops.getStr(ZkStateReader.BASE_URL_PROP);
ZkCoreNodeProps leaderCNodeProps = new ZkCoreNodeProps(leaderprops);
String leaderUrl = leaderCNodeProps.getCoreUrl();
@@ -318,8 +318,8 @@ public class RecoveryStrategy extends Thread implements ClosableThread {
ZkNodeProps leaderprops = zkStateReader.getLeaderProps(
cloudDesc.getCollectionName(), cloudDesc.getShardId());
- String leaderBaseUrl = leaderprops.get(ZkStateReader.BASE_URL_PROP);
- String leaderCoreName = leaderprops.get(ZkStateReader.CORE_NAME_PROP);
+ String leaderBaseUrl = leaderprops.getStr(ZkStateReader.BASE_URL_PROP);
+ String leaderCoreName = leaderprops.getStr(ZkStateReader.CORE_NAME_PROP);
String leaderUrl = ZkCoreNodeProps.getCoreUrl(leaderBaseUrl, leaderCoreName);
diff --git a/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java b/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
index 36ce890c064..694b118b381 100644
--- a/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
+++ b/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
@@ -29,6 +29,7 @@ import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestRecovery;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
@@ -136,9 +137,9 @@ public class SyncStrategy {
ClusterState clusterState = zkController.getZkStateReader().getClusterState();
Map slices = clusterState.getSlices(collection);
Slice slice = slices.get(shardId);
- Map shards = slice.getShards();
- for (Map.Entry shard : shards.entrySet()) {
- String state = shard.getValue().get(ZkStateReader.STATE_PROP);
+ Map shards = slice.getReplicasMap();
+ for (Map.Entry shard : shards.entrySet()) {
+ String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
// System.out.println("state:"
// + state
// + shard.getValue().get(ZkStateReader.NODE_NAME_PROP)
@@ -146,8 +147,8 @@ public class SyncStrategy {
// + clusterState.liveNodesContain(shard.getValue().get(
// ZkStateReader.NODE_NAME_PROP)));
if ((state.equals(ZkStateReader.ACTIVE))
- && clusterState.liveNodesContain(shard.getValue().get(
- ZkStateReader.NODE_NAME_PROP))
+ && clusterState.liveNodesContain(shard.getValue().getStr(
+ ZkStateReader.NODE_NAME_PROP))
&& !new ZkCoreNodeProps(shard.getValue()).getCoreUrl().equals(
new ZkCoreNodeProps(leaderProps).getCoreUrl())) {
return true;
@@ -161,8 +162,8 @@ public class SyncStrategy {
ZkNodeProps props, String collection, String shardId) {
List nodes = zkController.getZkStateReader()
.getReplicaProps(collection, shardId,
- props.get(ZkStateReader.NODE_NAME_PROP),
- props.get(ZkStateReader.CORE_NAME_PROP), ZkStateReader.ACTIVE); // TODO:
+ props.getStr(ZkStateReader.NODE_NAME_PROP),
+ props.getStr(ZkStateReader.CORE_NAME_PROP), ZkStateReader.ACTIVE); // TODO:
// TODO should there be a state filter?
if (nodes == null) {
@@ -191,8 +192,8 @@ public class SyncStrategy {
List nodes = zkController
.getZkStateReader()
.getReplicaProps(collection, shardId,
- leaderProps.get(ZkStateReader.NODE_NAME_PROP),
- leaderProps.get(ZkStateReader.CORE_NAME_PROP), ZkStateReader.ACTIVE);
+ leaderProps.getStr(ZkStateReader.NODE_NAME_PROP),
+ leaderProps.getStr(ZkStateReader.CORE_NAME_PROP), ZkStateReader.ACTIVE);
if (nodes == null) {
log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + " has no replicas");
return;
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
index 6b3e3567748..373e66024fb 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
@@ -41,7 +41,6 @@ import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.OnReconnect;
import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkClientConnectionStrategy;
import org.apache.solr.common.cloud.ZkCmdExecutor;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
@@ -492,7 +491,7 @@ public final class ZkController {
if(data != null) {
ZkNodeProps props = ZkNodeProps.load(data);
- configName = props.get(CONFIGNAME_PROP);
+ configName = props.getStr(CONFIGNAME_PROP);
}
if (configName != null && !zkClient.exists(CONFIGS_ZKNODE + "/" + configName, true)) {
@@ -539,7 +538,7 @@ public final class ZkController {
String shardId = cloudDesc.getShardId();
- Map props = new HashMap();
+ Map props = new HashMap();
// we only put a subset of props into the leader node
props.put(ZkStateReader.BASE_URL_PROP, baseUrl);
props.put(ZkStateReader.CORE_NAME_PROP, coreName);
@@ -695,7 +694,7 @@ public final class ZkController {
String shardId = cd.getCloudDescriptor().getShardId();
- Map props = new HashMap();
+ Map props = new HashMap();
// we only put a subset of props into the leader node
props.put(ZkStateReader.BASE_URL_PROP, getBaseUrl());
props.put(ZkStateReader.CORE_NAME_PROP, cd.getName());
@@ -872,7 +871,7 @@ public final class ZkController {
SolrParams params = cd.getParams();
try {
- Map collectionProps = new HashMap();
+ Map collectionProps = new HashMap();
// TODO: if collection.configName isn't set, and there isn't already a conf in zk, just use that?
String defaultConfigName = System.getProperty(COLLECTION_PARAM_PREFIX+CONFIGNAME_PROP, collection);
@@ -937,7 +936,7 @@ public final class ZkController {
private void getConfName(String collection, String collectionPath,
- Map collectionProps) throws KeeperException,
+ Map collectionProps) throws KeeperException,
InterruptedException {
// check for configName
log.info("Looking for collection configName");
@@ -1168,7 +1167,7 @@ public final class ZkController {
ZkNodeProps props = null;
if(data != null) {
props = ZkNodeProps.load(data);
- Map newProps = new HashMap();
+ Map newProps = new HashMap();
newProps.putAll(props.getProperties());
newProps.put(CONFIGNAME_PROP, confSetName);
props = new ZkNodeProps(newProps);
diff --git a/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
index 473c1547714..3d136ebb28d 100644
--- a/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
@@ -74,7 +74,7 @@ public class UpdateRequestHandler extends ContentStreamHandlerBase {
if(type == null) {
type = stream.getContentType();
}
- if( type == null ) { // Normal requests will not get here.
+ if( type == null ) { // Normal requests will not get here.
throw new SolrException(ErrorCode.BAD_REQUEST, "Missing ContentType");
}
int idx = type.indexOf(';');
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
index 1c299a1e301..999f9957da3 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
@@ -851,7 +851,7 @@ public class CoreAdminHandler extends RequestHandlerBase {
if (core != null) {
SyncStrategy syncStrategy = new SyncStrategy();
- Map props = new HashMap();
+ Map props = new HashMap();
props.put(ZkStateReader.BASE_URL_PROP, zkController.getBaseUrl());
props.put(ZkStateReader.CORE_NAME_PROP, cname);
props.put(ZkStateReader.NODE_NAME_PROP, zkController.getNodeName());
@@ -934,9 +934,9 @@ public class CoreAdminHandler extends RequestHandlerBase {
Slice slice = clusterState.getSlice(collection,
cloudDescriptor.getShardId());
if (slice != null) {
- ZkNodeProps nodeProps = slice.getShards().get(coreNodeName);
+ ZkNodeProps nodeProps = slice.getReplicasMap().get(coreNodeName);
if (nodeProps != null) {
- state = nodeProps.get(ZkStateReader.STATE_PROP);
+ state = nodeProps.getStr(ZkStateReader.STATE_PROP);
live = clusterState.liveNodesContain(nodeName);
if (nodeProps != null && state.equals(waitForState)) {
if (checkLive == null) {
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
index 5e07218a76a..34c42c77bf2 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
@@ -70,7 +70,7 @@ import java.util.Set;
* the registered invariant param for wt.
*
* If you want to override the contentType header returned for a given file, you can
- * set it directly using: {@link #USE_CONTENT_TYPE}. For example, to get a plain text
+ * set it directly using: {@link #USE_CONTENT_TYPE}. For example, to get a plain text
* version of schema.xml, try:
*
* http://localhost:8983/solr/admin/file?file=schema.xml&contentType=text/plain
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
index e38abe8bc76..2fb6efb7e46 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
@@ -42,6 +42,7 @@ import org.apache.solr.cloud.ZkController;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
@@ -350,7 +351,7 @@ public class HttpShardHandler extends ShardHandler {
// throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no such shard: " + sliceName);
}
- Map sliceShards = slice.getShards();
+ Map sliceShards = slice.getReplicasMap();
// For now, recreate the | delimited list of equivalent servers
Set liveNodes = clusterState.getLiveNodes();
diff --git a/solr/core/src/java/org/apache/solr/schema/FieldType.java b/solr/core/src/java/org/apache/solr/schema/FieldType.java
index ced6220f3a3..d7c54caee52 100644
--- a/solr/core/src/java/org/apache/solr/schema/FieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/FieldType.java
@@ -541,7 +541,7 @@ public abstract class FieldType extends FieldProperties {
public abstract SortField getSortField(SchemaField field, boolean top);
/**
- * Utility usable by subclasses when they want to get basic String sorting
+ * Utility usable by subclasses when they want to get basic String sorting
* using common checks.
* @see SchemaField#checkSortability
*/
diff --git a/solr/core/src/java/org/apache/solr/schema/RandomSortField.java b/solr/core/src/java/org/apache/solr/schema/RandomSortField.java
index 0a520e5abcb..3ee86674bd6 100644
--- a/solr/core/src/java/org/apache/solr/schema/RandomSortField.java
+++ b/solr/core/src/java/org/apache/solr/schema/RandomSortField.java
@@ -78,7 +78,7 @@ public class RandomSortField extends FieldType {
}
/**
- * Given a field name and an IndexReader, get a random hash seed.
+ * Given a field name and an IndexReader, get a random hash seed.
* Using dynamic fields, you can force the random order to change
*/
private static int getSeed(String fieldName, AtomicReaderContext context) {
diff --git a/solr/core/src/java/org/apache/solr/schema/SchemaField.java b/solr/core/src/java/org/apache/solr/schema/SchemaField.java
index f67667945e4..93a0eab0f5b 100644
--- a/solr/core/src/java/org/apache/solr/schema/SchemaField.java
+++ b/solr/core/src/java/org/apache/solr/schema/SchemaField.java
@@ -159,7 +159,7 @@ public final class SchemaField extends FieldProperties {
/**
* Sanity checks that the properties of this field type are plausible
- * for a field that may be used to get a FieldCacheSource, throwing
+ * for a field that may be used to get a FieldCacheSource, throwing
* an appropriate exception (including the field name) if it is not.
* FieldType subclasses can choose to call this method in their
* getValueSource implementation
diff --git a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
index e59fa526efc..8c2ce1dd5e5 100644
--- a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
+++ b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
@@ -40,6 +40,7 @@ import javax.servlet.http.HttpServletResponse;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
@@ -335,10 +336,10 @@ public class SolrDispatchFilter implements Filter
}
// check everyone then
- Map shards = entry.getValue().getShards();
- Set> shardEntries = shards.entrySet();
- for (Entry shardEntry : shardEntries) {
- ZkNodeProps zkProps = shardEntry.getValue();
+ Map shards = entry.getValue().getReplicasMap();
+ Set> shardEntries = shards.entrySet();
+ for (Entry shardEntry : shardEntries) {
+ Replica zkProps = shardEntry.getValue();
core = checkProps(cores, path, zkProps);
if (core != null) {
break done;
@@ -352,8 +353,8 @@ public class SolrDispatchFilter implements Filter
ZkNodeProps zkProps) {
String corename;
SolrCore core = null;
- if (cores.getZkController().getNodeName().equals(zkProps.get(ZkStateReader.NODE_NAME_PROP))) {
- corename = zkProps.get(ZkStateReader.CORE_NAME_PROP);
+ if (cores.getZkController().getNodeName().equals(zkProps.getStr(ZkStateReader.NODE_NAME_PROP))) {
+ corename = zkProps.getStr(ZkStateReader.CORE_NAME_PROP);
core = cores.getCore(corename);
}
return core;
diff --git a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
index 3779040c6ce..eefccfe3306 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@@ -37,6 +37,7 @@ import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.SolrInputField;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
@@ -1062,9 +1063,9 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
for (Map.Entry sliceEntry : slices.entrySet()) {
Slice replicas = slices.get(sliceEntry.getKey());
- Map shardMap = replicas.getShards();
+ Map shardMap = replicas.getReplicasMap();
- for (Entry entry : shardMap.entrySet()) {
+ for (Entry entry : shardMap.entrySet()) {
ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(entry.getValue());
if (clusterState.liveNodesContain(nodeProps.getNodeName()) && !entry.getKey().equals(shardZkNodeName)) {
urls.add(new StdNode(nodeProps));
diff --git a/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessorFactory.java
index 14a4b3d9f57..f68b3cd2e35 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessorFactory.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessorFactory.java
@@ -68,7 +68,7 @@ import org.apache.solr.util.plugin.SolrCoreAware;
* containing any of the above criteria, identifying fields to be excluded
* from seelction even if they match the selection criteria. As with the main
* selection critiera a field must match all of criteria in a single exclusion
- * in order to be excluded, but multiple exclusions may be specified to get an
+ * in order to be excluded, but multiple exclusions may be specified to get an
* OR
behavior
*
*
diff --git a/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java b/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java
index 60db59b54ba..0a3ff54d3c3 100644
--- a/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java
+++ b/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java
@@ -162,7 +162,7 @@ public class TestReversedWildcardFilterFactory extends SolrTestCaseJ4 {
Query q = qp.parse(query);
if (!(q instanceof AutomatonQuery))
return false;
- // this is a hack to get the protected Automaton field in AutomatonQuery,
+ // this is a hack to get the protected Automaton field in AutomatonQuery,
// may break in later lucene versions - we have no getter... for good reasons.
final Field automatonField = AutomatonQuery.class.getDeclaredField("automaton");
automatonField.setAccessible(true);
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
index 8522cc1b2d9..cdb94080482 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
@@ -34,9 +34,6 @@ import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.servlet.SolrDispatchFilter;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action;
-
/**
* This test simply does a bunch of basic things in solrcloud mode and asserts things
* work as expected.
@@ -106,7 +103,7 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
ZkNodeProps leaderProps = zkStateReader.getLeaderProps(
DEFAULT_COLLECTION, SHARD2);
- String nodeName = leaderProps.get(ZkStateReader.NODE_NAME_PROP);
+ String nodeName = leaderProps.getStr(ZkStateReader.NODE_NAME_PROP);
chaosMonkey.stopShardExcept(SHARD2, nodeName);
SolrServer client = getClient(nodeName);
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index bfb19682504..6f713165bb6 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -58,6 +58,7 @@ import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
@@ -70,9 +71,6 @@ import org.apache.solr.common.util.NamedList;
import org.apache.solr.update.SolrCmdDistributor.Request;
import org.apache.solr.util.DefaultSolrThreadFactory;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action;
-
/**
* This test simply does a bunch of basic things in solrcloud mode and asserts things
* work as expected.
@@ -462,11 +460,11 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
Iterator> it = slices.entrySet().iterator();
while (it.hasNext()) {
Entry sliceEntry = it.next();
- Map sliceShards = sliceEntry.getValue().getShards();
- Iterator> shardIt = sliceShards.entrySet()
+ Map sliceShards = sliceEntry.getValue().getReplicasMap();
+ Iterator> shardIt = sliceShards.entrySet()
.iterator();
while (shardIt.hasNext()) {
- Entry shardEntry = shardIt.next();
+ Entry shardEntry = shardIt.next();
ZkCoreNodeProps coreProps = new ZkCoreNodeProps(shardEntry.getValue());
CoreAdminResponse mcr = CoreAdminRequest.getStatus(
coreProps.getCoreName(),
@@ -491,11 +489,11 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
for (Map.Entry entry : slices.entrySet()) {
Slice slice = entry.getValue();
- Map shards = slice.getShards();
- Set> shardEntries = shards.entrySet();
- for (Map.Entry shardEntry : shardEntries) {
+ Map shards = slice.getReplicasMap();
+ Set> shardEntries = shards.entrySet();
+ for (Map.Entry shardEntry : shardEntries) {
final ZkNodeProps node = shardEntry.getValue();
- if (clusterState.liveNodesContain(node.get(ZkStateReader.NODE_NAME_PROP))) {
+ if (clusterState.liveNodesContain(node.getStr(ZkStateReader.NODE_NAME_PROP))) {
return new ZkCoreNodeProps(node).getCoreUrl();
}
}
@@ -551,13 +549,13 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
Iterator> it = slices.entrySet().iterator();
while (it.hasNext()) {
Entry sliceEntry = it.next();
- Map sliceShards = sliceEntry.getValue()
- .getShards();
- Iterator> shardIt = sliceShards
+ Map sliceShards = sliceEntry.getValue()
+ .getReplicasMap();
+ Iterator> shardIt = sliceShards
.entrySet().iterator();
while (shardIt.hasNext()) {
- Entry shardEntry = shardIt.next();
- if (!shardEntry.getValue().get(ZkStateReader.STATE_PROP)
+ Entry shardEntry = shardIt.next();
+ if (!shardEntry.getValue().getStr(ZkStateReader.STATE_PROP)
.equals(ZkStateReader.ACTIVE)) {
found = false;
break;
@@ -745,7 +743,7 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
assertAllActive(oneInstanceCollection2, solrj.getZkStateReader());
- // TODO: enable when we don't falsely get slice1...
+ // TODO: enable when we don't falsely get slice1...
// solrj.getZkStateReader().getLeaderUrl(oneInstanceCollection2, "slice1", 30000);
// solrj.getZkStateReader().getLeaderUrl(oneInstanceCollection2, "slice2", 30000);
client2.add(getDoc(id, "1"));
@@ -780,7 +778,7 @@ public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
zkStateReader.updateClusterState(true);
Map slices = zkStateReader.getClusterState().getSlices(oneInstanceCollection2);
assertNotNull(slices);
- String roles = slices.get("slice1").getShards().values().iterator().next().get(ZkStateReader.ROLES_PROP);
+ String roles = slices.get("slice1").getReplicasMap().values().iterator().next().getStr(ZkStateReader.ROLES_PROP);
assertEquals("none", roles);
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java b/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java
index 9eecf2956fd..613eb52a950 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java
@@ -25,7 +25,7 @@ import java.util.Set;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkStateReader;
import org.junit.Test;
@@ -38,13 +38,13 @@ public class ClusterStateTest extends SolrTestCaseJ4 {
liveNodes.add("node2");
Map slices = new HashMap();
- Map sliceToProps = new HashMap();
- Map props = new HashMap();
+ Map sliceToProps = new HashMap();
+ Map props = new HashMap();
props.put("prop1", "value");
props.put("prop2", "value2");
- ZkNodeProps zkNodeProps = new ZkNodeProps(props);
- sliceToProps.put("node1", zkNodeProps);
+ Replica replica = new Replica("node1", props);
+ sliceToProps.put("node1", replica);
Slice slice = new Slice("shard1", sliceToProps);
slices.put("shard1", slice);
Slice slice2 = new Slice("shard2", sliceToProps);
@@ -60,8 +60,8 @@ public class ClusterStateTest extends SolrTestCaseJ4 {
assertEquals("Provided liveNodes not used properly", 2, loadedClusterState
.getLiveNodes().size());
assertEquals("No collections found", 2, loadedClusterState.getCollections().size());
- assertEquals("Poperties not copied properly", zkNodeProps.get("prop1"), loadedClusterState.getSlice("collection1", "shard1").getShards().get("node1").get("prop1"));
- assertEquals("Poperties not copied properly", zkNodeProps.get("prop2"), loadedClusterState.getSlice("collection1", "shard1").getShards().get("node1").get("prop2"));
+ assertEquals("Properties not copied properly", replica.getStr("prop1"), loadedClusterState.getSlice("collection1", "shard1").getReplicasMap().get("node1").getStr("prop1"));
+ assertEquals("Properties not copied properly", replica.getStr("prop2"), loadedClusterState.getSlice("collection1", "shard1").getReplicasMap().get("node1").getStr("prop2"));
loadedClusterState = ClusterState.load(null, new byte[0], liveNodes);
diff --git a/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java b/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
index bacbb5c3ddc..cfd3ffe4a61 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
@@ -25,6 +25,7 @@ import java.util.Set;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
@@ -139,7 +140,7 @@ public class ClusterStateUpdateTest extends SolrTestCaseJ4 {
System.setProperty("solrcloud.update.delay", "1");
- Map props2 = new HashMap();
+ Map props2 = new HashMap();
props2.put("configName", "conf1");
ZkNodeProps zkProps2 = new ZkNodeProps(props2);
@@ -173,7 +174,7 @@ public class ClusterStateUpdateTest extends SolrTestCaseJ4 {
slices = clusterState2.getSlices("testcore");
if (slices != null && slices.containsKey("shard1")
- && slices.get("shard1").getShards().size() > 0) {
+ && slices.get("shard1").getReplicasMap().size() > 0) {
break;
}
Thread.sleep(500);
@@ -185,17 +186,17 @@ public class ClusterStateUpdateTest extends SolrTestCaseJ4 {
Slice slice = slices.get("shard1");
assertEquals("shard1", slice.getName());
- Map shards = slice.getShards();
+ Map shards = slice.getReplicasMap();
assertEquals(1, shards.size());
- ZkNodeProps zkProps = shards.get(host + ":1661_solr_testcore");
+ Replica zkProps = shards.get(host + ":1661_solr_testcore");
assertNotNull(zkProps);
- assertEquals(host + ":1661_solr", zkProps.get(ZkStateReader.NODE_NAME_PROP));
+ assertEquals(host + ":1661_solr", zkProps.getStr(ZkStateReader.NODE_NAME_PROP));
- assertEquals("http://" + host + ":1661/solr", zkProps.get(ZkStateReader.BASE_URL_PROP));
+ assertEquals("http://" + host + ":1661/solr", zkProps.getStr(ZkStateReader.BASE_URL_PROP));
Set liveNodes = clusterState2.getLiveNodes();
assertNotNull(liveNodes);
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
index d55ed7ec9f0..88d5e8b4c08 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
@@ -249,7 +249,7 @@ public class LeaderElectionIntegrationTest extends SolrTestCaseJ4 {
private String getLeader() throws InterruptedException {
ZkNodeProps props = reader.getLeaderProps("collection1", "shard1", 30000);
- String leader = props.get(ZkStateReader.NODE_NAME_PROP);
+ String leader = props.getStr(ZkStateReader.NODE_NAME_PROP);
return leader;
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
index 6f954c1e76a..be44ce47d6d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
@@ -152,7 +152,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
collection);
if (slices != null) {
for (Slice slice : slices.values()) {
- if (slice.getShards().containsKey(nodeName + "_" + coreName)) {
+ if (slice.getReplicasMap().containsKey(nodeName + "_" + coreName)) {
return slice.getName();
}
}
@@ -204,9 +204,9 @@ public class OverseerTest extends SolrTestCaseJ4 {
assertNotNull("shard got no id?", zkController.publishState("core" + (i+1), ZkStateReader.ACTIVE, 3));
}
- assertEquals(2, reader.getClusterState().getSlice("collection1", "shard1").getShards().size());
- assertEquals(2, reader.getClusterState().getSlice("collection1", "shard2").getShards().size());
- assertEquals(2, reader.getClusterState().getSlice("collection1", "shard3").getShards().size());
+ assertEquals(2, reader.getClusterState().getSlice("collection1", "shard1").getReplicasMap().size());
+ assertEquals(2, reader.getClusterState().getSlice("collection1", "shard2").getReplicasMap().size());
+ assertEquals(2, reader.getClusterState().getSlice("collection1", "shard3").getReplicasMap().size());
//make sure leaders are in cloud state
assertNotNull(reader.getLeaderUrl("collection1", "shard1", 15000));
@@ -303,7 +303,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
ClusterState state = reader.getClusterState();
Map slices = state.getSlices("collection1");
for (String name : slices.keySet()) {
- cloudStateSliceCount += slices.get(name).getShards().size();
+ cloudStateSliceCount += slices.get(name).getReplicasMap().size();
}
if (coreCount == cloudStateSliceCount) break;
Thread.sleep(200);
@@ -435,8 +435,8 @@ public class OverseerTest extends SolrTestCaseJ4 {
waitForCollections(reader, "collection1");
assertEquals(reader.getClusterState().toString(), ZkStateReader.RECOVERING,
- reader.getClusterState().getSlice("collection1", "shard1").getShards()
- .get("node1_core1").get(ZkStateReader.STATE_PROP));
+ reader.getClusterState().getSlice("collection1", "shard1").getReplicasMap()
+ .get("node1_core1").getStr(ZkStateReader.STATE_PROP));
//publish node state (active)
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "state",
@@ -467,7 +467,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
while(maxIterations-->0) {
Slice slice = reader.getClusterState().getSlice("collection1", "shard1");
if(slice!=null) {
- coreState = slice.getShards().get("node1_core1").get(ZkStateReader.STATE_PROP);
+ coreState = slice.getReplicasMap().get("node1_core1").getStr(ZkStateReader.STATE_PROP);
if(coreState.equals(expectedState)) {
return;
}
@@ -483,14 +483,14 @@ public class OverseerTest extends SolrTestCaseJ4 {
reader.updateClusterState(true); // poll state
ZkNodeProps props = reader.getClusterState().getLeader(collection, shard);
if(props!=null) {
- if(expectedCore.equals(props.get(ZkStateReader.CORE_NAME_PROP))) {
+ if(expectedCore.equals(props.getStr(ZkStateReader.CORE_NAME_PROP))) {
return;
}
}
Thread.sleep(100);
}
- assertEquals("Unexpected shard leader coll:" + collection + " shard:" + shard, expectedCore, (reader.getClusterState().getLeader(collection, shard)!=null)?reader.getClusterState().getLeader(collection, shard).get(ZkStateReader.CORE_NAME_PROP):null);
+ assertEquals("Unexpected shard leader coll:" + collection + " shard:" + shard, expectedCore, (reader.getClusterState().getLeader(collection, shard)!=null)?reader.getClusterState().getLeader(collection, shard).getStr(ZkStateReader.CORE_NAME_PROP):null);
}
@Test
@@ -547,7 +547,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
assertEquals("Live nodes count does not match", 1, reader.getClusterState()
.getLiveNodes().size());
assertEquals("Shard count does not match", 1, reader.getClusterState()
- .getSlice("collection1", "shard1").getShards().size());
+ .getSlice("collection1", "shard1").getReplicasMap().size());
version = getClusterStateVersion(controllerClient);
mockController.publishState("core1", null,1);
while(version == getClusterStateVersion(controllerClient));
@@ -714,7 +714,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
int numFound = 0;
for (Map collection : state.getCollectionStates().values()) {
for (Slice slice : collection.values()) {
- if (slice.getShards().get("node1_core1") != null) {
+ if (slice.getReplicasMap().get("node1_core1") != null) {
numFound++;
}
}
@@ -834,11 +834,11 @@ public class OverseerTest extends SolrTestCaseJ4 {
for(int i=0;i<100;i++) {
Slice s = reader.getClusterState().getSlice("collection1", "s1");
- if(s!=null && s.getShards().size()==3) break;
+ if(s!=null && s.getReplicasMap().size()==3) break;
Thread.sleep(100);
}
assertNotNull(reader.getClusterState().getSlice("collection1", "s1"));
- assertEquals(3, reader.getClusterState().getSlice("collection1", "s1").getShards().size());
+ assertEquals(3, reader.getClusterState().getSlice("collection1", "s1").getReplicasMap().size());
} finally {
close(overseerClient);
close(zkClient);
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java b/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java
index 5ff8d19ffab..2ed3f937ff9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java
@@ -30,7 +30,7 @@ public class TestHashPartitioner extends SolrTestCaseJ4 {
List ranges;
// make sure the partitioner uses the "natural" boundaries and doesn't suffer from an off-by-one
- ranges = hp.partitionRange(2, Integer.MIN_VALUE, Integer.MAX_VALUE);
+ ranges = hp.partitionRange(2, hp.fullRange());
assertEquals(Integer.MIN_VALUE, ranges.get(0).min);
assertEquals(0x80000000, ranges.get(0).min);
assertEquals(0xffffffff, ranges.get(0).max);
@@ -44,7 +44,7 @@ public class TestHashPartitioner extends SolrTestCaseJ4 {
assertEquals(0x7fffffff, ranges.get(1).max);
for (int i = 1; i <= 30000; i += 13) {
- ranges = hp.partitionRange(i, Integer.MIN_VALUE, Integer.MAX_VALUE);
+ ranges = hp.partitionRange(i, hp.fullRange());
assertEquals(i, ranges.size());
assertTrue("First range does not start before " + Integer.MIN_VALUE
+ " it is:" + ranges.get(0).min,
@@ -52,6 +52,14 @@ public class TestHashPartitioner extends SolrTestCaseJ4 {
assertTrue("Last range does not end after " + Integer.MAX_VALUE
+ " it is:" + ranges.get(ranges.size() - 1).max,
ranges.get(ranges.size() - 1).max >= Integer.MAX_VALUE);
+
+ for (Range range : ranges) {
+ String s = range.toString();
+ Range newRange = hp.fromString(s);
+ assertEquals(range, newRange);
+ }
+
+
}
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java b/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
index 787eae23506..e246e451be2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
@@ -20,7 +20,6 @@ package org.apache.solr.cloud;
import java.io.File;
import java.util.List;
-import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
@@ -136,7 +135,7 @@ public class ZkCLITest extends SolrTestCaseJ4 {
ZkNodeProps collectionProps = ZkNodeProps.load(zkClient.getData(ZkStateReader.COLLECTIONS_ZKNODE + "/collection1", null, null, true));
assertTrue(collectionProps.containsKey("configName"));
- assertEquals(confsetname, collectionProps.get("configName"));
+ assertEquals(confsetname, collectionProps.getStr("configName"));
// test down config
File confDir = new File(TEMP_DIR,
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
index 42e957f4fa8..f85a9acf97e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
@@ -67,7 +67,7 @@ public class ZkControllerTest extends SolrTestCaseJ4 {
zkClient.makePath(ZkController.CONFIGS_ZKNODE + "/" + actualConfigName, true);
- Map props = new HashMap();
+ Map props = new HashMap();
props.put("configName", actualConfigName);
ZkNodeProps zkProps = new ZkNodeProps(props);
zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/"
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkNodePropsTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkNodePropsTest.java
index e01e9007c68..f47091acc8a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkNodePropsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkNodePropsTest.java
@@ -30,7 +30,7 @@ public class ZkNodePropsTest extends SolrTestCaseJ4 {
@Test
public void testBasic() throws IOException {
- Map props = new HashMap();
+ Map props = new HashMap();
props.put("prop1", "value1");
props.put("prop2", "value2");
props.put("prop3", "value3");
@@ -42,11 +42,11 @@ public class ZkNodePropsTest extends SolrTestCaseJ4 {
byte[] bytes = ZkStateReader.toJSON(zkProps);
ZkNodeProps props2 = ZkNodeProps.load(bytes);
- assertEquals("value1", props2.get("prop1"));
- assertEquals("value2", props2.get("prop2"));
- assertEquals("value3", props2.get("prop3"));
- assertEquals("value4", props2.get("prop4"));
- assertEquals("value5", props2.get("prop5"));
- assertEquals("value6", props2.get("prop6"));
+ assertEquals("value1", props2.getStr("prop1"));
+ assertEquals("value2", props2.getStr("prop2"));
+ assertEquals("value3", props2.getStr("prop3"));
+ assertEquals("value4", props2.getStr("prop4"));
+ assertEquals("value5", props2.getStr("prop5"));
+ assertEquals("value6", props2.getStr("prop6"));
}
}
diff --git a/solr/core/src/test/org/apache/solr/search/QueryEqualityTest.java b/solr/core/src/test/org/apache/solr/search/QueryEqualityTest.java
index 33e678ce921..258b75fc221 100644
--- a/solr/core/src/test/org/apache/solr/search/QueryEqualityTest.java
+++ b/solr/core/src/test/org/apache/solr/search/QueryEqualityTest.java
@@ -34,7 +34,7 @@ import org.junit.BeforeClass;
* Sanity checks that queries (generated by the QParser and ValueSourceParser
* framework) are appropraitely {@link Object#equals} and
* {@link Object#hashCode()} equivilent. If you are adding a new default
- * QParser or ValueSourceParser, you will most likely get a failure from
+ * QParser or ValueSourceParser, you will most likely get a failure from
* {@link #testParserCoverage} until you add a new test method to this class.
*
* @see ValueSourceParser#standardValueSourceParsers
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java
index 2ada3d8344a..36570e857e6 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java
@@ -104,7 +104,7 @@ public class BinaryRequestWriter extends RequestWriter {
}
/*
- * A hack to get access to the protected internal buffer and avoid an additional copy
+ * A hack to get access to the protected internal buffer and avoid an additional copy
*/
class BAOS extends ByteArrayOutputStream {
byte[] getbuf() {
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java
index 79bfb1f0b71..be02a47e48e 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java
@@ -208,7 +208,7 @@ public class CloudSolrServer extends SolrServer {
Map<String,ZkNodeProps> nodes = new HashMap<String,ZkNodeProps>();
List<String> urlList = new ArrayList<String>();
for (Slice slice : slices.values()) {
- for (ZkNodeProps nodeProps : slice.getShards().values()) {
+ for (ZkNodeProps nodeProps : slice.getReplicasMap().values()) {
ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
String node = coreNodeProps.getNodeName();
if (!liveNodes.contains(coreNodeProps.getNodeName())
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java
index 20a312bc3d9..4c03bb1f006 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java
@@ -443,7 +443,7 @@ public class QueryResponse extends SolrResponseBase
return _facetPivot;
}
- /** get
+ /** get
*
* @param name the name of the
* @return the FacetField by name or null if it does not exist
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
index cf23d1c6d97..8a255918cd9 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
@@ -92,9 +92,9 @@ public class ClusterState implements JSONWriter.Writable {
Set<Entry<String,Slice>> slices = state.entrySet();
for (Entry sliceEntry : slices) {
Slice slice = sliceEntry.getValue();
- Map<String,ZkNodeProps> shards = slice.getShards();
- Set<Entry<String,ZkNodeProps>> shardsEntries = shards.entrySet();
- for (Entry<String,ZkNodeProps> shardEntry : shardsEntries) {
+ Map<String,Replica> shards = slice.getReplicasMap();
+ Set<Entry<String,Replica>> shardsEntries = shards.entrySet();
+ for (Entry<String,Replica> shardEntry : shardsEntries) {
ZkNodeProps props = shardEntry.getValue();
if (props.containsKey(ZkStateReader.LEADER_PROP)) {
Map leadersForCollection = leaders.get(collection.getKey());
@@ -122,11 +122,11 @@ public class ClusterState implements JSONWriter.Writable {
/**
* Get shard properties or null if shard is not found.
*/
- public ZkNodeProps getShardProps(final String collection, final String coreNodeName) {
+ public Replica getShardProps(final String collection, final String coreNodeName) {
Map<String,Slice> slices = getSlices(collection);
for(Slice slice: slices.values()) {
- if(slice.getShards().get(coreNodeName)!=null) {
- return slice.getShards().get(coreNodeName);
+ if(slice.getReplicasMap().get(coreNodeName)!=null) {
+ return slice.getReplicasMap().get(coreNodeName);
}
}
return null;
@@ -185,7 +185,7 @@ public class ClusterState implements JSONWriter.Writable {
public String getShardId(String coreNodeName) {
for (Entry<String, Map<String,Slice>> states: collectionStates.entrySet()){
for(Entry<String,Slice> slices: states.getValue().entrySet()) {
- for(Entry<String,ZkNodeProps> shards: slices.getValue().getShards().entrySet()){
+ for(Entry<String,Replica> shards: slices.getValue().getReplicasMap().entrySet()){
if(coreNodeName.equals(shards.getKey())) {
return slices.getKey();
}
@@ -294,10 +294,10 @@ public class ClusterState implements JSONWriter.Writable {
Map collection = (Map)stateMap.get(collectionName);
Map<String,Slice> slices = new LinkedHashMap<String,Slice>();
for(String sliceName: collection.keySet()) {
- Map<String,Map<String,String>> sliceMap = (Map<String,Map<String,String>>)collection.get(sliceName);
- Map<String,ZkNodeProps> shards = new LinkedHashMap<String,ZkNodeProps>();
+ Map<String,Map<String,Object>> sliceMap = (Map<String,Map<String,Object>>)collection.get(sliceName);
+ Map<String,Replica> shards = new LinkedHashMap<String,Replica>();
for(String shardName: sliceMap.keySet()) {
- shards.put(shardName, new ZkNodeProps(sliceMap.get(shardName)));
+ shards.put(shardName, new Replica(shardName, sliceMap.get(shardName)));
}
Slice slice = new Slice(sliceName, shards);
slices.put(sliceName, slice);
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/HashPartitioner.java b/solr/solrj/src/java/org/apache/solr/common/cloud/HashPartitioner.java
index f55f1132a8a..0ed9873caa9 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/HashPartitioner.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/HashPartitioner.java
@@ -17,23 +17,26 @@ package org.apache.solr.common.cloud;
* limitations under the License.
*/
+import org.apache.noggit.JSONWriter;
+
import java.util.ArrayList;
import java.util.List;
/**
* Class to partition int range into n ranges.
- *
+ *
*/
public class HashPartitioner {
// Hash ranges can't currently "wrap" - i.e. max must be greater or equal to min.
// TODO: ranges may not be all contiguous in the future (either that or we will
// need an extra class to model a collection of ranges)
- public static class Range {
+ public static class Range implements JSONWriter.Writable {
public int min; // inclusive
public int max; // inclusive
-
+
public Range(int min, int max) {
+ assert min <= max;
this.min = min;
this.max = max;
}
@@ -46,12 +49,39 @@ public class HashPartitioner {
return Integer.toHexString(min) + '-' + Integer.toHexString(max);
}
- public static Range fromString(String range) {
- return null; // TODO
+
+ @Override
+ public int hashCode() {
+ // difficult numbers to hash... only the highest bits will tend to differ.
+ // ranges will only overlap during a split, so we can just hash the lower range.
+ return (min>>28) + (min>>25) + (min>>21) + min;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj.getClass() != getClass()) return false;
+ Range other = (Range)obj;
+ return this.min == other.min && this.max == other.max;
+ }
+
+ @Override
+ public void write(JSONWriter writer) {
+ writer.write(toString());
}
}
+ public Range fromString(String range) {
+ int middle = range.indexOf('-');
+ String minS = range.substring(0, middle);
+ String maxS = range.substring(middle+1);
+ long min = Long.parseLong(minS, 16); // use long to prevent the parsing routines from potentially worrying about overflow
+ long max = Long.parseLong(maxS, 16);
+ return new Range((int)min, (int)max);
+ }
+ public Range fullRange() {
+ return new Range(Integer.MIN_VALUE, Integer.MAX_VALUE);
+ }
public List<Range> partitionRange(int partitions, Range range) {
return partitionRange(partitions, range.min, range.max);
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java b/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java
new file mode 100644
index 00000000000..5c8bf7de78a
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java
@@ -0,0 +1,38 @@
+package org.apache.solr.common.cloud;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Map;
+
+
+public class Replica extends ZkNodeProps {
+ private final String name;
+
+ public Replica(String name, Map<String,Object> propMap) { // TODO: back compat for handling Map<String,String>
+ super(propMap);
+ this.name = name;
+ String nodeName = (String)propMap.get(ZkStateReader.NODE_NAME_PROP);
+ assert nodeName == null || name.startsWith(nodeName);
+ }
+
+ public String getName() {
+ return name;
+ }
+
+
+}
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java b/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java
index bb64b849db5..1487862f70f 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java
@@ -19,46 +19,72 @@ package org.apache.solr.common.cloud;
import org.apache.noggit.JSONWriter;
-import java.util.Collections;
+import java.util.Collection;
import java.util.HashMap;
+import java.util.LinkedHashMap;
import java.util.Map;
/**
- * A Slice contains immutable information about all shards that share the same
- * shard id (shard leader and replicas).
+ * A Slice contains immutable information about a logical shard (all replicas that share the same shard id).
*/
-public class Slice implements JSONWriter.Writable {
- private final Map shards;
+public class Slice extends ZkNodeProps {
+ public static String REPLICAS = "replicas";
+ public static String RANGE = "range";
+ public static String LEADER = "leader";
+
private final String name;
+ private final HashPartitioner.Range range;
+ // private final Integer replicationFactor;
+ private final Map<String,Replica> replicas;
+ private final Replica leader;
- public Slice(String name, Map<String,ZkNodeProps> shards) {
- this.shards = shards;
+ public Slice(String name, Map<String,Replica> replicas) {
+ this(name, replicas, null);
+ }
+
+ public Slice(String name, Map<String,Replica> replicas, Map<String,Object> props) {
+ super( props==null ? new LinkedHashMap<String,Object>(2) : new LinkedHashMap<String,Object>(props));
this.name = name;
- }
-
- /**
- * Get properties for all shards in this slice.
- *
- * @return map containing coreNodeName as the key, see
- * {@link ZkStateReader#getCoreNodeName(String, String)}, ZKNodeProps
- * as the value.
- */
- public Map<String,ZkNodeProps> getShards() {
- return Collections.unmodifiableMap(shards);
+ this.replicas = replicas != null ? replicas : makeReplicas((Map<String,Object>)propMap.get(REPLICAS));
+ propMap.put(REPLICAS, replicas);
+
+ String rangeStr = (String)propMap.get(RANGE);
+ HashPartitioner.Range tmpRange = null;
+ if (rangeStr != null) {
+ HashPartitioner hp = new HashPartitioner();
+ tmpRange = hp.fromString(rangeStr);
+ }
+
+ range = tmpRange;
+ // replicationFactor = null; // future
+ leader = findLeader();
}
- /**
- * Get a copy of the shards data this object holds.
- */
- public Map<String,ZkNodeProps> getShardsCopy() {
- Map<String,ZkNodeProps> shards = new HashMap<String,ZkNodeProps>();
- for (Map.Entry<String,ZkNodeProps> entry : this.shards.entrySet()) {
- ZkNodeProps zkProps = new ZkNodeProps(entry.getValue());
- shards.put(entry.getKey(), zkProps);
+
+ private Map<String,Replica> makeReplicas(Map<String,Object> genericReplicas) {
+ if (genericReplicas == null) return new HashMap<String,Replica>(1);
+ Map<String,Replica> result = new LinkedHashMap<String,Replica>(genericReplicas.size());
+ for (Map.Entry<String,Object> entry : genericReplicas.entrySet()) {
+ String name = entry.getKey();
+ Object val = entry.getValue();
+ Replica r;
+ if (val instanceof Replica) {
+ r = (Replica)val;
+ } else {
+ r = new Replica(name, (Map<String,Object>)val);
+ }
+ result.put(name, r);
}
- return shards;
+ return result;
}
-
+
+ private Replica findLeader() {
+ for (Replica replica : replicas.values()) {
+ if (replica.getStr(LEADER) != null) return replica;
+ }
+ return null;
+ }
+
/**
* Return slice name (shard id).
*/
@@ -66,13 +92,57 @@ public class Slice implements JSONWriter.Writable {
return name;
}
+ /**
+ * Gets the list of replicas for this slice.
+ */
+ public Collection<Replica> getReplicas() {
+ return replicas.values();
+ }
+
+ /**
+ * Get the map of coreNodeName to replicas for this slice.
+ *
+ * @return map containing coreNodeName as the key, see
+ * {@link ZkStateReader#getCoreNodeName(String, String)}, Replica
+ * as the value.
+ */
+ public Map<String,Replica> getReplicasMap() {
+ return replicas;
+ }
+
+ public Map<String,Replica> getReplicasCopy() {
+ return new LinkedHashMap<String,Replica>(replicas);
+ }
+
+ public Replica getLeader() {
+ return leader;
+ }
+
+ /***
+ // returns a copy of this slice containing the new replica
+ public Slice addReplica(Replica replica) {
+ Map newProps = new LinkedHashMap(props);
+ Map replicas = getReplicasMap();
+ Map newReplicas = replicas == null ? new HashMap(1) : new LinkedHashMap(replicas);
+// newReplicas.put(replica.getName(), replica);
+ newProps.put(REPLICAS, replicas);
+ return new Slice(name, newProps); // TODO: new constructor that takes replicas as-is w/o rebuilding
+ }
+
+ public static Slice newSlice(String name) {
+ Map props = new HashMap(1);
+ props.put("replicas", new HashMap(1));
+ return new Slice(name, props);
+ }
+ ***/
+
@Override
public String toString() {
- return "Slice [shards=" + shards + ", name=" + name + "]";
+ return "Slice [replicas=" + replicas + ", name=" + name + "]";
}
@Override
public void write(JSONWriter jsonWriter) {
- jsonWriter.write(shards);
+ jsonWriter.write(replicas);
}
}
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkCoreNodeProps.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkCoreNodeProps.java
index facb216bcfb..98b99565bb3 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkCoreNodeProps.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkCoreNodeProps.java
@@ -25,27 +25,27 @@ public class ZkCoreNodeProps {
}
public String getCoreUrl() {
- return getCoreUrl(nodeProps.get(ZkStateReader.BASE_URL_PROP), nodeProps.get(ZkStateReader.CORE_NAME_PROP));
+ return getCoreUrl(nodeProps.getStr(ZkStateReader.BASE_URL_PROP), nodeProps.getStr(ZkStateReader.CORE_NAME_PROP));
}
public String getNodeName() {
- return nodeProps.get(ZkStateReader.NODE_NAME_PROP);
+ return nodeProps.getStr(ZkStateReader.NODE_NAME_PROP);
}
public String getState() {
- return nodeProps.get(ZkStateReader.STATE_PROP);
+ return nodeProps.getStr(ZkStateReader.STATE_PROP);
}
public String getBaseUrl() {
- return nodeProps.get(ZkStateReader.BASE_URL_PROP);
+ return nodeProps.getStr(ZkStateReader.BASE_URL_PROP);
}
public String getCoreName() {
- return nodeProps.get(ZkStateReader.CORE_NAME_PROP);
+ return nodeProps.getStr(ZkStateReader.CORE_NAME_PROP);
}
public static String getCoreUrl(ZkNodeProps nodeProps) {
- return getCoreUrl(nodeProps.get(ZkStateReader.BASE_URL_PROP), nodeProps.get(ZkStateReader.CORE_NAME_PROP));
+ return getCoreUrl(nodeProps.getStr(ZkStateReader.BASE_URL_PROP), nodeProps.getStr(ZkStateReader.CORE_NAME_PROP));
}
public static String getCoreUrl(String baseUrl, String coreName) {
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
index a89e6e071e6..28ab500fcd0 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
@@ -17,51 +17,54 @@ package org.apache.solr.common.cloud;
* limitations under the License.
*/
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.Map.Entry;
-
import org.apache.noggit.JSONWriter;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
/**
- * ZkNodeProps contains immutable properties for a shard/solr core.
+ * ZkNodeProps contains generic immutable properties.
*/
public class ZkNodeProps implements JSONWriter.Writable {
- private final Map<String,String> propMap;
+ protected final Map<String,Object> propMap;
/**
* Construct ZKNodeProps from map.
*/
- public ZkNodeProps(Map<String,String> propMap) {
- this.propMap = new HashMap<String,String>();
- this.propMap.putAll(propMap);
+ public ZkNodeProps(Map<String,Object> propMap) { // TODO: back compat for handling Map<String,String>
+ this.propMap = propMap;
}
-
- /**
- * Construct ZKNodeProps from information of an existingZKNodeProps.
- */
- public ZkNodeProps(ZkNodeProps zkNodeProps) {
- this.propMap = new HashMap<String,String>();
- this.propMap.putAll(zkNodeProps.propMap);
- }
-
+
+
/**
* Constructor that populates the from array of Strings in form key1, value1,
* key2, value2, ..., keyN, valueN
*/
public ZkNodeProps(String... keyVals) {
- if (keyVals.length % 2 != 0) {
+ this( makeMap(keyVals) );
+ }
+
+ public static ZkNodeProps fromKeyVals(Object... keyVals) {
+ return new ZkNodeProps( makeMap(keyVals) );
+ }
+
+ public static Map<String,Object> makeMap(Object... keyVals) {
+ if ((keyVals.length & 0x01) != 0) {
throw new IllegalArgumentException("arguments should be key,value");
}
- propMap = new HashMap<String,String>();
+ Map<String,Object> propMap = new HashMap<String,Object>(keyVals.length>>1);
for (int i = 0; i < keyVals.length; i+=2) {
- propMap.put(keyVals[i], keyVals[i+1]);
+ propMap.put(keyVals[i].toString(), keyVals[i+1]);
}
+ return propMap;
}
-
+
+
/**
* Get property keys.
*/
@@ -72,15 +75,20 @@ public class ZkNodeProps implements JSONWriter.Writable {
/**
* Get all properties as map.
*/
- public Map<String,String> getProperties() {
+ public Map<String,Object> getProperties() {
return Collections.unmodifiableMap(propMap);
}
+ /** Returns a shallow writable copy of the properties */
+ public Map<String,Object> shallowCopy() {
+ return new LinkedHashMap<String,Object>(propMap);
+ }
+
/**
- * Create ZkNodeProps from json string that is typically stored in zookeeper.
+ * Create Replica from json string that is typically stored in zookeeper.
*/
public static ZkNodeProps load(byte[] bytes) {
- Map<String, String> props = (Map<String, String>) ZkStateReader.fromJSON(bytes);
+ Map<String, Object> props = (Map<String, Object>) ZkStateReader.fromJSON(bytes);
return new ZkNodeProps(props);
}
@@ -90,17 +98,22 @@ public class ZkNodeProps implements JSONWriter.Writable {
}
/**
- * Get property value.
+ * Get a string property value.
*/
- public String get(String key) {
+ public String getStr(String key) {
+ Object o = propMap.get(key);
+ return o == null ? null : o.toString();
+ }
+
+ public Object get(String key,int foo) {
return propMap.get(key);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
- Set<Entry<String,String>> entries = propMap.entrySet();
- for(Entry<String,String> entry : entries) {
+ Set<Entry<String,Object>> entries = propMap.entrySet();
+ for(Entry<String,Object> entry : entries) {
sb.append(entry.getKey() + "=" + entry.getValue() + "\n");
}
return sb.toString();
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
index 8b4e2cb6740..1f7189ab811 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
@@ -458,10 +458,10 @@ public class ZkStateReader {
throw new ZooKeeperException(ErrorCode.BAD_REQUEST, "Could not find shardId in zk: " + shardId);
}
- Map<String,ZkNodeProps> shardMap = replicas.getShards();
+ Map<String,Replica> shardMap = replicas.getReplicasMap();
List<ZkCoreNodeProps> nodes = new ArrayList<ZkCoreNodeProps>(shardMap.size());
String filterNodeName = thisNodeName + "_" + coreName;
- for (Entry<String,ZkNodeProps> entry : shardMap.entrySet()) {
+ for (Entry<String,Replica> entry : shardMap.entrySet()) {
ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(entry.getValue());
String coreNodeName = nodeProps.getNodeName() + "_" + nodeProps.getCoreName();
if (clusterState.liveNodesContain(nodeProps.getNodeName()) && !coreNodeName.equals(filterNodeName)) {
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
index c89ee8f4009..8b6a35b5045 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
@@ -24,11 +24,10 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.io.FileUtils;
import org.apache.solr.BaseDistributedSearchTestCase;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.cloud.ZkTestServer;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.servlet.SolrDispatchFilter;
import org.apache.zookeeper.KeeperException;
@@ -130,18 +129,18 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
ClusterState clusterState = zkStateReader.getClusterState();
Map<String,Slice> slices = clusterState.getSlices(collection);
for (Map.Entry<String,Slice> entry : slices.entrySet()) {
- Map<String,ZkNodeProps> shards = entry.getValue().getShards();
- for (Map.Entry<String,ZkNodeProps> shard : shards.entrySet()) {
+ Map<String,Replica> shards = entry.getValue().getReplicasMap();
+ for (Map.Entry<String,Replica> shard : shards.entrySet()) {
if (verbose) System.out.println("rstate:"
- + shard.getValue().get(ZkStateReader.STATE_PROP)
+ + shard.getValue().getStr(ZkStateReader.STATE_PROP)
+ " live:"
- + clusterState.liveNodesContain(shard.getValue().get(
- ZkStateReader.NODE_NAME_PROP)));
- String state = shard.getValue().get(ZkStateReader.STATE_PROP);
+ + clusterState.liveNodesContain(shard.getValue().getStr(
+ ZkStateReader.NODE_NAME_PROP)));
+ String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
if ((state.equals(ZkStateReader.RECOVERING) || state
.equals(ZkStateReader.SYNC) || state.equals(ZkStateReader.DOWN))
- && clusterState.liveNodesContain(shard.getValue().get(
- ZkStateReader.NODE_NAME_PROP))) {
+ && clusterState.liveNodesContain(shard.getValue().getStr(
+ ZkStateReader.NODE_NAME_PROP))) {
sawLiveRecovering = true;
}
}
@@ -176,10 +175,10 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
throw new IllegalArgumentException("Cannot find collection:" + collection);
}
for (Map.Entry entry : slices.entrySet()) {
- Map<String,ZkNodeProps> shards = entry.getValue().getShards();
- for (Map.Entry<String,ZkNodeProps> shard : shards.entrySet()) {
+ Map<String,Replica> shards = entry.getValue().getReplicasMap();
+ for (Map.Entry<String,Replica> shard : shards.entrySet()) {
- String state = shard.getValue().get(ZkStateReader.STATE_PROP);
+ String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
if (!state.equals(ZkStateReader.ACTIVE)) {
fail("Not all shards are ACTIVE - found a shard that is: " + state);
}
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index 017f0bc003b..49b21fa3691 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -45,6 +45,7 @@ import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
@@ -340,7 +341,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
Map slices = this.zkStateReader.getClusterState().getSlices(defaultCollection);
int cnt = 0;
for (Map.Entry entry : slices.entrySet()) {
- cnt += entry.getValue().getShards().size();
+ cnt += entry.getValue().getReplicasMap().size();
}
return cnt;
@@ -378,8 +379,8 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
nextClient:
// we find ou state by simply matching ports...
for (Map.Entry slice : slices.entrySet()) {
- Map<String,ZkNodeProps> theShards = slice.getValue().getShards();
- for (Map.Entry<String,ZkNodeProps> shard : theShards.entrySet()) {
+ Map<String,Replica> theShards = slice.getValue().getReplicasMap();
+ for (Map.Entry<String,Replica> shard : theShards.entrySet()) {
int port = new URI(((HttpSolrServer) client).getBaseURL())
.getPort();
@@ -387,7 +388,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
CloudSolrServerClient csc = new CloudSolrServerClient();
csc.solrClient = client;
csc.port = port;
- csc.shardName = shard.getValue().get(ZkStateReader.NODE_NAME_PROP);
+ csc.shardName = shard.getValue().getStr(ZkStateReader.NODE_NAME_PROP);
csc.info = shard.getValue();
theClients .add(csc);
@@ -406,8 +407,8 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
nextJetty:
for (Map.Entry slice : slices.entrySet()) {
- Map<String,ZkNodeProps> theShards = slice.getValue().getShards();
- for (Map.Entry<String,ZkNodeProps> shard : theShards.entrySet()) {
+ Map<String,Replica> theShards = slice.getValue().getReplicasMap();
+ for (Map.Entry<String,Replica> shard : theShards.entrySet()) {
if (shard.getKey().contains(":" + port + "_")) {
List list = shardToJetty.get(slice.getKey());
if (list == null) {
@@ -419,9 +420,9 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
CloudJettyRunner cjr = new CloudJettyRunner();
cjr.jetty = jetty;
cjr.info = shard.getValue();
- cjr.nodeName = shard.getValue().get(ZkStateReader.NODE_NAME_PROP);
+ cjr.nodeName = shard.getValue().getStr(ZkStateReader.NODE_NAME_PROP);
cjr.coreNodeName = shard.getKey();
- cjr.url = shard.getValue().get(ZkStateReader.BASE_URL_PROP) + "/" + shard.getValue().get(ZkStateReader.CORE_NAME_PROP);
+ cjr.url = shard.getValue().getStr(ZkStateReader.BASE_URL_PROP) + "/" + shard.getValue().getStr(ZkStateReader.CORE_NAME_PROP);
cjr.client = findClientByPort(port, theClients);
list.add(cjr);
if (isLeader) {
@@ -442,7 +443,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
List jetties = shardToJetty.get(slice.getKey());
assertNotNull("Test setup problem: We found no jetties for shard: " + slice.getKey()
+ " just:" + shardToJetty.keySet(), jetties);
- assertEquals(slice.getValue().getShards().size(), jetties.size());
+ assertEquals(slice.getValue().getReplicasMap().size(), jetties.size());
}
}
@@ -752,7 +753,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
"The client count does not match up with the shard count for slice:"
+ shard,
zkStateReader.getClusterState().getSlice(DEFAULT_COLLECTION, shard)
- .getShards().size(), solrJetties.size());
+ .getReplicasMap().size(), solrJetties.size());
CloudJettyRunner lastJetty = null;
for (CloudJettyRunner cjetty : solrJetties) {
@@ -775,7 +776,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
}
boolean live = false;
- String nodeName = props.get(ZkStateReader.NODE_NAME_PROP);
+ String nodeName = props.getStr(ZkStateReader.NODE_NAME_PROP);
if (zkStateReader.getClusterState().liveNodesContain(nodeName)) {
live = true;
}
@@ -783,7 +784,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
if (verbose) System.err.println(" num:" + num + "\n");
- boolean active = props.get(ZkStateReader.STATE_PROP).equals(
+ boolean active = props.getStr(ZkStateReader.STATE_PROP).equals(
ZkStateReader.ACTIVE);
if (active && live) {
if (lastNum > -1 && lastNum != num && failMessage == null) {
@@ -877,7 +878,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
CloudJettyRunner cjetty = shardToJetty.get(s).get(i);
ZkNodeProps props = cjetty.info;
SolrServer client = cjetty.client.solrClient;
- boolean active = props.get(ZkStateReader.STATE_PROP).equals(
+ boolean active = props.getStr(ZkStateReader.STATE_PROP).equals(
ZkStateReader.ACTIVE);
if (active) {
SolrQuery query = new SolrQuery("*:*");
@@ -886,7 +887,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
if (verbose) System.err.println(new ZkCoreNodeProps(props)
.getCoreUrl() + " : " + results);
if (verbose) System.err.println("shard:"
- + props.get(ZkStateReader.SHARD_ID_PROP));
+ + props.getStr(ZkStateReader.SHARD_ID_PROP));
cnt += results;
break;
}
@@ -948,8 +949,8 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
for (CloudJettyRunner cjetty : cloudJettys) {
CloudSolrServerClient client = cjetty.client;
for (Map.Entry slice : slices.entrySet()) {
- Map<String,ZkNodeProps> theShards = slice.getValue().getShards();
- for (Map.Entry<String,ZkNodeProps> shard : theShards.entrySet()) {
+ Map<String,Replica> theShards = slice.getValue().getReplicasMap();
+ for (Map.Entry<String,Replica> shard : theShards.entrySet()) {
String shardName = new URI(
((HttpSolrServer) client.solrClient).getBaseURL()).getPort()
+ "_solr_";
@@ -961,11 +962,11 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
}
long count = 0;
- String currentState = cjetty.info.get(ZkStateReader.STATE_PROP);
+ String currentState = cjetty.info.getStr(ZkStateReader.STATE_PROP);
if (currentState != null
&& currentState.equals(ZkStateReader.ACTIVE)
&& zkStateReader.getClusterState().liveNodesContain(
- cjetty.info.get(ZkStateReader.NODE_NAME_PROP))) {
+ cjetty.info.getStr(ZkStateReader.NODE_NAME_PROP))) {
SolrQuery query = new SolrQuery("*:*");
query.set("distrib", false);
count = client.solrClient.query(query).getResults().getNumFound();
@@ -1209,7 +1210,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
CloudJettyRunner cjetty) throws InterruptedException {
int tries = 0;
while (zkStateReader.getClusterState()
- .liveNodesContain(cjetty.info.get(ZkStateReader.NODE_NAME_PROP))) {
+ .liveNodesContain(cjetty.info.getStr(ZkStateReader.NODE_NAME_PROP))) {
if (tries++ == 120) {
fail("Shard still reported as live in zk");
}
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java
index f248ad76582..1a4d5929a01 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java
@@ -93,7 +93,7 @@ public abstract class AbstractZkTestCase extends SolrTestCaseJ4 {
zkClient = new SolrZkClient(zkAddress, AbstractZkTestCase.TIMEOUT);
- Map<String,String> props = new HashMap<String,String>();
+ Map<String,Object> props = new HashMap<String,Object>();
props.put("configName", "conf1");
final ZkNodeProps zkProps = new ZkNodeProps(props);
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java b/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
index 94bb2b33024..ac518ad017b 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
@@ -259,13 +259,13 @@ public class ChaosMonkey {
Slice theShards = zkStateReader.getClusterState().getSlices(collection)
.get(slice);
- ZkNodeProps props = theShards.getShards().get(cloudJetty.coreNodeName);
+ ZkNodeProps props = theShards.getReplicasMap().get(cloudJetty.coreNodeName);
if (props == null) {
- throw new RuntimeException("shard name " + cloudJetty.coreNodeName + " not found in " + theShards.getShards().keySet());
+ throw new RuntimeException("shard name " + cloudJetty.coreNodeName + " not found in " + theShards.getReplicasMap().keySet());
}
- String state = props.get(ZkStateReader.STATE_PROP);
- String nodeName = props.get(ZkStateReader.NODE_NAME_PROP);
+ String state = props.getStr(ZkStateReader.STATE_PROP);
+ String nodeName = props.getStr(ZkStateReader.NODE_NAME_PROP);
if (!cloudJetty.jetty.isRunning()
@@ -309,7 +309,7 @@ public class ChaosMonkey {
cjetty = jetties.get(index);
ZkNodeProps leader = zkStateReader.getLeaderProps(collection, slice);
- boolean isLeader = leader.get(ZkStateReader.NODE_NAME_PROP).equals(jetties.get(index).nodeName);
+ boolean isLeader = leader.getStr(ZkStateReader.NODE_NAME_PROP).equals(jetties.get(index).nodeName);
if (!aggressivelyKillLeaders && isLeader) {
// we don't kill leaders...
monkeyLog("abort! I don't kill leaders");