SOLR-12723: Reduce object creation in HashBasedRouter.

commit e0eb7bac02 (parent d997e8b4a2)
Author: Andrzej Bialecki
Date:   2018-09-04 18:16:00 +02:00

20 changed files with 70 additions and 55 deletions
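Every hunk below applies the same refactoring: call sites that pulled a Collection<Slice> from DocCollection.getActiveSlices() move to the new Slice[] accessor getActiveSlicesArr(), and iterator- or stream-based loops become plain array loops. A minimal sketch of the pattern (the wrapper class and the doWork callback are hypothetical; DocCollection, Slice, and the two accessors are the real APIs this commit touches):

    import java.util.function.Consumer;

    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Slice;

    class ActiveSliceIterationSketch {
      // Before: an enhanced for-loop over a Collection<Slice> allocates
      // a fresh Iterator on every invocation.
      static void before(DocCollection coll, Consumer<Slice> doWork) {
        for (Slice slice : coll.getActiveSlices()) {
          doWork.accept(slice);
        }
      }

      // After: iterating the cached Slice[] compiles to an index-based
      // loop, so the per-call allocation disappears.
      static void after(DocCollection coll, Consumer<Slice> doWork) {
        for (Slice slice : coll.getActiveSlicesArr()) {
          doWork.accept(slice);
        }
      }
    }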

CHANGES.txt

@@ -337,6 +337,8 @@ Optimizations
   maxConnectionsPerHost as 100k (20 previously). They are now consistent with the UpdateShardHandler defaults.
   (Varun Thacker)

+* SOLR-12723: Reduce object creation in HashBasedRouter. (ab)
+
 Other Changes
 ----------------------

AnalyticsShardRequestManager.java

@@ -107,7 +107,7 @@ public class AnalyticsShardRequestManager {
     ClusterState clusterState = zkStateReader.getClusterState();
     Set<String> liveNodes = clusterState.getLiveNodes();
-    Collection<Slice> slices = clusterState.getCollection(collection).getActiveSlices();
+    Slice[] slices = clusterState.getCollection(collection).getActiveSlicesArr();
     for(Slice slice : slices) {
       Collection<Replica> replicas = slice.getReplicas();

MetricsHistoryHandler.java

@@ -528,16 +528,16 @@ public class MetricsHistoryHandler extends RequestHandlerBase implements PermissionNameProvider {
         Map<String, Number> perReg = totals
             .computeIfAbsent(Group.collection, g -> new HashMap<>())
             .computeIfAbsent(registry, r -> new HashMap<>());
-        Collection<Slice> slices = coll.getActiveSlices();
-        perReg.put(NUM_SHARDS_KEY, slices.size());
+        Slice[] slices = coll.getActiveSlicesArr();
+        perReg.put(NUM_SHARDS_KEY, slices.length);
         DoubleAdder numActiveReplicas = new DoubleAdder();
-        slices.forEach(s -> {
+        for (Slice s : slices) {
           s.forEach(r -> {
             if (r.isActive(state.getLiveNodes())) {
               numActiveReplicas.add(1.0);
             }
           });
-        });
+        }
         perReg.put(NUM_REPLICAS_KEY, numActiveReplicas);
       });
     } catch (IOException e) {

ManagedIndexSchema.java

@@ -289,8 +289,8 @@ public final class ManagedIndexSchema extends IndexSchema {
     ClusterState clusterState = zkStateReader.getClusterState();
     Set<String> liveNodes = clusterState.getLiveNodes();
     final DocCollection docCollection = clusterState.getCollectionOrNull(collection);
-    if (docCollection != null && docCollection.getActiveSlices() != null && docCollection.getActiveSlices().size() > 0) {
-      final Collection<Slice> activeSlices = docCollection.getActiveSlices();
+    if (docCollection != null && docCollection.getActiveSlicesArr().length > 0) {
+      final Slice[] activeSlices = docCollection.getActiveSlicesArr();
       for (Slice next : activeSlices) {
         Map<String, Replica> replicasMap = next.getReplicasMap();
         if (replicasMap != null) {

ScoreJoinQParserPlugin.java

@@ -302,7 +302,7 @@ public class ScoreJoinQParserPlugin extends QParserPlugin {
       String fromReplica = null;

       String nodeName = zkController.getNodeName();
-      for (Slice slice : zkController.getClusterState().getCollection(fromIndex).getActiveSlices()) {
+      for (Slice slice : zkController.getClusterState().getCollection(fromIndex).getActiveSlicesArr()) {
         if (fromReplica != null)
           throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
               "SolrCloud join: multiple shards not yet supported " + fromIndex);

HttpSolrCall.java

@@ -862,9 +862,9 @@ public class HttpSolrCall {
                                         Collection<Slice> slices, boolean activeSlices) {
     if (activeSlices) {
       for (Map.Entry<String, DocCollection> entry : clusterState.getCollectionsMap().entrySet()) {
-        final Collection<Slice> activeCollectionSlices = entry.getValue().getActiveSlices();
-        if (activeCollectionSlices != null) {
-          slices.addAll(activeCollectionSlices);
+        final Slice[] activeCollectionSlices = entry.getValue().getActiveSlicesArr();
+        for (Slice s : activeCollectionSlices) {
+          slices.add(s);
         }
       }
     } else {
@@ -880,45 +880,48 @@ public class HttpSolrCall {
   private String getRemotCoreUrl(String collectionName, String origCorename) {
     ClusterState clusterState = cores.getZkController().getClusterState();
     final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
-    Collection<Slice> slices = (docCollection != null) ? docCollection.getActiveSlices() : null;
+    Slice[] slices = (docCollection != null) ? docCollection.getActiveSlicesArr() : null;
+    List<Slice> activeSlices = new ArrayList<>();
     boolean byCoreName = false;

     if (slices == null) {
-      slices = new ArrayList<>();
+      activeSlices = new ArrayList<>();
       // look by core name
       byCoreName = true;
-      getSlicesForCollections(clusterState, slices, true);
-      if (slices.isEmpty()) {
-        getSlicesForCollections(clusterState, slices, false);
+      getSlicesForCollections(clusterState, activeSlices, true);
+      if (activeSlices.isEmpty()) {
+        getSlicesForCollections(clusterState, activeSlices, false);
       }
+    } else {
+      for (Slice s : slices) {
+        activeSlices.add(s);
+      }
     }

-    if (slices.isEmpty()) {
+    if (activeSlices.isEmpty()) {
       return null;
     }

     collectionsList.add(collectionName);
     String coreUrl = getCoreUrl(collectionName, origCorename, clusterState,
-        slices, byCoreName, true);
+        activeSlices, byCoreName, true);

     if (coreUrl == null) {
       coreUrl = getCoreUrl(collectionName, origCorename, clusterState,
-          slices, byCoreName, false);
+          activeSlices, byCoreName, false);
     }

     return coreUrl;
   }

   private String getCoreUrl(String collectionName,
-                            String origCorename, ClusterState clusterState, Collection<Slice> slices,
+                            String origCorename, ClusterState clusterState, List<Slice> slices,
                             boolean byCoreName, boolean activeReplicas) {
     String coreUrl;
     Set<String> liveNodes = clusterState.getLiveNodes();
-    List<Slice> randomizedSlices = new ArrayList<>(slices.size());
-    randomizedSlices.addAll(slices);
-    Collections.shuffle(randomizedSlices, random);
+    Collections.shuffle(slices, random);

-    for (Slice slice : randomizedSlices) {
+    for (Slice slice : slices) {
       List<Replica> randomizedReplicas = new ArrayList<>();
       randomizedReplicas.addAll(slice.getReplicas());
       Collections.shuffle(randomizedReplicas, random);
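A detail worth noting in this hunk: getCoreUrl() now shuffles the List<Slice> it receives in place rather than copying it first, which is safe only because both call sites shown above hand it the freshly built activeSlices list. A self-contained sketch of that contract (names are illustrative, not from Solr):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.Random;

    class ShuffleInPlaceSketch {
      private static final Random random = new Random();

      // Like the new getCoreUrl(): may reorder the list it is given,
      // so callers must pass a list they own.
      static String pickOne(List<String> coreUrls) {
        Collections.shuffle(coreUrls, random); // in place, no defensive copy
        return coreUrls.isEmpty() ? null : coreUrls.get(0);
      }

      public static void main(String[] args) {
        // Safe: a freshly built list, analogous to activeSlices above.
        List<String> urls = new ArrayList<>(Arrays.asList(
            "http://node1/solr/collection1", "http://node2/solr/collection1"));
        System.out.println(pickOne(urls));
      }
    }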

DistributedUpdateProcessor.java

@@ -481,9 +481,9 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
       for (Entry<String, RoutingRule> entry : routingRules.entrySet()) {
         String targetCollectionName = entry.getValue().getTargetCollectionName();
         final DocCollection docCollection = cstate.getCollectionOrNull(targetCollectionName);
-        if (docCollection != null && docCollection.getActiveSlices() != null && !docCollection.getActiveSlices().isEmpty()) {
-          final Collection<Slice> activeSlices = docCollection.getActiveSlices();
-          Slice any = activeSlices.iterator().next();
+        if (docCollection != null && docCollection.getActiveSlicesArr().length > 0) {
+          final Slice[] activeSlices = docCollection.getActiveSlicesArr();
+          Slice any = activeSlices[0];
           if (nodes == null) nodes = new ArrayList<>();
           nodes.add(new StdNode(new ZkCoreNodeProps(any.getLeader())));
         }

DocExpirationUpdateProcessorFactory.java

@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.text.ParseException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
@@ -32,6 +33,7 @@ import org.apache.solr.cloud.CloudDescriptor;
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.util.ExecutorUtil;
@@ -470,12 +472,13 @@ public final class DocExpirationUpdateProcessorFactory
     CloudDescriptor desc = core.getCoreDescriptor().getCloudDescriptor();
     String col = desc.getCollectionName();

-    List<Slice> slices = new ArrayList<Slice>(zk.getClusterState().getCollection(col).getActiveSlices());
-    Collections.sort(slices, COMPARE_SLICES_BY_NAME);
-    if (slices.isEmpty()) {
+    DocCollection docCollection = zk.getClusterState().getCollection(col);
+    if (docCollection.getActiveSlicesArr().length == 0) {
       log.error("Collection {} has no active Slices?", col);
       return false;
     }
+    List<Slice> slices = new ArrayList<>(Arrays.asList(docCollection.getActiveSlicesArr()));
+    Collections.sort(slices, COMPARE_SLICES_BY_NAME);

     Replica firstSliceLeader = slices.get(0).getLeader();
     if (null == firstSliceLeader) {
       log.warn("Slice in charge of periodic deletes for {} does not currently have a leader",

TimeRoutedAliasUpdateProcessor.java

@@ -20,7 +20,6 @@ package org.apache.solr.update.processor;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.time.Instant;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
 import java.util.List;
@@ -386,11 +385,11 @@ public class TimeRoutedAliasUpdateProcessor extends UpdateRequestProcessor {
   }

   private SolrCmdDistributor.Node lookupShardLeaderOfCollection(String collection) {
-    final Collection<Slice> activeSlices = zkController.getClusterState().getCollection(collection).getActiveSlices();
-    if (activeSlices.isEmpty()) {
+    final Slice[] activeSlices = zkController.getClusterState().getCollection(collection).getActiveSlicesArr();
+    if (activeSlices.length == 0) {
       throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Cannot route to collection " + collection);
     }
-    final Slice slice = activeSlices.iterator().next();
+    final Slice slice = activeSlices[0];
     return getLeaderNode(collection, slice);
   }

CloudSolrClient.java

@@ -611,10 +611,8 @@ public class CloudSolrClient extends SolrClient {
   private Map<String,List<String>> buildUrlMap(DocCollection col) {
     Map<String, List<String>> urlMap = new HashMap<>();
-    Collection<Slice> slices = col.getActiveSlices();
-    Iterator<Slice> sliceIterator = slices.iterator();
-    while (sliceIterator.hasNext()) {
-      Slice slice = sliceIterator.next();
+    Slice[] slices = col.getActiveSlicesArr();
+    for (Slice slice : slices) {
       String name = slice.getName();
       List<String> urls = new ArrayList<>();
       Replica leader = slice.getLeader();
@@ -1262,7 +1260,7 @@
       NamedList routes = ((CloudSolrClient.RouteResponse)resp).getRouteResponses();
       DocCollection coll = getDocCollection(collection, null);
       Map<String,String> leaders = new HashMap<String,String>();
-      for (Slice slice : coll.getActiveSlices()) {
+      for (Slice slice : coll.getActiveSlicesArr()) {
         Replica leader = slice.getLeader();
         if (leader != null) {
           ZkCoreNodeProps zkProps = new ZkCoreNodeProps(leader);

StatementImpl.java

@@ -78,7 +78,7 @@ class StatementImpl implements Statement {
   protected SolrStream constructStream(String sql) throws IOException {
     try {
       ZkStateReader zkStateReader = this.connection.getClient().getZkStateReader();
-      Collection<Slice> slices = CloudSolrStream.getSlices(this.connection.getCollection(), zkStateReader, true);
+      Slice[] slices = CloudSolrStream.getSlices(this.connection.getCollection(), zkStateReader, true);
       List<Replica> shuffler = new ArrayList<>();
       for(Slice slice : slices) {

CloudSolrStream.java

@@ -18,7 +18,7 @@ package org.apache.solr.client.solrj.io.stream;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collection;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -325,7 +325,7 @@ public class CloudSolrStream extends TupleStream implements Expressible {
     }
   }

-  public static Collection<Slice> getSlices(String collectionName, ZkStateReader zkStateReader, boolean checkAlias) throws IOException {
+  public static Slice[] getSlices(String collectionName, ZkStateReader zkStateReader, boolean checkAlias) throws IOException {
     ClusterState clusterState = zkStateReader.getClusterState();
     Map<String, DocCollection> collectionsMap = clusterState.getCollectionsMap();
@@ -341,16 +341,16 @@ public class CloudSolrStream extends TupleStream implements Expressible {
     List<Slice> slices = collections.stream()
         .map(collectionsMap::get)
         .filter(Objects::nonNull)
-        .flatMap(docCol -> docCol.getActiveSlices().stream())
+        .flatMap(docCol -> Arrays.stream(docCol.getActiveSlicesArr()))
         .collect(Collectors.toList());
     if (!slices.isEmpty()) {
-      return slices;
+      return slices.toArray(new Slice[slices.size()]);
     }

     // Check collection case insensitive
     for(String collectionMapKey : collectionsMap.keySet()) {
       if(collectionMapKey.equalsIgnoreCase(collectionName)) {
-        return collectionsMap.get(collectionMapKey).getActiveSlices();
+        return collectionsMap.get(collectionMapKey).getActiveSlicesArr();
       }
     }
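Since the enhanced for-loop accepts a Slice[] just as it does a Collection<Slice>, the return-type change to getSlices() is nearly transparent: the callers updated below only change the declared type of the local variable. A sketch of a call site under the new signature (the collection name is a placeholder; obtaining the ZkStateReader, normally via CloudSolrClient#getZkStateReader, is left to the caller):

    import java.io.IOException;

    import org.apache.solr.client.solrj.io.stream.CloudSolrStream;
    import org.apache.solr.common.cloud.Slice;
    import org.apache.solr.common.cloud.ZkStateReader;

    class GetSlicesSketch {
      static void printShardNames(ZkStateReader zkStateReader) throws IOException {
        Slice[] slices = CloudSolrStream.getSlices("collection1", zkStateReader, true);
        for (Slice slice : slices) { // same loop shape as with Collection<Slice>
          System.out.println(slice.getName());
        }
      }
    }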

FeaturesSelectionStream.java

@@ -259,7 +259,7 @@ public class FeaturesSelectionStream extends TupleStream implements Expressible {
     try {
       ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
-      Collection<Slice> slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
+      Slice[] slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
       ClusterState clusterState = zkStateReader.getClusterState();
       Set<String> liveNodes = clusterState.getLiveNodes();

TextLogitStream.java

@@ -341,7 +341,7 @@ public class TextLogitStream extends TupleStream implements Expressible {
     try {
       ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
-      Collection<Slice> slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
+      Slice[] slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
       ClusterState clusterState = zkStateReader.getClusterState();
       Set<String> liveNodes = clusterState.getLiveNodes();

TopicStream.java

@@ -395,7 +395,7 @@ public class TopicStream extends CloudSolrStream implements Expressible {
     this.checkpoints = new HashMap<>();
     ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
-    Collection<Slice> slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
+    Slice[] slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
     ClusterState clusterState = zkStateReader.getClusterState();
     Set<String> liveNodes = clusterState.getLiveNodes();
@@ -474,7 +474,7 @@ public class TopicStream extends CloudSolrStream implements Expressible {
   private void getPersistedCheckpoints() throws IOException {
     ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
-    Collection<Slice> slices = CloudSolrStream.getSlices(checkpointCollection, zkStateReader, false);
+    Slice[] slices = CloudSolrStream.getSlices(checkpointCollection, zkStateReader, false);
     ClusterState clusterState = zkStateReader.getClusterState();
     Set<String> liveNodes = clusterState.getLiveNodes();
@@ -506,7 +506,7 @@ public class TopicStream extends CloudSolrStream implements Expressible {
   protected void constructStreams() throws IOException {
     try {
       ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
-      Collection<Slice> slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
+      Slice[] slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
       ModifiableSolrParams mParams = new ModifiableSolrParams(params);
       mParams.set(DISTRIB, "false"); // We are the aggregator.

TupleStream.java

@@ -133,7 +133,7 @@ public abstract class TupleStream implements Closeable, Serializable, MapWriter {
     CloudSolrClient cloudSolrClient = streamContext.getSolrClientCache().getCloudSolrClient(zkHost);
     ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
     ClusterState clusterState = zkStateReader.getClusterState();
-    Collection<Slice> slices = CloudSolrStream.getSlices(collection, zkStateReader, true);
+    Slice[] slices = CloudSolrStream.getSlices(collection, zkStateReader, true);
     Set<String> liveNodes = clusterState.getLiveNodes();
     for(Slice slice : slices) {
       Collection<Replica> replicas = slice.getReplicas();

ClusterStateUtil.java

@@ -217,8 +217,8 @@ public class ClusterStateUtil {
   }

   public static int getLiveAndActiveReplicaCount(ZkStateReader zkStateReader, String collection) {
-    Collection<Slice> slices;
-    slices = zkStateReader.getClusterState().getCollection(collection).getActiveSlices();
+    Slice[] slices;
+    slices = zkStateReader.getClusterState().getCollection(collection).getActiveSlicesArr();
     int liveAndActive = 0;
     for (Slice slice : slices) {
       for (Replica replica : slice.getReplicas()) {

CompositeIdRouter.java

@@ -90,7 +90,7 @@ public class CompositeIdRouter extends HashBasedRouter {
     Range completeRange = new KeyParser(id).getRange();

     List<Slice> targetSlices = new ArrayList<>(1);
-    for (Slice slice : collection.getActiveSlices()) {
+    for (Slice slice : collection.getActiveSlicesArr()) {
       Range range = slice.getRange();
       if (range != null && range.overlaps(completeRange)) {
         targetSlices.add(slice);

DocCollection.java

@@ -58,6 +58,7 @@ public class DocCollection extends ZkNodeProps implements Iterable<Slice> {
   private final String name;
   private final Map<String, Slice> slices;
   private final Map<String, Slice> activeSlices;
+  private final Slice[] activeSlicesArr;
   private final Map<String, List<Replica>> nodeNameReplicas;
   private final Map<String, List<Replica>> nodeNameLeaderReplicas;
   private final DocRouter router;
@@ -112,6 +113,7 @@ public class DocCollection extends ZkNodeProps implements Iterable<Slice> {
         addNodeNameReplica(replica);
       }
     }
+    this.activeSlicesArr = activeSlices.values().toArray(new Slice[activeSlices.size()]);
     this.router = router;
     this.znode = znode == null? ZkStateReader.CLUSTER_STATE : znode;
     assert name != null && slices != null;
@@ -197,6 +199,13 @@ public class DocCollection extends ZkNodeProps implements Iterable<Slice> {
     return activeSlices.values();
   }

+  /**
+   * Return array of active slices for this collection (performance optimization).
+   */
+  public Slice[] getActiveSlicesArr() {
+    return activeSlicesArr;
+  }
+
   /**
    * Get the map of all slices (sliceName-&gt;Slice) for this collection.
    */
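Note that getActiveSlicesArr() returns the internal array without a defensive copy, so a caller that wants a different order must copy before sorting; the DocExpirationUpdateProcessorFactory hunk above does exactly that. A sketch of the safe pattern (sorting by name as that hunk does; the helper class is hypothetical):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Slice;

    class SortedSlicesSketch {
      // Sort a copy, never the shared array: every reader of this
      // DocCollection snapshot sees the same Slice[] instance.
      static List<Slice> activeSlicesSortedByName(DocCollection docCollection) {
        List<Slice> slices = new ArrayList<>(Arrays.asList(docCollection.getActiveSlicesArr()));
        slices.sort(Comparator.comparing(Slice::getName));
        return slices;
      }
    }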

HashBasedRouter.java

@@ -59,7 +59,8 @@ public abstract class HashBasedRouter extends DocRouter {
   }

   protected Slice hashToSlice(int hash, DocCollection collection) {
-    for (Slice slice : collection.getActiveSlices()) {
+    final Slice[] slices = collection.getActiveSlicesArr();
+    for (Slice slice : slices) {
       Range range = slice.getRange();
       if (range != null && range.includes(hash)) return slice;
     }