mirror of https://github.com/apache/lucene.git
Merge branch 'apache-https-master' into jira/solr-8593
commit 3c62f81b79
@@ -25,7 +25,6 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.LongBitSet;

/**

@@ -74,9 +73,9 @@ public final class DocValuesRewriteMethod extends MultiTermQuery.RewriteMethod {

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return new RandomAccessWeight(this, boost) {
return new ConstantScoreWeight(this, boost) {
@Override
protected Bits getMatchingDocs(LeafReaderContext context) throws IOException {
public Scorer scorer(LeafReaderContext context) throws IOException {
final SortedSetDocValues fcsi = DocValues.getSortedSet(context.reader(), query.field);
TermsEnum termsEnum = query.getTermsEnum(new Terms() {

@@ -141,33 +140,23 @@ public final class DocValuesRewriteMethod extends MultiTermQuery.RewriteMethod {
}
} while (termsEnum.next() != null);

return new Bits() {
return new ConstantScoreScorer(this, score(), new TwoPhaseIterator(fcsi) {

@Override
public boolean get(int doc) {
try {
if (doc > fcsi.docID()) {
fcsi.advance(doc);
}
if (doc == fcsi.docID()) {
public boolean matches() throws IOException {
for (long ord = fcsi.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = fcsi.nextOrd()) {
if (termSet.get(ord)) {
return true;
}
}
}
return false;
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}

@Override
public int length() {
return context.reader().maxDoc();
public float matchCost() {
return 3; // lookup in a bitset
}

};
});
}
};
}
@@ -1,76 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.Bits.MatchNoBits;

/**
* Base class to build {@link Weight}s that are based on random-access
* structures such as live docs or doc values. Such weights return a
* {@link Scorer} which consists of an approximation that matches
* everything, and a confirmation phase that first checks live docs and
* then the {@link Bits} returned by {@link #getMatchingDocs(LeafReaderContext)}.
* @lucene.internal
*/
public abstract class RandomAccessWeight extends ConstantScoreWeight {

/** Sole constructor. */
protected RandomAccessWeight(Query query, float boost) {
super(query, boost);
}

/**
* Return a {@link Bits} instance representing documents that match this
* weight on the given context. A return value of {@code null} indicates
* that no documents matched.
* Note: it is not needed to care about live docs as they will be checked
* before the returned bits.
*/
protected abstract Bits getMatchingDocs(LeafReaderContext context) throws IOException;

@Override
public final Scorer scorer(LeafReaderContext context) throws IOException {
final Bits matchingDocs = getMatchingDocs(context);
if (matchingDocs == null || matchingDocs instanceof MatchNoBits) {
return null;
}
final DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
final TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {

@Override
public boolean matches() throws IOException {
final int doc = approximation.docID();

return matchingDocs.get(doc);
}

@Override
public float matchCost() {
return 10; // TODO: use some cost of matchingDocs
}
};

return new ConstantScoreScorer(this, score(), twoPhase);
}

}
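Every RandomAccessWeight call site in this commit is rewritten the same way: a plain ConstantScoreWeight whose scorer wraps an approximation (either DocIdSetIterator.all(maxDoc) or the doc-values iterator itself) in a TwoPhaseIterator, replacing the random-access Bits and its RuntimeException-wrapped IO. A minimal sketch of the target shape, assuming a hypothetical LeafPredicate callback in place of the real doc-values check:

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;

// Sketch only: LeafPredicate is a hypothetical stand-in for the real
// per-document check (doc-values lookup, function value, etc.).
interface LeafPredicate {
  boolean matches(LeafReaderContext context, int doc) throws IOException;
}

class TwoPhasePattern {
  static Weight weight(Query query, float boost, float cost, LeafPredicate predicate) {
    return new ConstantScoreWeight(query, boost) {
      @Override
      public Scorer scorer(LeafReaderContext context) throws IOException {
        // Phase one: a cheap approximation that matches every document.
        DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
        TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {
          @Override
          public boolean matches() throws IOException {
            // Phase two: confirm the candidate; IOException now propagates
            // instead of being wrapped in a RuntimeException.
            return predicate.matches(context, approximation.docID());
          }
          @Override
          public float matchCost() {
            return cost; // relative cost of one confirmation
          }
        };
        return new ConstantScoreScorer(this, score(), twoPhase);
      }
    };
  }
}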
@@ -46,19 +46,22 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RandomAccessWeight;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.InPlaceMergeSorter;

@@ -651,29 +654,28 @@ public class TestDrillSideways extends FacetTestCase {

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return new RandomAccessWeight(this, boost) {
@Override
protected Bits getMatchingDocs(final LeafReaderContext context) throws IOException {
return new Bits() {
return new ConstantScoreWeight(this, boost) {

@Override
public boolean get(int docID) {
try {
public Scorer scorer(LeafReaderContext context) throws IOException {
DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
return new ConstantScoreScorer(this, score(), new TwoPhaseIterator(approximation) {

@Override
public boolean matches() throws IOException {
int docID = approximation.docID();
return (Integer.parseInt(context.reader().document(docID).get("id")) & 1) == 0;
} catch (NumberFormatException | IOException e) {
throw new RuntimeException(e);
}
}

@Override
public int length() {
return context.reader().maxDoc();
public float matchCost() {
return 1000f;
}
});
}

};
}
};
}

@Override
public String toString(String field) {
@@ -27,7 +27,6 @@ import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.Bits;

/**
* Like {@link DocValuesTermsQuery}, but this query only

@@ -96,38 +95,29 @@ public class DocValuesNumbersQuery extends Query {

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return new RandomAccessWeight(this, boost) {
return new ConstantScoreWeight(this, boost) {

@Override
protected Bits getMatchingDocs(LeafReaderContext context) throws IOException {
public Scorer scorer(LeafReaderContext context) throws IOException {
final SortedNumericDocValues values = DocValues.getSortedNumeric(context.reader(), field);
return new Bits() {
return new ConstantScoreScorer(this, score(), new TwoPhaseIterator(values) {

@Override
public boolean get(int doc) {
try {
if (doc > values.docID()) {
values.advance(doc);
}
if (doc == values.docID()) {
public boolean matches() throws IOException {
int count = values.docValueCount();
for(int i=0;i<count;i++) {
if (numbers.contains(values.nextValue())) {
return true;
}
}
}
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
return false;
}

@Override
public int length() {
return context.reader().maxDoc();
public float matchCost() {
return 5; // lookup in the set
}
};
});
}
};
}
@@ -25,7 +25,6 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;

/**

@@ -143,10 +142,19 @@ public final class DocValuesRangeQuery extends Query {
if (lowerVal == null && upperVal == null) {
throw new IllegalStateException("Both min and max values must not be null, call rewrite first");
}
return new RandomAccessWeight(DocValuesRangeQuery.this, boost) {

return new ConstantScoreWeight(DocValuesRangeQuery.this, boost) {

@Override
protected Bits getMatchingDocs(LeafReaderContext context) throws IOException {
public Scorer scorer(LeafReaderContext context) throws IOException {
final TwoPhaseIterator iterator = createTwoPhaseIterator(context);
if (iterator == null) {
return null;
}
return new ConstantScoreScorer(this, score(), iterator);
}

private TwoPhaseIterator createTwoPhaseIterator(LeafReaderContext context) throws IOException {
if (lowerVal instanceof Long || upperVal instanceof Long) {

final SortedNumericDocValues values = DocValues.getSortedNumeric(context.reader(), field);

@@ -179,32 +187,24 @@ public final class DocValuesRangeQuery extends Query {
return null;
}

return new Bits() {
return new TwoPhaseIterator(values) {

@Override
public boolean get(int doc) {
try {
if (doc > values.docID()) {
values.advance(doc);
}
if (doc == values.docID()) {
public boolean matches() throws IOException {
final int count = values.docValueCount();
assert count > 0;
for (int i = 0; i < count; ++i) {
final long value = values.nextValue();
if (value >= min && value <= max) {
return true;
}
}
}
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
return false;
}

@Override
public int length() {
return context.reader().maxDoc();
public float matchCost() {
return 2; // 2 comparisons
}

};

@@ -245,32 +245,22 @@ public final class DocValuesRangeQuery extends Query {
return null;
}

return new Bits() {
return new TwoPhaseIterator(values) {

@Override
public boolean get(int doc) {
try {
if (doc > values.docID()) {
values.advance(doc);
}
if (doc == values.docID()) {
public boolean matches() throws IOException {
for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
if (ord >= minOrd && ord <= maxOrd) {
return true;
}
}
}
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
return false;
}

@Override
public int length() {
return context.reader().maxDoc();
public float matchCost() {
return 2; // 2 comparisons
}

};

} else {
@@ -27,7 +27,6 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LongBitSet;

@@ -149,45 +148,41 @@ public class DocValuesTermsQuery extends Query {

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return new RandomAccessWeight(this, boost) {
return new ConstantScoreWeight(this, boost) {

@Override
protected Bits getMatchingDocs(LeafReaderContext context) throws IOException {
public Scorer scorer(LeafReaderContext context) throws IOException {
final SortedSetDocValues values = DocValues.getSortedSet(context.reader(), field);
final LongBitSet bits = new LongBitSet(values.getValueCount());
boolean matchesAtLeastOneTerm = false;
for (BytesRef term : terms) {
final long ord = values.lookupTerm(term);
if (ord >= 0) {
matchesAtLeastOneTerm = true;
bits.set(ord);
}
}
return new Bits() {
if (matchesAtLeastOneTerm == false) {
return null;
}
return new ConstantScoreScorer(this, score(), new TwoPhaseIterator(values) {

@Override
public boolean get(int doc) {
try {
if (doc > values.docID()) {
values.advance(doc);
}
if (doc == values.docID()) {
public boolean matches() throws IOException {
for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
if (bits.get(ord)) {
return true;
}
}
}
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
return false;
}

@Override
public int length() {
return context.reader().maxDoc();
public float matchCost() {
return 3; // lookup in a bitset
}

};
});
}
};
}
@@ -30,17 +30,19 @@ import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RandomAccessWeight;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.spatial.SpatialStrategy;
import org.apache.lucene.spatial.query.SpatialArgs;
import org.apache.lucene.spatial.util.DistanceToShapeValueSource;
import org.apache.lucene.spatial.util.ShapePredicateValueSource;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.locationtech.spatial4j.context.SpatialContext;

@@ -136,25 +138,25 @@ public class SerializedDVStrategy extends SpatialStrategy {

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return new RandomAccessWeight(this, boost) {
return new ConstantScoreWeight(this, boost) {
@Override
protected Bits getMatchingDocs(LeafReaderContext context) throws IOException {
public Scorer scorer(LeafReaderContext context) throws IOException {
DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
final FunctionValues predFuncValues = predicateValueSource.getValues(null, context);
return new Bits() {
return new ConstantScoreScorer(this, score(), new TwoPhaseIterator(approximation) {

@Override
public boolean get(int index) {
try {
return predFuncValues.boolVal(index);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
public boolean matches() throws IOException {
final int docID = approximation.docID();
return predFuncValues.boolVal(docID);
}

@Override
public int length() {
return context.reader().maxDoc();
public float matchCost() {
// TODO: what is the cost of the predicateValueSource
return 100f;
}
};
});
}
};
}
@@ -113,6 +113,9 @@ New Features

* SOLR-9038: Add a command-line tool to manage the snapshots functionality (Hrishikesh Gadre via yonik)

* SOLR-9633: Limit memory consumed by FastLRUCache with a new 'maxRamMB' config parameter.
  (yonik, Michael Sun, shalin)

Optimizations
----------------------
* SOLR-9704: Facet Module / JSON Facet API: Optimize blockChildren facets that have

@@ -147,6 +150,14 @@ Bug Fixes
  after the first expanded value in the same multi-valued parameter to be dropped.
  (Erik Hatcher, yonik)

* SOLR-9751: PreAnalyzedField can cause managed schema corruption. (Steve Rowe)

* SOLR-9736: Solr resolves the collection name against the first available leader or first replica
  of the first slice. This puts undue pressure on leader cores and likely on the wrong ones. This is
  fixed to randomly pick a leader on updates or a replica core otherwise. (Cao Manh Dat via shalin)

* SOLR-9284: The HDFS BlockDirectoryCache should not let its keysToRelease or names maps grow indefinitely.
  (Mark Miller, Michael Sun)

Other Changes
----------------------
@@ -864,6 +864,11 @@ public abstract class FieldType extends FieldProperties {
namedPropertyValues.add(SIMILARITY, getSimilarityFactory().getNamedPropertyValues());
}

if (this instanceof HasImplicitIndexAnalyzer) {
if (isExplicitQueryAnalyzer()) {
namedPropertyValues.add(QUERY_ANALYZER, getAnalyzerProperties(getQueryAnalyzer()));
}
} else {
if (isExplicitAnalyzer()) {
String analyzerProperty = isExplicitQueryAnalyzer() ? INDEX_ANALYZER : ANALYZER;
namedPropertyValues.add(analyzerProperty, getAnalyzerProperties(getIndexAnalyzer()));

@@ -872,6 +877,7 @@ public abstract class FieldType extends FieldProperties {
String analyzerProperty = isExplicitAnalyzer() ? QUERY_ANALYZER : ANALYZER;
namedPropertyValues.add(analyzerProperty, getAnalyzerProperties(getQueryAnalyzer()));
}
}
if (this instanceof TextField) {
if (((TextField)this).isExplicitMultiTermAnalyzer()) {
namedPropertyValues.add(MULTI_TERM_ANALYZER, getAnalyzerProperties(((TextField) this).getMultiTermAnalyzer()));
@@ -111,6 +111,20 @@ public final class FieldTypePluginLoader
ft.setSimilarity(simFactory);
}

if (ft instanceof HasImplicitIndexAnalyzer) {
ft.setIsExplicitAnalyzer(false);
if (null != queryAnalyzer && null != analyzer) {
if (log.isWarnEnabled()) {
log.warn("Ignoring index-time analyzer for field: " + name);
}
} else if (null == queryAnalyzer) { // Accept non-query-time analyzer as a query-time analyzer
queryAnalyzer = analyzer;
}
if (null != queryAnalyzer) {
ft.setIsExplicitQueryAnalyzer(true);
ft.setQueryAnalyzer(queryAnalyzer);
}
} else {
if (null == queryAnalyzer) {
queryAnalyzer = analyzer;
ft.setIsExplicitQueryAnalyzer(false);

@@ -137,6 +151,7 @@ public final class FieldTypePluginLoader
((TextField)ft).setMultiTermAnalyzer(multiAnalyzer);
}
}
}
if (ft instanceof SchemaAware){
schemaAware.add((SchemaAware) ft);
}
@@ -0,0 +1,25 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.solr.schema;

/**
* Marker interface for field types that have an implicit (non-user-configurable)
* index-time analyzer.
*/
public interface HasImplicitIndexAnalyzer {
}
@@ -50,7 +50,7 @@ import static org.apache.solr.common.params.CommonParams.JSON;
* Pre-analyzed field type provides a way to index a serialized token stream,
* optionally with an independent stored value of a field.
*/
public class PreAnalyzedField extends TextField {
public class PreAnalyzedField extends TextField implements HasImplicitIndexAnalyzer {
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

/** Init argument name. Value is a fully-qualified class name of the parser
@@ -43,7 +43,7 @@ import java.util.concurrent.TimeUnit;
* @see org.apache.solr.search.SolrCache
* @since solr 1.4
*/
public class FastLRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {
public class FastLRUCache<K, V> extends SolrCacheBase implements SolrCache<K,V> {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

// contains the statistics objects for all open caches of the same type

@@ -55,6 +55,8 @@ public class FastLRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {
private ConcurrentLRUCache<K,V> cache;
private int showItems = 0;

private long maxRamBytes;

@Override
public Object init(Map args, Object persistence, CacheRegenerator regenerator) {
super.init(args, regenerator);

@@ -87,8 +89,18 @@ public class FastLRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {

str = (String) args.get("showItems");
showItems = str == null ? 0 : Integer.parseInt(str);

str = (String) args.get("maxRamMB");
this.maxRamBytes = str == null ? Long.MAX_VALUE : (long) (Double.parseDouble(str) * 1024L * 1024L);
if (maxRamBytes != Long.MAX_VALUE) {
int ramLowerWatermark = (int) (maxRamBytes * 0.8);
description = generateDescription(maxRamBytes, ramLowerWatermark, newThread);
cache = new ConcurrentLRUCache<K, V>(ramLowerWatermark, maxRamBytes, newThread, null);
} else {
description = generateDescription(limit, initialSize, minLimit, acceptableLimit, newThread);
cache = new ConcurrentLRUCache<>(limit, minLimit, acceptableLimit, initialSize, newThread, false, null);
}

cache.setAlive(false);

statsList = (List<ConcurrentLRUCache.Stats>) persistence;

@@ -118,6 +130,16 @@ public class FastLRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {
return description;
}

protected String generateDescription(long maxRamBytes, long ramLowerWatermark, boolean newThread) {
String description = "Concurrent LRU Cache(ramMinSize=" + ramLowerWatermark + ", ramMaxSize=" + maxRamBytes
+ ", cleanupThread=" + newThread;
if (isAutowarmingOn()) {
description += ", " + getAutowarmDescription();
}
description += ')';
return description;
}

@Override
public int size() {
return cache.size();
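To make the maxRamMB arithmetic above concrete: the option is parsed as a double number of megabytes, converted to bytes, and the cleanup lower watermark is fixed at 80% of that limit. A small worked sketch (the 100 MB figure is illustrative):

// Worked example of the conversion done in FastLRUCache.init above.
public class MaxRamMath {
  public static void main(String[] args) {
    double maxRamMB = 100.0; // illustrative value of the maxRamMB parameter
    long maxRamBytes = (long) (maxRamMB * 1024L * 1024L);
    long ramLowerWatermark = (long) (maxRamBytes * 0.8);
    System.out.println(maxRamBytes);       // 104857600
    System.out.println(ramLowerWatermark); // 83886080 -- cleanup drains to here
  }
}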
@@ -46,9 +46,9 @@ public class LRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V>, Acco
/// Copied from Lucene's LRUQueryCache

// memory usage of a simple term query
static final long DEFAULT_RAM_BYTES_USED = 192;
public static final long DEFAULT_RAM_BYTES_USED = 192;

static final long HASHTABLE_RAM_BYTES_PER_ENTRY =
public static final long HASHTABLE_RAM_BYTES_PER_ENTRY =
2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF // key + value
* 2; // hash tables need to be oversized to avoid collisions, assume 2x capacity

@@ -191,7 +191,7 @@ public class HttpSolrCall {
return queryParams;
}

private void init() throws Exception {
void init() throws Exception {
//The states of client that is invalid in this request
Aliases aliases = null;
String corename = "";

@@ -271,7 +271,11 @@ public class HttpSolrCall {

if (core == null && cores.isZooKeeperAware()) {
// we couldn't find the core - lets make sure a collection was not specified instead
core = getCoreByCollection(corename);
boolean isPreferLeader = false;
if (path.endsWith("/update") || path.contains("/update/")) {
isPreferLeader = true;
}
core = getCoreByCollection(corename, isPreferLeader);
if (core != null) {
// we found a core, update the path
path = path.substring(idx);

@@ -753,7 +757,7 @@ public class HttpSolrCall {
return result;
}

private SolrCore getCoreByCollection(String collectionName) {
private SolrCore getCoreByCollection(String collectionName, boolean isPreferLeader) {
ZkStateReader zkStateReader = cores.getZkController().getZkStateReader();

ClusterState clusterState = zkStateReader.getClusterState();

@@ -761,37 +765,27 @@ public class HttpSolrCall {
if (collection == null) {
return null;
}
Map<String, Slice> slices = collection.getActiveSlicesMap();
if (slices == null) {
return null;
}

Set<String> liveNodes = clusterState.getLiveNodes();
// look for a core on this node
Set<Map.Entry<String, Slice>> entries = slices.entrySet();
SolrCore core = null;

//Hitting the leaders is useful when it's an update request.
//For queries it doesn't matter and hence we don't distinguish here.
for (Map.Entry<String, Slice> entry : entries) {
// first see if we have the leader
Replica leaderProps = collection.getLeader(entry.getKey());
if (leaderProps != null && liveNodes.contains(leaderProps.getNodeName()) && leaderProps.getState() == Replica.State.ACTIVE) {
core = checkProps(leaderProps);
if (core != null) {
return core;
}
if (isPreferLeader) {
List<Replica> leaderReplicas = collection.getLeaderReplicas(cores.getZkController().getNodeName());
SolrCore core = randomlyGetSolrCore(liveNodes, leaderReplicas);
if (core != null) return core;
}

// check everyone then
Map<String, Replica> shards = entry.getValue().getReplicasMap();
Set<Map.Entry<String, Replica>> shardEntries = shards.entrySet();
for (Map.Entry<String, Replica> shardEntry : shardEntries) {
Replica zkProps = shardEntry.getValue();
if (liveNodes.contains(zkProps.getNodeName()) && zkProps.getState() == Replica.State.ACTIVE) {
core = checkProps(zkProps);
if (core != null) {
return core;
List<Replica> replicas = collection.getReplicas(cores.getZkController().getNodeName());
return randomlyGetSolrCore(liveNodes, replicas);
}

private SolrCore randomlyGetSolrCore(Set<String> liveNodes, List<Replica> replicas) {
if (replicas != null) {
RandomIterator<Replica> it = new RandomIterator<>(random, replicas);
while (it.hasNext()) {
Replica replica = it.next();
if (liveNodes.contains(replica.getNodeName()) && replica.getState() == Replica.State.ACTIVE) {
SolrCore core = checkProps(replica);
if (core != null) return core;
}
}
}

@@ -1027,4 +1021,35 @@ public class HttpSolrCall {
static final String CONNECTION_HEADER = "Connection";
static final String TRANSFER_ENCODING_HEADER = "Transfer-Encoding";
static final String CONTENT_LENGTH_HEADER = "Content-Length";

/**
* A faster method for randomly picking items when you do not need to
* consume all items.
*/
private static class RandomIterator<E> implements Iterator<E> {
private Random rand;
private ArrayList<E> elements;
private int size;

public RandomIterator(Random rand, Collection<E> elements) {
this.rand = rand;
this.elements = new ArrayList<>(elements);
this.size = elements.size();
}

@Override
public boolean hasNext() {
return size > 0;
}

@Override
public E next() {
int idx = rand.nextInt(size);
E e1 = elements.get(idx);
E e2 = elements.get(size-1);
elements.set(idx,e2);
size--;
return e1;
}
}
}
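The RandomIterator helper above is a partial Fisher-Yates shuffle: each next() picks a random survivor, back-fills its slot with the current tail element, and shrinks the logical size, so callers that stop early (as randomlyGetSolrCore does once it finds a live, active core) never pay for shuffling the whole replica list. A standalone sketch of the same idea, with illustrative names:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;

// Partial Fisher-Yates shuffle: O(1) per pick, and an early exit skips
// the work of shuffling elements that are never visited.
public class PartialShuffle {
  public static void main(String[] args) {
    List<String> replicas = new ArrayList<>(Arrays.asList("r1", "r2", "r3", "r4"));
    Random rand = new Random();
    int size = replicas.size();
    while (size > 0) {
      int idx = rand.nextInt(size);              // choose among remaining survivors
      String picked = replicas.get(idx);
      replicas.set(idx, replicas.get(size - 1)); // move the tail into the hole
      size--;                                    // shrink the candidate pool
      System.out.println(picked);
      if (picked.equals("r2")) {
        break; // e.g. a usable core was found; the rest stay untouched
      }
    }
  }
}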
@@ -38,6 +38,11 @@ public class BlockCache {
private final int numberOfBlocksPerBank;
private final int maxEntries;
private final Metrics metrics;
private volatile OnRelease onRelease;

public static interface OnRelease {
public void release(BlockCacheKey blockCacheKey);
}

public BlockCache(Metrics metrics, boolean directAllocation, long totalMemory) {
this(metrics, directAllocation, totalMemory, _128M);

@@ -69,7 +74,7 @@ public class BlockCache {
}

RemovalListener<BlockCacheKey,BlockCacheLocation> listener =
notification -> releaseLocation(notification.getValue());
notification -> releaseLocation(notification.getKey(), notification.getValue());
cache = Caffeine.newBuilder()
.removalListener(listener)
.maximumSize(maxEntries)

@@ -81,7 +86,7 @@ public class BlockCache {
cache.invalidate(key);
}

private void releaseLocation(BlockCacheLocation location) {
private void releaseLocation(BlockCacheKey blockCacheKey, BlockCacheLocation location) {
if (location == null) {
return;
}

@@ -90,6 +95,9 @@ public class BlockCache {
location.setRemoved(true);
locks[bankId].clear(block);
lockCounters[bankId].decrementAndGet();
if (onRelease != null) {
onRelease.release(blockCacheKey);
}
metrics.blockCacheEviction.incrementAndGet();
metrics.blockCacheSize.decrementAndGet();
}

@@ -200,4 +208,8 @@ public class BlockCache {
public int getSize() {
return cache.asMap().size();
}

void setOnRelease(OnRelease onRelease) {
this.onRelease = onRelease;
}
}
@@ -17,18 +17,22 @@
package org.apache.solr.store.blockcache;

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.solr.store.blockcache.BlockCache.OnRelease;

import com.github.benmanes.caffeine.cache.Caffeine;

/**
* @lucene.experimental
*/
public class BlockDirectoryCache implements Cache {
private final BlockCache blockCache;
private final AtomicInteger counter = new AtomicInteger();
private final Map<String,Integer> names = new ConcurrentHashMap<>(8192, 0.75f, 512);
private final com.github.benmanes.caffeine.cache.Cache<String,Integer> names;
private Set<BlockCacheKey> keysToRelease;
private final String path;
private final Metrics metrics;

@@ -41,8 +45,18 @@ public class BlockDirectoryCache implements Cache {
this.blockCache = blockCache;
this.path = path;
this.metrics = metrics;

names = Caffeine.newBuilder().maximumSize(50000).build();

if (releaseBlocks) {
keysToRelease = Collections.newSetFromMap(new ConcurrentHashMap<BlockCacheKey,Boolean>(1024, 0.75f, 512));
blockCache.setOnRelease(new OnRelease() {

@Override
public void release(BlockCacheKey key) {
keysToRelease.remove(key);
}
});
}
}

@@ -57,13 +71,13 @@ public class BlockDirectoryCache implements Cache {

@Override
public void delete(String name) {
names.remove(name);
names.invalidate(name);
}

@Override
public void update(String name, long blockId, int blockOffset, byte[] buffer,
int offset, int length) {
Integer file = names.get(name);
Integer file = names.getIfPresent(name);
if (file == null) {
file = counter.incrementAndGet();
names.put(name, file);

@@ -80,7 +94,7 @@ public class BlockDirectoryCache implements Cache {
@Override
public boolean fetch(String name, long blockId, int blockOffset, byte[] b,
int off, int lengthToReadInBlock) {
Integer file = names.get(name);
Integer file = names.getIfPresent(name);
if (file == null) {
return false;
}

@@ -105,7 +119,8 @@ public class BlockDirectoryCache implements Cache {

@Override
public void renameCacheFile(String source, String dest) {
Integer file = names.remove(source);
Integer file = names.getIfPresent(source);
names.invalidate(source);
// possible if the file is empty
if (file != null) {
names.put(dest, file);
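The move from a raw ConcurrentHashMap to a Caffeine cache above is what stops the names map from growing without bound (SOLR-9284); hence the API shift from get/remove to getIfPresent/invalidate. A minimal sketch of the bounded-map idiom (the bound mirrors the 50000 used above):

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

public class BoundedNames {
  public static void main(String[] args) {
    // Size-bounded map: once maximumSize is exceeded, Caffeine evicts
    // entries, so the name -> file-id map can no longer grow forever.
    Cache<String, Integer> names = Caffeine.newBuilder()
        .maximumSize(50000)
        .build();
    names.put("_0.fdt", 1);
    Integer file = names.getIfPresent("_0.fdt"); // null if absent or evicted
    names.invalidate("_0.fdt");                  // replaces Map.remove
    System.out.println(file);
  }
}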
@@ -15,14 +15,20 @@
* limitations under the License.
*/
package org.apache.solr.util;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.solr.common.util.Cache;
import org.apache.solr.search.LRUCache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;

@@ -45,9 +51,11 @@ import java.lang.ref.WeakReference;
*
* @since solr 1.4
*/
public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConcurrentLRUCache.class);

private final ConcurrentHashMap<Object, CacheEntry<K,V>> map;
private final int upperWaterMark, lowerWaterMark;
private final ReentrantLock markAndSweepLock = new ReentrantLock(true);

@@ -58,7 +66,29 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
private final int acceptableWaterMark;
private long oldestEntry = 0;  // not volatile, only accessed in the cleaning method
private final EvictionListener<K,V> evictionListener;
private CleanupThread cleanupThread ;
private CleanupThread cleanupThread;

private final long ramLowerWatermark, ramUpperWatermark;
private final AtomicLong ramBytes = new AtomicLong(0);

public ConcurrentLRUCache(long ramLowerWatermark, long ramUpperWatermark,
boolean runCleanupThread, EvictionListener<K, V> evictionListener) {
this.ramLowerWatermark = ramLowerWatermark;
this.ramUpperWatermark = ramUpperWatermark;

this.evictionListener = evictionListener;
this.map = new ConcurrentHashMap<>();
this.newThreadForCleanup = false;

this.acceptableWaterMark = -1;
this.lowerWaterMark = Integer.MIN_VALUE;
this.upperWaterMark = Integer.MAX_VALUE;

if (runCleanupThread) {
cleanupThread = new CleanupThread(this);
cleanupThread.start();
}
}

public ConcurrentLRUCache(int upperWaterMark, final int lowerWaterMark, int acceptableWatermark,
int initialSize, boolean runCleanupThread, boolean runNewThreadForCleanup,

@@ -76,6 +106,8 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
cleanupThread = new CleanupThread(this);
cleanupThread.start();
}
this.ramLowerWatermark = Long.MIN_VALUE;
this.ramUpperWatermark = Long.MAX_VALUE;
}

public ConcurrentLRUCache(int size, int lowerWatermark) {

@@ -103,6 +135,9 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
CacheEntry<K,V> cacheEntry = map.remove(key);
if (cacheEntry != null) {
stats.size.decrementAndGet();
if (ramUpperWatermark != Long.MAX_VALUE) {
ramBytes.addAndGet(-cacheEntry.ramBytesUsed() - LRUCache.HASHTABLE_RAM_BYTES_PER_ENTRY);
}
return cacheEntry.value;
}
return null;

@@ -116,8 +151,23 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
int currentSize;
if (oldCacheEntry == null) {
currentSize = stats.size.incrementAndGet();
if (ramUpperWatermark != Long.MAX_VALUE) {
ramBytes.addAndGet(e.ramBytesUsed() + LRUCache.HASHTABLE_RAM_BYTES_PER_ENTRY); // added key + value + entry
}
} else {
currentSize = stats.size.get();
if (ramUpperWatermark != Long.MAX_VALUE) {
if (oldCacheEntry.value instanceof Accountable) {
ramBytes.addAndGet(-((Accountable)oldCacheEntry.value).ramBytesUsed());
} else {
ramBytes.addAndGet(-LRUCache.DEFAULT_RAM_BYTES_USED);
}
if (val instanceof Accountable) {
ramBytes.addAndGet(((Accountable)val).ramBytesUsed());
} else {
ramBytes.addAndGet(LRUCache.DEFAULT_RAM_BYTES_USED);
}
}
}
if (islive) {
stats.putCounter.increment();

@@ -135,7 +185,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
//
// Thread safety note: isCleaning read is piggybacked (comes after) other volatile reads
// in this method.
if (currentSize > upperWaterMark && !isCleaning) {
if ((currentSize > upperWaterMark || ramBytes.get() > ramUpperWatermark) && !isCleaning) {
if (newThreadForCleanup) {
new Thread(this::markAndSweep).start();
} else if (cleanupThread != null){

@@ -169,6 +219,47 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {

if (!markAndSweepLock.tryLock()) return;
try {
if (upperWaterMark != Integer.MAX_VALUE) {
markAndSweepByCacheSize();
} else if (ramUpperWatermark != Long.MAX_VALUE) {
markAndSweepByRamSize();
} else {
// should never happen
throw new AssertionError("ConcurrentLRUCache initialized with neither size limits nor ram limits");
}
} finally {
isCleaning = false;  // set before markAndSweep.unlock() for visibility
markAndSweepLock.unlock();
}
}

/*
Must be called after acquiring markAndSweepLock
*/
private void markAndSweepByRamSize() {
List<CacheEntry<K, V>> entriesInAccessOrder = new ArrayList<>(map.size());
map.forEach((o, kvCacheEntry) -> {
kvCacheEntry.lastAccessedCopy = kvCacheEntry.lastAccessed; // important because we want to avoid volatile read during comparisons
entriesInAccessOrder.add(kvCacheEntry);
});

Collections.sort(entriesInAccessOrder); // newer access is smaller, older access is bigger

// iterate in oldest to newest order
for (int i = entriesInAccessOrder.size() - 1; i >= 0; i--) {
CacheEntry<K, V> kvCacheEntry = entriesInAccessOrder.get(i);
evictEntry(kvCacheEntry.key);
ramBytes.addAndGet(-(kvCacheEntry.ramBytesUsed() + LRUCache.HASHTABLE_RAM_BYTES_PER_ENTRY));
if (ramBytes.get() <= ramLowerWatermark) {
break; // we are done!
}
}
}

/*
Must be called after acquiring markAndSweepLock
*/
private void markAndSweepByCacheSize() {
long oldestEntry = this.oldestEntry;
isCleaning = true;
this.oldestEntry = oldestEntry;     // volatile write to make isCleaning visible

@@ -272,7 +363,6 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
}

// if we still didn't remove enough entries, then make another pass while
// inserting into a priority queue
if (sz - numRemoved > acceptableWaterMark) {

@@ -346,10 +436,6 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {

oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
this.oldestEntry = oldestEntry;
} finally {
isCleaning = false;  // set before markAndSweep.unlock() for visibility
markAndSweepLock.unlock();
}
}

private static class PQueue<K,V> extends PriorityQueue<CacheEntry<K,V>> {

@@ -477,7 +563,9 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
return map;
}

public static class CacheEntry<K,V> implements Comparable<CacheEntry<K,V>> {
public static class CacheEntry<K,V> implements Comparable<CacheEntry<K,V>>, Accountable {
public static long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOf(CacheEntry.class);

K key;
V value;
volatile long lastAccessed = 0;

@@ -514,6 +602,27 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
public String toString() {
return "key: " + key + " value: " + value + " lastAccessed:" + lastAccessed;
}

@Override
public long ramBytesUsed() {
long ramBytes = BASE_RAM_BYTES_USED;
if (key instanceof Accountable) {
ramBytes += ((Accountable) key).ramBytesUsed();
} else {
ramBytes += LRUCache.DEFAULT_RAM_BYTES_USED;
}
if (value instanceof Accountable) {
ramBytes += ((Accountable) value).ramBytesUsed();
} else {
ramBytes += LRUCache.DEFAULT_RAM_BYTES_USED;
}
return ramBytes;
}

@Override
public Collection<Accountable> getChildResources() {
return Collections.emptyList();
}
}

private boolean isDestroyed = false;

@@ -632,4 +741,14 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
super.finalize();
}
}

@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED + ramBytes.get();
}

@Override
public Collection<Accountable> getChildResources() {
return Collections.emptyList();
}
}
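A note on the accounting above: keys and values that do not implement Lucene's Accountable are charged a flat LRUCache.DEFAULT_RAM_BYTES_USED, so cached values with a meaningful footprint should report it themselves. A hedged sketch of such a value (this class is illustrative, not part of the commit):

import java.util.Collection;
import java.util.Collections;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;

// Illustrative cache value that reports its real memory footprint, which
// the ramBytes bookkeeping in ConcurrentLRUCache consults on put/evict.
class SizedValue implements Accountable {
  private static final long BASE = RamUsageEstimator.shallowSizeOfInstance(SizedValue.class);
  private final byte[] payload;

  SizedValue(byte[] payload) {
    this.payload = payload;
  }

  @Override
  public long ramBytesUsed() {
    return BASE + RamUsageEstimator.sizeOf(payload);
  }

  @Override
  public Collection<Accountable> getChildResources() {
    return Collections.emptyList();
  }
}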
@@ -0,0 +1,41 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<schema name="managed-preanalyzed" version="1.6">
<fieldType name="string" class="solr.StrField"/>
<fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>

<fieldType name="preanalyzed-no-analyzer" class="solr.PreAnalyzedField" parserImpl="json"/>
<fieldType name="preanalyzed-with-analyzer" class="solr.PreAnalyzedField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
</analyzer>
</fieldType>
<fieldType name="preanalyzed-with-query-analyzer" class="solr.PreAnalyzedField">
<analyzer type="query">
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>

<!-- for versioning -->
<field name="_version_" type="long" indexed="true" stored="true"/>
<field name="_root_" type="int" indexed="true" stored="true" multiValued="false" required="false"/>
<field name="id" type="string" indexed="true" stored="true"/>
<uniqueKey>id</uniqueKey>
</schema>
@@ -0,0 +1,51 @@
<?xml version="1.0" ?>

<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

<!-- Minimal solrconfig.xml with /select, /admin and /update only -->

<config>

<dataDir>${solr.data.dir:}</dataDir>

<directoryFactory name="DirectoryFactory"
class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>

<schemaFactory class="ManagedIndexSchemaFactory">
<bool name="mutable">${managed.schema.mutable:true}</bool>
<str name="managedSchemaResourceName">managed-schema</str>
</schemaFactory>

<luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>

<updateHandler class="solr.DirectUpdateHandler2">
<commitWithin>
<softCommit>${solr.commitwithin.softcommit:true}</softCommit>
</commitWithin>
<updateLog></updateLog>
</updateHandler>

<requestHandler name="/select" class="solr.SearchHandler">
<lst name="defaults">
<str name="echoParams">explicit</str>
<str name="indent">true</str>
<str name="df">text</str>
</lst>

</requestHandler>
</config>
@@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.solr.schema;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.schema.SchemaRequest;
import org.apache.solr.client.solrj.response.schema.SchemaResponse.FieldResponse;
import org.apache.solr.client.solrj.response.schema.SchemaResponse.UpdateResponse;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.cloud.DocCollection;
import org.junit.BeforeClass;
import org.junit.Test;

public class PreAnalyzedFieldManagedSchemaCloudTest extends SolrCloudTestCase {

private static final String COLLECTION = "managed-preanalyzed";
private static final String CONFIG = "cloud-managed-preanalyzed";

@BeforeClass
public static void setupCluster() throws Exception {
configureCluster(2).addConfig(CONFIG, configset(CONFIG)).configure();
CollectionAdminRequest.createCollection(COLLECTION, CONFIG, 2, 1)
.setMaxShardsPerNode(1)
.process(cluster.getSolrClient());
cluster.getSolrClient().waitForState(COLLECTION, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
(n, c) -> DocCollection.isFullyActive(n, c, 2, 1));
}

@Test
public void testAdd2Fields() throws Exception {
addField(keyValueArrayToMap("name", "field1", "type", "string"));
addField(keyValueArrayToMap("name", "field2", "type", "string"));
}

private void addField(Map<String,Object> field) throws Exception {
CloudSolrClient client = cluster.getSolrClient();
UpdateResponse addFieldResponse = new SchemaRequest.AddField(field).process(client, COLLECTION);
assertNotNull(addFieldResponse);
assertEquals(0, addFieldResponse.getStatus());
assertNull(addFieldResponse.getResponse().get("errors"));
FieldResponse fieldResponse = new SchemaRequest.Field(field.get("name").toString()).process(client, COLLECTION);
assertNotNull(fieldResponse);
assertEquals(0, fieldResponse.getStatus());
}

private Map<String,Object> keyValueArrayToMap(String... alternatingKeysAndValues) {
Map<String,Object> map = new HashMap<>();
for (int i = 0 ; i < alternatingKeysAndValues.length ; i += 2)
map.put(alternatingKeysAndValues[i], alternatingKeysAndValues[i + 1]);
return map;
}
}
@ -0,0 +1,167 @@
|
|||
/*
|
||||
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.servlet;

import javax.servlet.ReadListener;
import javax.servlet.ServletInputStream;
import javax.servlet.ServletOutputStream;
import javax.servlet.WriteListener;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.AbstractDistribZkTestBase;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Response;
import org.junit.BeforeClass;
import org.junit.Test;

public class HttpSolrCallGetCoreTest extends SolrCloudTestCase {
  private static final String COLLECTION = "collection1";
  private static final int NUM_SHARD = 3;
  private static final int REPLICA_FACTOR = 2;

  @BeforeClass
  public static void setupCluster() throws Exception {
    configureCluster(1)
        .addConfig("config", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
        .configure();

    CollectionAdminRequest
        .createCollection(COLLECTION, "config", NUM_SHARD, REPLICA_FACTOR)
        .setMaxShardsPerNode(NUM_SHARD * REPLICA_FACTOR)
        .process(cluster.getSolrClient());
    AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION, cluster.getSolrClient().getZkStateReader(),
        false, true, 30);
  }

  @Test
  public void test() throws Exception {
    // Updates are routed to shard leaders only, so exactly NUM_SHARD distinct
    // cores should be chosen; selects may be served by any replica, so all
    // NUM_SHARD * REPLICA_FACTOR cores should eventually be chosen.
    assertCoreChosen(NUM_SHARD, new TestRequest("/collection1/update"));
    assertCoreChosen(NUM_SHARD, new TestRequest("/collection1/update/json"));
    assertCoreChosen(NUM_SHARD * REPLICA_FACTOR, new TestRequest("/collection1/select"));
  }

  private void assertCoreChosen(int numCores, TestRequest testRequest) {
    JettySolrRunner jettySolrRunner = cluster.getJettySolrRunner(0);
    Set<String> coreNames = new HashSet<>();
    SolrDispatchFilter dispatchFilter = jettySolrRunner.getSolrDispatchFilter();
    for (int i = 0; i < NUM_SHARD * REPLICA_FACTOR * 20; i++) {
      if (coreNames.size() == numCores) return;
      HttpSolrCall httpSolrCall = new HttpSolrCall(dispatchFilter, dispatchFilter.getCores(), testRequest, new TestResponse(), false);
      try {
        httpSolrCall.init();
      } catch (Exception e) {
        // Ignored: core selection happens inside init(), and the chosen core is
        // still recorded in the finally block even when the stubbed request
        // makes init() fail.
      } finally {
        coreNames.add(httpSolrCall.core.getName());
        httpSolrCall.destroy();
      }
    }
    assertEquals(numCores, coreNames.size());
  }

  private static class TestResponse extends Response {

    public TestResponse() {
      super(null, null);
    }

    @Override
    public ServletOutputStream getOutputStream() throws IOException {
      return new ServletOutputStream() {
        @Override
        public boolean isReady() {
          return true;
        }

        @Override
        public void setWriteListener(WriteListener writeListener) {
        }

        @Override
        public void write(int b) throws IOException {
        }
      };
    }

    @Override
    public boolean isCommitted() {
      return true;
    }
  }

  private static class TestRequest extends Request {
    private String path;

    public TestRequest(String path) {
      super(null, null);
      this.path = path;
    }

    @Override
    public String getQueryString() {
      return "wt=json&version=2";
    }

    @Override
    public String getContentType() {
      return "application/json";
    }

    @Override
    public String getServletPath() {
      return path;
    }

    @Override
    public String getRequestURI() {
      return path;
    }

    @Override
    public ServletInputStream getInputStream() throws IOException {
      return new ServletInputStream() {
        @Override
        public boolean isFinished() {
          return true;
        }

        @Override
        public boolean isReady() {
          return true;
        }

        @Override
        public void setReadListener(ReadListener readListener) {
        }

        @Override
        public int read() throws IOException {
          return 0;
        }
      };
    }
  }
}
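For context on the assertions above: with NUM_SHARD = 3 and REPLICA_FACTOR = 2, update requests go only to the three shard leaders, while select requests can be served by any of the six replicas. A minimal sketch of that arithmetic, assuming only the constants from the test:

// Sketch only: mirrors the distinct-core counts the test asserts.
int numShards = 3;                                  // NUM_SHARD
int replicationFactor = 2;                          // REPLICA_FACTOR
int updateTargets = numShards;                      // leaders only -> 3 cores
int selectTargets = numShards * replicationFactor;  // any replica  -> 6 cores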
@@ -110,7 +110,18 @@ public class BlockDirectoryTest extends SolrTestCaseJ4 {
    file = createTempDir().toFile();
    FSDirectory dir = FSDirectory.open(new File(file, "base").toPath());
    mapperCache = new MapperCache();

    if (random().nextBoolean()) {
      Metrics metrics = new Metrics();
      int blockSize = 8192;
      int slabSize = blockSize * 32768;
      long totalMemory = 2 * slabSize;
      BlockCache blockCache = new BlockCache(metrics, true, totalMemory, slabSize, blockSize);
      BlockDirectoryCache cache = new BlockDirectoryCache(blockCache, "/collection1", metrics, true);
      directory = new BlockDirectory("test", dir, cache, null, true, false);
    } else {
      directory = new BlockDirectory("test", dir, mapperCache, null, true, true);
    }
    random = random();
  }
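The cache sizing above is easy to misread; it multiplies out as follows (a sketch of the arithmetic only, not part of the test):

// 8 KiB blocks, 32768 blocks per slab, two slabs in total.
int blockSize = 8192;
int slabSize = blockSize * 32768;  // 268,435,456 bytes = 256 MiB per slab
long totalMemory = 2L * slabSize;  // 536,870,912 bytes = 512 MiB overall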
@@ -436,6 +436,9 @@
         the cache.  (see java.util.HashMap)
         autowarmCount - the number of entries to prepopulate from
                         an old cache.
         maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
                    to occupy. Note that when this option is specified, the size
                    and initialSize parameters are ignored.
      -->
    <filterCache class="solr.FastLRUCache"
                 size="512"
@@ -436,6 +436,9 @@
         the cache.  (see java.util.HashMap)
         autowarmCount - the number of entries to prepopulate from
                         an old cache.
         maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
                    to occupy. Note that when this option is specified, the size
                    and initialSize parameters are ignored.
      -->
    <filterCache class="solr.FastLRUCache"
                 size="512"
@@ -450,6 +450,9 @@
         the cache.  (see java.util.HashMap)
         autowarmCount - the number of entries to prepopulate from
                         an old cache.
         maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
                    to occupy. Note that when this option is specified, the size
                    and initialSize parameters are ignored.
      -->
    <filterCache class="solr.FastLRUCache"
                 size="512"
@@ -51,6 +51,8 @@ public class DocCollection extends ZkNodeProps implements Iterable<Slice> {
  private final String name;
  private final Map<String, Slice> slices;
  private final Map<String, Slice> activeSlices;
  private final Map<String, List<Replica>> nodeNameReplicas;
  private final Map<String, List<Replica>> nodeNameLeaderReplicas;
  private final DocRouter router;
  private final String znode;
@@ -76,6 +78,8 @@ public class DocCollection extends ZkNodeProps implements Iterable<Slice> {

    this.slices = slices;
    this.activeSlices = new HashMap<>();
    this.nodeNameLeaderReplicas = new HashMap<>();
    this.nodeNameReplicas = new HashMap<>();
    this.replicationFactor = (Integer) verifyProp(props, REPLICATION_FACTOR);
    this.maxShardsPerNode = (Integer) verifyProp(props, MAX_SHARDS_PER_NODE);
    Boolean autoAddReplicas = (Boolean) verifyProp(props, AUTO_ADD_REPLICAS);
@@ -86,14 +90,36 @@

    while (iter.hasNext()) {
      Map.Entry<String, Slice> slice = iter.next();
      if (slice.getValue().getState() == Slice.State.ACTIVE)
      if (slice.getValue().getState() == Slice.State.ACTIVE) {
        this.activeSlices.put(slice.getKey(), slice.getValue());
      }
      for (Replica replica : slice.getValue()) {
        addNodeNameReplica(replica);
      }
    }
    this.router = router;
    this.znode = znode == null ? ZkStateReader.CLUSTER_STATE : znode;
    assert name != null && slices != null;
  }

  private void addNodeNameReplica(Replica replica) {
    List<Replica> replicas = nodeNameReplicas.get(replica.getNodeName());
    if (replicas == null) {
      replicas = new ArrayList<>();
      nodeNameReplicas.put(replica.getNodeName(), replicas);
    }
    replicas.add(replica);

    if (replica.getStr(Slice.LEADER) != null) {
      List<Replica> leaderReplicas = nodeNameLeaderReplicas.get(replica.getNodeName());
      if (leaderReplicas == null) {
        leaderReplicas = new ArrayList<>();
        nodeNameLeaderReplicas.put(replica.getNodeName(), leaderReplicas);
      }
      leaderReplicas.add(replica);
    }
  }

  public static Object verifyProp(Map<String, Object> props, String propName) {
    Object o = props.get(propName);
    if (o == null) return null;
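The get-or-create sequence in addNodeNameReplica above can also be expressed with Map.computeIfAbsent; a self-contained sketch of the equivalent pattern (types simplified to String, not the patch's own code):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class ReplicaIndexSketch {
  private final Map<String, List<String>> byNode = new HashMap<>();

  // Same behavior as the null-check-then-put above, in a single call.
  void add(String nodeName, String replicaName) {
    byNode.computeIfAbsent(nodeName, k -> new ArrayList<>()).add(replicaName);
  }
}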
@@ -160,6 +186,20 @@ public class DocCollection extends ZkNodeProps implements Iterable<Slice> {
    return activeSlices;
  }

  /**
   * Get the list of replicas hosted on the given node or <code>null</code> if none.
   */
  public List<Replica> getReplicas(String nodeName) {
    return nodeNameReplicas.get(nodeName);
  }

  /**
   * Get the list of all leaders hosted on the given node or <code>null</code> if none.
   */
  public List<Replica> getLeaderReplicas(String nodeName) {
    return nodeNameLeaderReplicas.get(nodeName);
  }

  public int getZNodeVersion() {
    return znodeVersion;
  }
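A short usage sketch of the two new accessors; it assumes a ZkStateReader named zkStateReader is in scope, and the collection and node names are placeholders, not values from this patch:

// Both accessors return null (not an empty list) when the node hosts nothing.
DocCollection coll = zkStateReader.getClusterState().getCollection("collection1");
List<Replica> hosted = coll.getReplicas("127.0.0.1:8983_solr");
List<Replica> leaders = coll.getLeaderReplicas("127.0.0.1:8983_solr");
if (hosted != null) {
  System.out.println(hosted.size() + " replicas, "
      + (leaders == null ? 0 : leaders.size()) + " leaders on this node");
}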
@@ -23,14 +23,13 @@ import java.util.Arrays;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.util.LuceneTestCase;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.io.SolrClientCache;
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.comp.ComparatorOrder;
@@ -48,10 +47,8 @@ import org.apache.solr.client.solrj.io.stream.metrics.MinMetric;
import org.apache.solr.client.solrj.io.stream.metrics.SumMetric;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.cloud.AbstractDistribZkTestBase;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.junit.Before;
@@ -986,7 +983,7 @@ public class StreamingTest extends SolrCloudTestCase {
      pairs.add("d_sing");
      pairs.add(Double.toString(iSeq + 5)); // 105
      pairs.add("dt_sing");
      pairs.add(String.format("2000-01-01T%02d:00:00Z", base)); // Works as long as we add fewer than 60 docs
      pairs.add(String.format(Locale.ROOT, "2000-01-01T%02d:00:00Z", base)); // Works as long as we add fewer than 60 docs
      pairs.add("b_sing");
      pairs.add((base % 2) == 0 ? "T" : "F"); // Tricky
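The only change in this hunk is the added Locale.ROOT: without an explicit locale, String.format uses the JVM default, and some default locales render %02d with non-ASCII digits, corrupting the fixed-format timestamp. A minimal runnable sketch of the safe form (the hour value 5 is illustrative):

import java.util.Locale;

class TimestampFormatSketch {
  public static void main(String[] args) {
    // Locale.ROOT guarantees ASCII digits regardless of the default locale.
    System.out.println(String.format(Locale.ROOT, "2000-01-01T%02d:00:00Z", 5));
    // -> 2000-01-01T05:00:00Z
  }
}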