Merge branch 'master' into mockfilesystem

Robert Muir 2015-04-17 07:55:48 -04:00
commit 89b9f0e541
26 changed files with 186 additions and 208 deletions

View File

@@ -148,6 +148,8 @@ launch_service()
LANG=en_US.UTF-8
LC_ALL=en_US.UTF-8
export HOSTNAME=`hostname -s`
# The es.foreground option will tell Elasticsearch not to close stdout/stderr, but it's up to us not to daemonize.
if [ "x$daemonized" = "x" ]; then
es_parms="$es_parms -Des.foreground=yes"

View File

@@ -33,6 +33,8 @@ FOR /F "usebackq tokens=1* delims= " %%A IN (!params!) DO (
)
)
SET HOSTNAME=%COMPUTERNAME%
CALL "%~dp0elasticsearch.in.bat"
IF ERRORLEVEL 1 (
IF NOT DEFINED nopauseonerror (

View File

@@ -208,6 +208,17 @@ node:
name: <NAME OF YOUR NODE>
--------------------------------------------------
The hostname of the machine is provided in the environment
variable `HOSTNAME`. If you run only a single Elasticsearch
node per machine for a given cluster, you can set the node
name to the hostname using the `${...}` notation:
[source,yaml]
--------------------------------------------------
node:
name: ${HOSTNAME}
--------------------------------------------------
Internally, all settings are collapsed into "namespaced" settings. For
example, the above gets collapsed into `node.name`. This means that
it's easy to support other configuration formats, for example,
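To make the collapsing concrete, the example above can equivalently be written in flat form:
[source,yaml]
--------------------------------------------------
node.name: ${HOSTNAME}
--------------------------------------------------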

View File

@@ -32,7 +32,7 @@
<properties>
<lucene.version>5.2.0</lucene.version>
<lucene.maven.version>5.2.0-snapshot-1673726</lucene.maven.version>
<lucene.maven.version>5.2.0-snapshot-1674183</lucene.maven.version>
<tests.jvms>auto</tests.jvms>
<tests.shuffle>true</tests.shuffle>
<tests.output>onerror</tests.output>
@@ -66,7 +66,7 @@
<repository>
<id>lucene-snapshots</id>
<name>Lucene Snapshots</name>
<url>https://download.elastic.co/lucenesnapshots/1673726</url>
<url>https://download.elastic.co/lucenesnapshots/1674183</url>
</repository>
</repositories>

View File

@@ -19,15 +19,24 @@
package org.apache.lucene.queries;
import com.google.common.primitives.Ints;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.InPlaceMergeSorter;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Set;
/**
* BlendedTermQuery can be used to unify term statistics across
@@ -81,7 +90,7 @@ public abstract class BlendedTermQuery extends Query {
protected abstract Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc);
protected void blend(TermContext[] contexts, int maxDoc, IndexReader reader) throws IOException {
protected void blend(final TermContext[] contexts, int maxDoc, IndexReader reader) throws IOException {
if (contexts.length <= 1) {
return;
}
@@ -113,15 +122,23 @@ public abstract class BlendedTermQuery extends Query {
return; // we are done, that term doesn't exist at all
}
long sumTTF = minSumTTF == -1 ? -1 : 0;
final TermContext[] tieBreak = new TermContext[contexts.length];
System.arraycopy(contexts, 0, tieBreak, 0, contexts.length);
ArrayUtil.timSort(tieBreak, new Comparator<TermContext>() {
@Override
public int compare(TermContext o1, TermContext o2) {
return Ints.compare(o2.docFreq(), o1.docFreq());
final int[] tieBreak = new int[contexts.length];
for (int i = 0; i < tieBreak.length; ++i) {
tieBreak[i] = i;
}
});
int prev = tieBreak[0].docFreq();
new InPlaceMergeSorter() {
@Override
protected void swap(int i, int j) {
final int tmp = tieBreak[i];
tieBreak[i] = tieBreak[j];
tieBreak[j] = tmp;
}
@Override
protected int compare(int i, int j) {
return Ints.compare(contexts[tieBreak[j]].docFreq(), contexts[tieBreak[i]].docFreq());
}
}.sort(0, tieBreak.length);
int prev = contexts[tieBreak[0]].docFreq();
int actualDf = Math.min(maxDoc, max);
assert actualDf >= 0 : "DF must be >= 0";
@@ -129,7 +146,8 @@ public abstract class BlendedTermQuery extends Query {
// here we try to add a little bias towards
// the more popular (more frequent) fields
// that acts as a tie breaker
for (TermContext ctx : tieBreak) {
for (int i : tieBreak) {
TermContext ctx = contexts[i];
if (ctx.docFreq() == 0) {
break;
}
@@ -137,7 +155,7 @@ public abstract class BlendedTermQuery extends Query {
if (prev > current) {
actualDf++;
}
ctx.setDocFreq(Math.min(maxDoc, actualDf));
contexts[i] = ctx = adjustDF(ctx, Math.min(maxDoc, actualDf));
prev = current;
if (sumTTF >= 0 && ctx.totalTermFreq() >= 0) {
sumTTF += ctx.totalTermFreq();
@@ -183,19 +201,40 @@ public abstract class BlendedTermQuery extends Query {
return newTermContext;
}
private static TermContext adjustDF(TermContext ctx, int newDocFreq) {
// Use a value of ttf that is consistent with the doc freq (i.e. ttf >= newDocFreq)
long newTTF;
if (ctx.totalTermFreq() < 0) {
newTTF = -1;
} else {
newTTF = Math.max(ctx.totalTermFreq(), newDocFreq);
}
List<LeafReaderContext> leaves = ctx.topReaderContext.leaves();
final int len;
if (leaves == null) {
len = 1;
} else {
len = leaves.size();
}
TermContext newCtx = new TermContext(ctx.topReaderContext);
// Attribute the full adjusted stats to the first leaf that holds the term;
// subsequent leaves register with 0 so the aggregate df/ttf stays exact.
for (int i = 0; i < len; ++i) {
TermState termState = ctx.get(i);
if (termState == null) {
continue;
}
newCtx.register(termState, i, newDocFreq, newTTF);
newDocFreq = 0;
newTTF = 0;
}
return newCtx;
}
@Override
public String toString(String field) {
return "blended(terms: " + Arrays.toString(terms) + ")";
}
@Override
public void extractTerms(Set<Term> terms) {
for (Term term : this.terms) {
terms.add(term);
}
}
private volatile Term[] equalTerms = null;
private Term[] equalsTerms() {
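The blend() change above stops copying and timSort-ing the TermContext[] and instead sorts an index permutation with Lucene's InPlaceMergeSorter, leaving contexts itself in its original order. A minimal standalone sketch of that index-sorting pattern (class name and sample data are made up for illustration):

import org.apache.lucene.util.InPlaceMergeSorter;

public class IndexPermutationSortDemo {
    public static void main(String[] args) {
        final int[] docFreqs = {3, 17, 5, 17, 1};
        final int[] order = new int[docFreqs.length];
        for (int i = 0; i < order.length; ++i) {
            order[i] = i; // identity permutation
        }
        new InPlaceMergeSorter() {
            @Override
            protected void swap(int i, int j) {
                final int tmp = order[i];
                order[i] = order[j];
                order[j] = tmp;
            }
            @Override
            protected int compare(int i, int j) {
                // j before i: descending by docFreq, like the tie-break in blend()
                return Integer.compare(docFreqs[order[j]], docFreqs[order[i]]);
            }
        }.sort(0, order.length);
        for (int i : order) {
            System.out.println(docFreqs[i]); // prints 17, 17, 5, 3, 1
        }
    }
}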

View File

@@ -30,11 +30,11 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.automaton.RegExp;
import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.mapper.FieldMapper;

View File

@@ -62,10 +62,12 @@ public class XPostingsHighlighter {
// unnecessary.
/** for rewriting: we don't want slow processing from MTQs */
private static final IndexReader EMPTY_INDEXREADER;
private static final IndexSearcher EMPTY_INDEXSEARCHER;
static {
try {
EMPTY_INDEXREADER = new MultiReader();
IndexReader emptyReader = new MultiReader();
EMPTY_INDEXSEARCHER = new IndexSearcher(emptyReader);
EMPTY_INDEXSEARCHER.setQueryCache(null);
} catch (IOException bogus) {
throw new RuntimeException(bogus);
}
@@ -291,12 +293,10 @@ public class XPostingsHighlighter {
if (fieldsIn.length != maxPassagesIn.length) {
throw new IllegalArgumentException("invalid number of maxPassagesIn");
}
final IndexReader reader = searcher.getIndexReader();
query = rewrite(query);
SortedSet<Term> queryTerms = new TreeSet<>();
query.extractTerms(queryTerms);
EMPTY_INDEXSEARCHER.createNormalizedWeight(query, false).extractTerms(queryTerms);
IndexReaderContext readerContext = reader.getContext();
IndexReaderContext readerContext = searcher.getIndexReader().getContext();
List<LeafReaderContext> leaves = readerContext.leaves();
// Make our own copies because we sort in-place:
@@ -714,19 +714,6 @@ public class XPostingsHighlighter {
public long cost() { return 0; }
};
/**
* we rewrite against an empty indexreader: as we don't want things like
* rangeQueries that don't summarize the document
*/
private static Query rewrite(Query original) throws IOException {
Query query = original;
for (Query rewrittenQuery = query.rewrite(EMPTY_INDEXREADER); rewrittenQuery != query;
rewrittenQuery = query.rewrite(EMPTY_INDEXREADER)) {
query = rewrittenQuery;
}
return query;
}
private static class LimitedStoredFieldVisitor extends StoredFieldVisitor {
private final String fields[];
private final char valueSeparators[];

View File

@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.validate.query;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
@@ -37,7 +38,6 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.search.MatchNoDocsFilter;
import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;

View File

@@ -525,7 +525,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
class NotifyTimeout implements Runnable {
final TimeoutClusterStateListener listener;
final TimeValue timeout;
ScheduledFuture future;
volatile ScheduledFuture future;
NotifyTimeout(TimeoutClusterStateListener listener, TimeValue timeout) {
this.listener = listener;
@@ -538,7 +538,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
@Override
public void run() {
if (future.isCancelled()) {
if (future != null && future.isCancelled()) {
return;
}
if (lifecycle.stoppedOrClosed()) {
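The volatile qualifier and the added null check close a small race: the timer task can fire before the scheduling thread has assigned future, since the field is only written after schedule() returns. A minimal sketch of the pattern under those assumptions (class name and timings are made up):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class TimeoutDemo {
    // Written by the scheduling thread after schedule() returns, read by the
    // timer thread inside run(): volatile makes the write visible, and the
    // null check covers the window before the assignment happens.
    volatile ScheduledFuture<?> future;

    void run() {
        if (future != null && future.isCancelled()) {
            return; // cancelled before firing, nothing to do
        }
        // ... notify the listener of the timeout ...
    }

    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        final TimeoutDemo timeout = new TimeoutDemo();
        timeout.future = scheduler.schedule(new Runnable() {
            @Override
            public void run() {
                timeout.run();
            }
        }, 10, TimeUnit.MILLISECONDS);
        Thread.sleep(50);
        scheduler.shutdown();
    }
}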

View File

@@ -1,96 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.util.Bits;
import java.io.IOException;
import java.util.Set;
/**
* Query that matches no documents.
*/
public final class MatchNoDocsQuery extends Query {
/**
* Weight implementation that matches no documents.
*/
private class MatchNoDocsWeight extends Weight {
MatchNoDocsWeight(Query parent) {
super(parent);
}
@Override
public String toString() {
return "weight(" + MatchNoDocsQuery.this + ")";
}
@Override
public float getValueForNormalization() throws IOException {
return 0;
}
@Override
public void normalize(float norm, float topLevelBoost) {
}
@Override
public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
return null;
}
@Override
public Explanation explain(final LeafReaderContext context,
final int doc) {
return new ComplexExplanation(false, 0, "MatchNoDocs matches nothing");
}
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new MatchNoDocsWeight(this);
}
@Override
public void extractTerms(final Set<Term> terms) {
}
@Override
public String toString(final String field) {
return "MatchNoDocsQuery";
}
@Override
public boolean equals(final Object o) {
if (o instanceof MatchNoDocsQuery) {
return getBoost() == ((MatchNoDocsQuery) o).getBoost();
}
return false;
}
@Override
public int hashCode() {
return getClass().hashCode() ^ Float.floatToIntBits(getBoost());
}
}
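This Elasticsearch-private MatchNoDocsQuery is deleted because the Lucene snapshot this commit upgrades to ships its own org.apache.lucene.search.MatchNoDocsQuery (note the import changes in the neighbouring files). A minimal sketch of the replacement, assuming the snapshot's API:

import java.io.IOException;

import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.TopDocs;

public class MatchNoDocsDemo {
    public static void main(String[] args) throws IOException {
        // The query is valid against any index and simply produces zero hits.
        IndexSearcher searcher = new IndexSearcher(new MultiReader());
        TopDocs hits = searcher.search(new MatchNoDocsQuery(), 10);
        System.out.println(hits.totalHits); // 0
    }
}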

View File

@@ -20,7 +20,13 @@
package org.elasticsearch.common.lucene.search;
import com.carrotsearch.hppc.ObjectOpenHashSet;
import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
@@ -28,7 +34,12 @@ import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.ToStringUtils;
import java.io.IOException;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
public class MultiPhrasePrefixQuery extends Query {

View File

@@ -48,7 +48,7 @@ public class Queries {
/** Return a query that matches no document. */
public static Query newMatchNoDocsQuery() {
return new MatchNoDocsQuery();
return new BooleanQuery();
}
public static boolean isNegativeQuery(Query q) {
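Note that a BooleanQuery with no clauses matches no documents, so newMatchNoDocsQuery() still returns a match-none query after swapping in new BooleanQuery().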

View File

@@ -114,11 +114,6 @@ public class FiltersFunctionScoreQuery extends Query {
return bq;
}
@Override
public void extractTerms(Set<Term> terms) {
subQuery.extractTerms(terms);
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
// TODO: needsScores
@@ -136,6 +131,11 @@ public class FiltersFunctionScoreQuery extends Query {
this.subQueryWeight = subQueryWeight;
}
@Override
public void extractTerms(Set<Term> terms) {
subQueryWeight.extractTerms(terms);
}
@Override
public float getValueForNormalization() throws IOException {
float sum = subQueryWeight.getValueForNormalization();

View File

@@ -85,11 +85,6 @@ public class FunctionScoreQuery extends Query {
return bq;
}
@Override
public void extractTerms(Set<Term> terms) {
subQuery.extractTerms(terms);
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
// TODO: needsScores
@@ -107,6 +102,11 @@ public class FunctionScoreQuery extends Query {
this.subQueryWeight = subQueryWeight;
}
@Override
public void extractTerms(Set<Term> terms) {
subQueryWeight.extractTerms(terms);
}
@Override
public float getValueForNormalization() throws IOException {
float sum = subQueryWeight.getValueForNormalization();

View File

@@ -85,13 +85,6 @@ public class ChildrenConstantScoreQuery extends Query {
return this;
}
@Override
public void extractTerms(Set<Term> terms) {
if (rewrittenChildQuery != null) {
rewrittenChildQuery.extractTerms(terms);
}
}
@Override
public Query clone() {
ChildrenConstantScoreQuery q = (ChildrenConstantScoreQuery) super.clone();
@@ -202,6 +195,10 @@ public class ChildrenConstantScoreQuery extends Query {
this.remaining = remaining;
}
@Override
public void extractTerms(Set<Term> terms) {
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
return new Explanation(getBoost(), "not implemented yet...");

View File

@@ -159,11 +159,6 @@ public class ChildrenQuery extends Query {
return q;
}
@Override
public void extractTerms(Set<Term> terms) {
rewrittenChildQuery.extractTerms(terms);
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
SearchContext sc = SearchContext.current();
@@ -262,6 +257,10 @@ public class ChildrenQuery extends Query {
this.maxChildren = maxChildren;
}
@Override
public void extractTerms(Set<Term> terms) {
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
return new Explanation(getBoost(), "not implemented yet...");

View File

@@ -66,11 +66,6 @@ public class ParentConstantScoreQuery extends Query {
return this;
}
@Override
public void extractTerms(Set<Term> terms) {
rewrittenParentQuery.extractTerms(terms);
}
@Override
public Query clone() {
ParentConstantScoreQuery q = (ParentConstantScoreQuery) super.clone();
@@ -165,6 +160,10 @@ public class ParentConstantScoreQuery extends Query {
this.parentOrds = collector.parentOrds;
}
@Override
public void extractTerms(Set<Term> terms) {
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
return new Explanation(getBoost(), "not implemented yet...");

View File

@@ -107,11 +107,6 @@ public class ParentQuery extends Query {
return this;
}
@Override
public void extractTerms(Set<Term> terms) {
rewrittenParentQuery.extractTerms(terms);
}
@Override
public Query clone() {
ParentQuery q = (ParentQuery) super.clone();
@@ -231,6 +226,10 @@ public class ParentQuery extends Query {
this.globalIfd = globalIfd;
}
@Override
public void extractTerms(Set<Term> terms) {
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
return new Explanation(getBoost(), "not implemented yet...");

View File

@@ -110,11 +110,6 @@ public class TopChildrenQuery extends Query {
return q;
}
@Override
public void extractTerms(Set<Term> terms) {
rewrittenChildQuery.extractTerms(terms);
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
ObjectObjectOpenHashMap<Object, ParentDoc[]> parentDocs = new ObjectObjectOpenHashMap<>();
@@ -305,6 +300,10 @@ public class TopChildrenQuery extends Query {
this.parentDocs = parentDocs;
}
@Override
public void extractTerms(Set<Term> terms) {
}
@Override
public float getValueForNormalization() throws IOException {
float sum = queryWeight.getValueForNormalization();

View File

@@ -91,6 +91,11 @@ public class IncludeNestedDocsQuery extends Query {
this.parentsFilter = parentsFilter;
}
@Override
public void extractTerms(Set<Term> terms) {
parentWeight.extractTerms(terms);
}
@Override
public void normalize(float norm, float topLevelBoost) {
parentWeight.normalize(norm, topLevelBoost);
@@ -246,11 +251,6 @@ public class IncludeNestedDocsQuery extends Query {
}
}
@Override
public void extractTerms(Set<Term> terms) {
parentQuery.extractTerms(terms);
}
@Override
public Query rewrite(IndexReader reader) throws IOException {
final Query parentRewrite = parentQuery.rewrite(reader);

View File

@@ -61,7 +61,7 @@ public class DfsPhase implements SearchPhase {
context.updateRewriteQuery(context.searcher().rewrite(context.query()));
}
context.query().extractTerms(new DelegateSet(termsSet));
context.searcher().createNormalizedWeight(context.query(), true).extractTerms(new DelegateSet(termsSet));
for (RescoreSearchContext rescoreContext : context.rescore()) {
rescoreContext.rescorer().extractTerms(context, rescoreContext, new DelegateSet(termsSet));
}
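This one-line change shows the pattern repeated throughout this merge: Lucene moved extractTerms(Set<Term>) from Query to Weight, so callers first build a Weight via IndexSearcher.createNormalizedWeight(query, needsScores) and extract terms from that (DfsPhase passes needsScores=true because the dfs phase feeds scoring). A self-contained sketch of the new API, with made-up field and term values:

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight;

public class WeightExtractTermsDemo {
    public static void main(String[] args) throws IOException {
        // An empty reader suffices when only the query's terms are needed,
        // the same trick XPostingsHighlighter plays with EMPTY_INDEXSEARCHER.
        IndexSearcher searcher = new IndexSearcher(new MultiReader());
        // needsScores=false: we only want the terms, not scoring structures
        Weight weight = searcher.createNormalizedWeight(
                new TermQuery(new Term("body", "highlighting")), false);
        Set<Term> terms = new HashSet<>();
        weight.extractTerms(terms);
        System.out.println(terms); // [body:highlighting]
    }
}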

View File

@@ -20,6 +20,7 @@ package org.elasticsearch.search.highlight;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@@ -31,6 +32,7 @@ import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoringRewrite;
import org.apache.lucene.search.TopTermsRewrite;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.highlight.Encoder;
import org.apache.lucene.search.postingshighlight.CustomPassageFormatter;
import org.apache.lucene.search.postingshighlight.CustomPostingsHighlighter;
@@ -84,11 +86,11 @@ public class PostingsHighlighter implements Highlighter {
Query query;
try {
query = rewrite(highlighterContext, hitContext.topLevelReader());
SortedSet<Term> queryTerms = extractTerms(context.searcher().createNormalizedWeight(query, false));
hitContext.cache().put(CACHE_KEY, new HighlighterEntry(queryTerms));
} catch (IOException e) {
throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
SortedSet<Term> queryTerms = extractTerms(query);
hitContext.cache().put(CACHE_KEY, new HighlighterEntry(queryTerms));
}
HighlighterEntry highlighterEntry = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
@@ -220,9 +222,9 @@ public class PostingsHighlighter implements Highlighter {
return rewriteMethod instanceof TopTermsRewrite || rewriteMethod instanceof ScoringRewrite;
}
private static SortedSet<Term> extractTerms(Query query) {
private static SortedSet<Term> extractTerms(Weight weight) {
SortedSet<Term> queryTerms = new TreeSet<>();
query.extractTerms(queryTerms);
weight.extractTerms(queryTerms);
return queryTerms;
}

View File

@@ -20,8 +20,13 @@
package org.elasticsearch.search.rescore;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.query.ParsedQuery;
@@ -307,7 +312,11 @@ public final class QueryRescorer implements Rescorer {
@Override
public void extractTerms(SearchContext context, RescoreSearchContext rescoreContext, Set<Term> termsSet) {
((QueryRescoreContext) rescoreContext).query().extractTerms(termsSet);
try {
context.searcher().createNormalizedWeight(((QueryRescoreContext) rescoreContext).query(), false).extractTerms(termsSet);
} catch (IOException e) {
throw new ElasticsearchIllegalStateException("Failed to extract terms", e);
}
}
}

View File

@@ -26,8 +26,16 @@ import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;
@@ -37,7 +45,11 @@ import org.elasticsearch.test.ESTestCase;
import org.junit.Test;
import java.io.IOException;
import java.util.*;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
@@ -210,7 +222,7 @@ public class BlendedTermQueryTest extends ESTestCase {
}
@Test
public void testExtractTerms() {
public void testExtractTerms() throws IOException {
Set<Term> terms = new HashSet<>();
int num = scaledRandomIntBetween(1, 10);
for (int i = 0; i < num; i++) {
@@ -220,7 +232,8 @@ public class BlendedTermQueryTest extends ESTestCase {
BlendedTermQuery blendedTermQuery = random().nextBoolean() ? BlendedTermQuery.dismaxBlendedQuery(terms.toArray(new Term[0]), random().nextFloat()) :
BlendedTermQuery.booleanBlendedQuery(terms.toArray(new Term[0]), random().nextBoolean());
Set<Term> extracted = new HashSet<>();
blendedTermQuery.extractTerms(extracted);
IndexSearcher searcher = new IndexSearcher(new MultiReader());
searcher.createNormalizedWeight(blendedTermQuery, false).extractTerms(extracted);
assertThat(extracted.size(), equalTo(terms.size()));
assertThat(extracted, containsInAnyOrder(terms.toArray(new Term[0])));
}

View File

@@ -33,6 +33,7 @@ import org.elasticsearch.search.highlight.HighlightUtils;
import org.elasticsearch.test.ESTestCase;
import org.junit.Test;
import java.io.IOException;
import java.util.*;
import static org.hamcrest.CoreMatchers.equalTo;
@@ -79,7 +80,7 @@ public class CustomPostingsHighlighterTests extends ESTestCase {
IndexSearcher searcher = newSearcher(ir);
Query query = new TermQuery(new Term("body", "highlighting"));
BytesRef[] queryTerms = filterTerms(extractTerms(query), "body", true);
BytesRef[] queryTerms = filterTerms(extractTerms(searcher, query), "body", true);
TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
assertThat(topDocs.totalHits, equalTo(1));
@@ -174,7 +175,7 @@ public class CustomPostingsHighlighterTests extends ESTestCase {
IndexSearcher searcher = newSearcher(ir);
Query query = new TermQuery(new Term("body", "highlighting"));
BytesRef[] queryTerms = filterTerms(extractTerms(query), "body", true);
BytesRef[] queryTerms = filterTerms(extractTerms(searcher, query), "body", true);
TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
assertThat(topDocs.totalHits, equalTo(1));
@@ -289,7 +290,7 @@ public class CustomPostingsHighlighterTests extends ESTestCase {
IndexSearcher searcher = newSearcher(ir);
Query query = new TermQuery(new Term("body", "highlighting"));
BytesRef[] queryTerms = filterTerms(extractTerms(query), "body", true);
BytesRef[] queryTerms = filterTerms(extractTerms(searcher, query), "body", true);
TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
assertThat(topDocs.totalHits, equalTo(1));
@@ -376,9 +377,8 @@ public class CustomPostingsHighlighterTests extends ESTestCase {
iw.close();
Query query = new TermQuery(new Term("none", "highlighting"));
SortedSet<Term> queryTerms = extractTerms(query);
IndexSearcher searcher = newSearcher(ir);
SortedSet<Term> queryTerms = extractTerms(searcher, query);
TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
assertThat(topDocs.totalHits, equalTo(1));
int docId = topDocs.scoreDocs[0].doc;
@@ -432,9 +432,9 @@ public class CustomPostingsHighlighterTests extends ESTestCase {
iw.close();
Query query = new TermQuery(new Term("none", "highlighting"));
SortedSet<Term> queryTerms = extractTerms(query);
IndexSearcher searcher = newSearcher(ir);
SortedSet<Term> queryTerms = extractTerms(searcher, query);
TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
assertThat(topDocs.totalHits, equalTo(1));
int docId = topDocs.scoreDocs[0].doc;
@@ -458,9 +458,13 @@ public class CustomPostingsHighlighterTests extends ESTestCase {
dir.close();
}
private static SortedSet<Term> extractTerms(Query query) {
private static SortedSet<Term> extractTerms(IndexSearcher searcher, Query query) throws IOException {
return extractTerms(searcher.createNormalizedWeight(query, false));
}
private static SortedSet<Term> extractTerms(Weight weight) {
SortedSet<Term> queryTerms = new TreeSet<>();
query.extractTerms(queryTerms);
weight.extractTerms(queryTerms);
return queryTerms;
}

View File

@@ -33,6 +33,7 @@ import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;
import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
@@ -81,10 +82,10 @@ public class IndexTemplateFileLoadingTests extends ElasticsearchIntegrationTest
@Test
public void testThatLoadingTemplateFromFileWorks() throws Exception {
final int iters = scaledRandomIntBetween(5, 20);
final int iters = scaledRandomIntBetween(1, 5);
Set<String> indices = new HashSet<>();
for (int i = 0; i < iters; i++) {
String indexName = "foo" + randomRealisticUnicodeOfLengthBetween(0, 5);
String indexName = "foo" + randomAsciiOfLengthBetween(0, 5).toLowerCase(Locale.ROOT);
if (indices.contains(indexName)) {
continue;
}
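Both tweaks plausibly serve the mock filesystem work being merged: fewer iterations keep the randomized test quick, and lowercase ASCII index names (index names must be lowercase and end up as on-disk directory names) avoid unicode file names that a stricter mock filesystem might reject. This motivation is inferred from the diff, not stated in the commit.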