Upgrade to lucene-6.5.0-snapshot-f919485. (#23087)

Author: Adrien Grand
Date: 2017-02-10 15:08:47 +01:00 (committed by GitHub)
Parent: 7018b6ac6f
Commit: 709cc9ba65
66 changed files with 224 additions and 129 deletions

View File

@@ -1,6 +1,6 @@
 # When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
 elasticsearch = 6.0.0-alpha1
-lucene = 6.4.1
+lucene = 6.5.0-snapshot-f919485
 # optional dependencies
 spatial4j = 0.6

View File

@@ -1 +0,0 @@
-c6f0f593503080204e9d33189cdc59320f55db37

View File

@@ -0,0 +1 @@
+886c1da9adc3347f61ab95ecbf4dbeeaa0e7acb2

View File

@@ -1 +0,0 @@
-b0ab8aca2b0025b8733411778b6d27afe1c451f5

View File

@@ -0,0 +1 @@
+df9e94f63ad7d9188f14820c435ea1dc3c28d87a

View File

@@ -1 +0,0 @@
-2a18924b9e0ed86b318902cb475a0b9ca4d7be5b

View File

@@ -0,0 +1 @@
+3539f8dc9c3ed8ebe90afcb3daa2e9afcf5108d1

View File

@@ -1 +0,0 @@
-8297adfa469abd571079ee75a1645fc5124fff5b

View File

@@ -0,0 +1 @@
+da76338e4f299963da9d7ab33dae7586dfc902c2

View File

@@ -1 +0,0 @@
-95c0f76fc7893240483b25a5c420bed77a10d05d

View File

@@ -0,0 +1 @@
+f6318d120236c7ac03fca6bf98825b4cb4347fc8

View File

@@ -1 +0,0 @@
-6d2f1ff94dba19b9bc6f15930b7104b890cab1ce

View File

@@ -0,0 +1 @@
+68f045ff272e10c307fe25a1867c2948b614b57c

View File

@@ -1 +0,0 @@
-825a946902f03a38257851733da908949d69f3da

View File

@@ -0,0 +1 @@
+b58a7a15267614a9a14f7cf6257454e0c24b146d

View File

@@ -1 +0,0 @@
-4d147a6f0fcfc54630260d1bb3deecfc0d0d10f7

View File

@@ -0,0 +1 @@
+d5f00fcd00fee6906b563d201bc00bdea7a92baa

View File

@@ -1 +0,0 @@
-6de41d984c16185a244b52c4d069b00f5b2b120f

View File

@@ -0,0 +1 @@
+2664901a494d87e9f4cef65be14cca918da7c4f5

View File

@@ -1 +0,0 @@
-1fc5795a072770a2c47dce11a3c85a80f3437af6

View File

@@ -0,0 +1 @@
+476a79293f9a15ea1ee5f93684587205d03480d1

View File

@@ -1 +0,0 @@
-d4a49664668c58aa23aba74717f3d74a61378d7c

View File

@@ -0,0 +1 @@
+f4dd70223178cca067b0cade4e58c4d82bec87d6

View File

@@ -1 +0,0 @@
-d6ceb47fdea913a8f468a240a6ea307368094463

View File

@@ -0,0 +1 @@
+72c4ec5d811480164db556b54c7a76bd3ea16bd6

View File

@@ -1 +0,0 @@
-5b764e5f95f610f79622ca0e957bfc23f094e4c7

View File

@@ -0,0 +1 @@
+f7af3755fdd09df7c258c655aff03ddef9536a04

View File

@@ -1 +0,0 @@
-2c58459e671040046305dbee698fce1e6a1de71d

View File

@@ -0,0 +1 @@
+2bf820109203b990e93a05dade8dcebec6aeb71a

View File

@@ -1 +0,0 @@
-47014b400916eea259645a9e3118558ef6e95441

View File

@@ -0,0 +1 @@
+fc1f32923ee68761ee05051f4ef6f4a4ab3acdec

View File

@@ -112,9 +112,11 @@ public class Version implements Comparable<Version> {
     public static final Version V_5_2_1_UNRELEASED = new Version(V_5_2_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
     public static final int V_5_3_0_ID_UNRELEASED = 5030099;
     public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
+    public static final int V_5_4_0_ID_UNRELEASED = 5040099;
+    public static final Version V_5_4_0_UNRELEASED = new Version(V_5_4_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
     public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
     public static final Version V_6_0_0_alpha1_UNRELEASED =
-            new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
+            new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
     public static final Version CURRENT = V_6_0_0_alpha1_UNRELEASED;
     // unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)
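Note: the numeric IDs above follow the XXYYZZAA packing convention used throughout Version.java (XX = major, YY = minor, ZZ = revision, AA = alpha/beta/rc indicator, with 99 for a plain release). A small sketch of that convention, for orientation only:

    // Sketch of the assumed ID packing; not part of this commit.
    static int id(int major, int minor, int revision, int build) {
        return major * 1000000 + minor * 10000 + revision * 100 + build;
    }
    // id(5, 4, 0, 99) == 5040099  -> V_5_4_0_ID_UNRELEASED
    // id(6, 0, 0, 1)  == 6000001  -> V_6_0_0_alpha1_ID_UNRELEASED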

View File

@@ -20,7 +20,7 @@
 package org.elasticsearch.index.analysis;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.synonym.FlattenGraphFilter;
+import org.apache.lucene.analysis.core.FlattenGraphFilter;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;

View File

@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;
+import org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexSettings;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.CATENATE_ALL;
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.CATENATE_NUMBERS;
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.CATENATE_WORDS;
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.GENERATE_NUMBER_PARTS;
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.GENERATE_WORD_PARTS;
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.PRESERVE_ORIGINAL;
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.SPLIT_ON_CASE_CHANGE;
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.SPLIT_ON_NUMERICS;
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE;
+import static org.elasticsearch.index.analysis.WordDelimiterTokenFilterFactory.parseTypes;
+
+public class WordDelimiterGraphTokenFilterFactory extends AbstractTokenFilterFactory {
+
+    private final byte[] charTypeTable;
+    private final int flags;
+    private final CharArraySet protoWords;
+
+    public WordDelimiterGraphTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
+        super(indexSettings, name, settings);
+
+        // Sample Format for the type table:
+        // $ => DIGIT
+        // % => DIGIT
+        // . => DIGIT
+        // \u002C => DIGIT
+        // \u200D => ALPHANUM
+        List<String> charTypeTableValues = Analysis.getWordList(env, settings, "type_table");
+        if (charTypeTableValues == null) {
+            this.charTypeTable = WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE;
+        } else {
+            this.charTypeTable = parseTypes(charTypeTableValues);
+        }
+        int flags = 0;
+        // If set, causes parts of words to be generated: "PowerShot" => "Power" "Shot"
+        flags |= getFlag(GENERATE_WORD_PARTS, settings, "generate_word_parts", true);
+        // If set, causes number subwords to be generated: "500-42" => "500" "42"
+        flags |= getFlag(GENERATE_NUMBER_PARTS, settings, "generate_number_parts", true);
+        // If set, causes maximum runs of word parts to be catenated: "wi-fi" => "wifi"
+        flags |= getFlag(CATENATE_WORDS, settings, "catenate_words", false);
+        // If set, causes maximum runs of number parts to be catenated: "500-42" => "50042"
+        flags |= getFlag(CATENATE_NUMBERS, settings, "catenate_numbers", false);
+        // If set, causes all subword parts to be catenated: "wi-fi-4000" => "wifi4000"
+        flags |= getFlag(CATENATE_ALL, settings, "catenate_all", false);
+        // If set, causes "PowerShot" to be two tokens ("Power-Shot" remains two parts regardless)
+        flags |= getFlag(SPLIT_ON_CASE_CHANGE, settings, "split_on_case_change", true);
+        // If set, includes original words in subwords: "500-42" => "500" "42" "500-42"
+        flags |= getFlag(PRESERVE_ORIGINAL, settings, "preserve_original", false);
+        // If set, causes "j2se" to be three tokens: "j" "2" "se"
+        flags |= getFlag(SPLIT_ON_NUMERICS, settings, "split_on_numerics", true);
+        // If set, causes trailing "'s" to be removed for each subword: "O'Neil's" => "O", "Neil"
+        flags |= getFlag(STEM_ENGLISH_POSSESSIVE, settings, "stem_english_possessive", true);
+        // If not null, is the set of tokens to protect from being delimited
+        Set<?> protectedWords = Analysis.getWordSet(env, indexSettings.getIndexVersionCreated(), settings, "protected_words");
+        this.protoWords = protectedWords == null ? null : CharArraySet.copy(protectedWords);
+        this.flags = flags;
+    }
+
+    @Override
+    public TokenStream create(TokenStream tokenStream) {
+        return new WordDelimiterGraphFilter(tokenStream, charTypeTable, flags, protoWords);
+    }
+
+    private int getFlag(int flag, Settings settings, String key, boolean defaultValue) {
+        if (settings.getAsBoolean(key, defaultValue)) {
+            return flag;
+        }
+        return 0;
+    }
+}
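Note: with the defaults above and no settings overrides, the flag word passed to WordDelimiterGraphFilter reduces to the OR below. A minimal sketch against the Lucene 6.5 API; `input` is an assumed upstream TokenStream:

    // Hand-assembled equivalent of the factory defaults (illustrative only).
    int defaultFlags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS
            | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
    TokenStream filter = new WordDelimiterGraphFilter(
            input, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, defaultFlags, null);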

View File

@@ -113,7 +113,7 @@ public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory {
     /**
      * parses a list of MappingCharFilter style rules into a custom byte[] type table
      */
-    private byte[] parseTypes(Collection<String> rules) {
+    static byte[] parseTypes(Collection<String> rules) {
         SortedMap<Character, Byte> typeMap = new TreeMap<>();
         for (String rule : rules) {
             Matcher m = typePattern.matcher(rule);
@@ -137,7 +137,7 @@ public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory {
         return types;
     }

-    private Byte parseType(String s) {
+    private static Byte parseType(String s) {
         if (s.equals("LOWER"))
             return WordDelimiterFilter.LOWER;
         else if (s.equals("UPPER"))
@@ -154,9 +154,8 @@ public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory {
         return null;
     }

-    char[] out = new char[256];
-
-    private String parseString(String s) {
+    private static String parseString(String s) {
+        char[] out = new char[256];
         int readPos = 0;
         int len = s.length();
         int writePos = 0;
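Note: widening parseTypes from private to package-private static is what lets the new WordDelimiterGraphTokenFilterFactory reuse it. A hypothetical call, with the rule list invented for illustration (same "char => TYPE" format as the sample comment in the new factory):

    List<String> rules = Arrays.asList("$ => DIGIT", "\\u200D => ALPHANUM");
    byte[] typeTable = WordDelimiterTokenFilterFactory.parseTypes(rules);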

View File

@@ -79,7 +79,7 @@ public class BytesRefFieldComparatorSource extends IndexFieldData.XFieldComparatorSource {
         protected void setScorer(Scorer scorer) {}

     @Override
-    public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+    public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
         assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName());

         final boolean sortMissingLast = sortMissingLast(missingValue) ^ reversed;

View File

@@ -64,7 +64,7 @@ public class DoubleValuesComparatorSource extends IndexFieldData.XFieldComparatorSource {
         protected void setScorer(Scorer scorer) {}

     @Override
-    public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+    public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
         assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName());

         final double dMissingValue = (Double) missingObject(missingValue, reversed);

View File

@@ -56,7 +56,7 @@ public class FloatValuesComparatorSource extends IndexFieldData.XFieldComparatorSource {
     }

    @Override
-    public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+    public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
         assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName());

         final float dMissingValue = (Float) missingObject(missingValue, reversed);

View File

@@ -55,7 +55,7 @@ public class LongValuesComparatorSource extends IndexFieldData.XFieldComparatorSource {
     }

    @Override
-    public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+    public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
         assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName());

         final Long dMissingValue = (Long) missingObject(missingValue, reversed);

View File

@@ -23,7 +23,6 @@ import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TopFieldDocs;
-import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.Lucene;
@@ -102,42 +101,38 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHits {
         final TopDocs reducedTopDocs;
         final TopDocs[] shardDocs;

-        try {
-            if (topDocs instanceof TopFieldDocs) {
-                Sort sort = new Sort(((TopFieldDocs) topDocs).fields);
-                shardDocs = new TopFieldDocs[aggregations.size()];
-                for (int i = 0; i < shardDocs.length; i++) {
-                    InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i);
-                    shardDocs[i] = (TopFieldDocs) topHitsAgg.topDocs;
-                    shardHits[i] = topHitsAgg.searchHits;
-                }
-                reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs);
-            } else {
-                shardDocs = new TopDocs[aggregations.size()];
-                for (int i = 0; i < shardDocs.length; i++) {
-                    InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i);
-                    shardDocs[i] = topHitsAgg.topDocs;
-                    shardHits[i] = topHitsAgg.searchHits;
-                }
-                reducedTopDocs = TopDocs.merge(from, size, shardDocs);
+        if (topDocs instanceof TopFieldDocs) {
+            Sort sort = new Sort(((TopFieldDocs) topDocs).fields);
+            shardDocs = new TopFieldDocs[aggregations.size()];
+            for (int i = 0; i < shardDocs.length; i++) {
+                InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i);
+                shardDocs[i] = (TopFieldDocs) topHitsAgg.topDocs;
+                shardHits[i] = topHitsAgg.searchHits;
             }
-
-            final int[] tracker = new int[shardHits.length];
-            SearchHit[] hits = new SearchHit[reducedTopDocs.scoreDocs.length];
-            for (int i = 0; i < reducedTopDocs.scoreDocs.length; i++) {
-                ScoreDoc scoreDoc = reducedTopDocs.scoreDocs[i];
-                int position;
-                do {
-                    position = tracker[scoreDoc.shardIndex]++;
-                } while (shardDocs[scoreDoc.shardIndex].scoreDocs[position] != scoreDoc);
-                hits[i] = shardHits[scoreDoc.shardIndex].getAt(position);
+            reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs);
+        } else {
+            shardDocs = new TopDocs[aggregations.size()];
+            for (int i = 0; i < shardDocs.length; i++) {
+                InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i);
+                shardDocs[i] = topHitsAgg.topDocs;
+                shardHits[i] = topHitsAgg.searchHits;
             }
-            return new InternalTopHits(name, from, size, reducedTopDocs, new SearchHits(hits, reducedTopDocs.totalHits,
-                    reducedTopDocs.getMaxScore()),
-                    pipelineAggregators(), getMetaData());
-        } catch (IOException e) {
-            throw ExceptionsHelper.convertToElastic(e);
+            reducedTopDocs = TopDocs.merge(from, size, shardDocs);
         }
+
+        final int[] tracker = new int[shardHits.length];
+        SearchHit[] hits = new SearchHit[reducedTopDocs.scoreDocs.length];
+        for (int i = 0; i < reducedTopDocs.scoreDocs.length; i++) {
+            ScoreDoc scoreDoc = reducedTopDocs.scoreDocs[i];
+            int position;
+            do {
+                position = tracker[scoreDoc.shardIndex]++;
+            } while (shardDocs[scoreDoc.shardIndex].scoreDocs[position] != scoreDoc);
+            hits[i] = shardHits[scoreDoc.shardIndex].getAt(position);
+        }
+        return new InternalTopHits(name, from, size, reducedTopDocs, new SearchHits(hits, reducedTopDocs.totalHits,
+                reducedTopDocs.getMaxScore()),
+                pipelineAggregators(), getMetaData());
     }

     @Override
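Note: the try/catch could be dropped because these Lucene APIs stopped declaring IOException in 6.5. The do/while lookup above depends on TopDocs.merge() reusing the ScoreDoc instances the shards contributed (only filling in shardIndex), so reference identity recovers each hit's slot; a condensed sketch of that invariant, assuming it holds:

    // Identity, not equals(): merge() is assumed to return the same ScoreDoc
    // objects that were passed in via the per-shard TopDocs.
    ScoreDoc d = reducedTopDocs.scoreDocs[0];
    int pos = 0;
    while (shardDocs[d.shardIndex].scoreDocs[pos] != d) {
        pos++;
    }
    SearchHit hit = shardHits[d.shardIndex].getAt(pos);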

View File

@@ -554,8 +554,7 @@ public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder> {
            }

            @Override
-            public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed)
-                    throws IOException {
+            public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
                return new FieldComparator.DoubleComparator(numHits, null, null) {
                    @Override
                    protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field)

View File

@@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.1.jar}" {
 //// Very special jar permissions:
 //// These are dangerous permissions that we don't want to grant to everything.

-grant codeBase "${codebase.lucene-core-6.4.1.jar}" {
+grant codeBase "${codebase.lucene-core-6.5.0-snapshot-f919485.jar}" {
   // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die)
   // java 8 package
   permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
@@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-core-6.4.1.jar}" {
   permission java.lang.RuntimePermission "accessDeclaredMembers";
 };

-grant codeBase "${codebase.lucene-misc-6.4.1.jar}" {
+grant codeBase "${codebase.lucene-misc-6.5.0-snapshot-f919485.jar}" {
   // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper
   permission java.nio.file.LinkPermission "hard";
 };

View File

@@ -33,7 +33,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" {
   permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
 };

-grant codeBase "${codebase.lucene-test-framework-6.4.1.jar}" {
+grant codeBase "${codebase.lucene-test-framework-6.5.0-snapshot-f919485.jar}" {
   // needed by RamUsageTester
   permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
   // needed for testing hardlinks in StoreRecoveryTests since we install MockFS

View File

@@ -242,7 +242,8 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQueryBuilder> {
         assertThat(booleanQuery.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST));
         assertThat(booleanQuery.clauses().get(0).getQuery(), instanceOf(TermsQuery.class));
         TermsQuery termsQuery = (TermsQuery) booleanQuery.clauses().get(0).getQuery();
-        Query rewrittenTermsQuery = termsQuery.rewrite(null);
+        // we need to rewrite once for TermsQuery -> TermInSetQuery and then again for TermInSetQuery -> ConstantScoreQuery
+        Query rewrittenTermsQuery = termsQuery.rewrite(null).rewrite(null);
         assertThat(rewrittenTermsQuery, instanceOf(ConstantScoreQuery.class));
         ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) rewrittenTermsQuery;
         assertThat(constantScoreQuery.getQuery(), instanceOf(BooleanQuery.class));
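Note: each rewrite() call peels one layer, hence the two chained calls. Searchers handle this in general by rewriting to a fixed point; a sketch of that idiom (the null reader is a test-only convenience):

    Query q = termsQuery;
    for (Query r = q.rewrite(null); r != q; r = q.rewrite(null)) {
        q = r; // keep rewriting until the query stops changing
    }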

View File

@@ -40,6 +40,7 @@ import org.apache.lucene.search.SynonymQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.automaton.TooComplexToDeterminizeException;
 import org.elasticsearch.common.ParsingException;
@@ -397,8 +398,8 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStringQueryBuilder> {
         Query query = queryParser.parse("guinea pig");
         Query expectedQuery = new GraphQuery(
             new BooleanQuery.Builder()
-                .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), defaultOp))
-                .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), defaultOp))
+                .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST))
+                .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST))
                 .build(),
             new TermQuery(new Term(STRING_FIELD_NAME, "cavy"))
         );
@@ -406,19 +407,17 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStringQueryBuilder> {
         // simple with additional tokens
         query = queryParser.parse("that guinea pig smells");
-        expectedQuery = new GraphQuery(
-            new BooleanQuery.Builder()
-                .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "that")), defaultOp))
-                .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), defaultOp))
-                .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), defaultOp))
-                .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), defaultOp))
-                .build(),
-            new BooleanQuery.Builder()
-                .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "that")), defaultOp))
-                .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "cavy")), defaultOp))
-                .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), defaultOp))
-                .build()
-        );
+        expectedQuery = new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(STRING_FIELD_NAME, "that")), defaultOp)
+            .add(new GraphQuery(
+                new BooleanQuery.Builder()
+                    .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST))
+                    .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST))
+                    .build(),
+                new TermQuery(new Term(STRING_FIELD_NAME, "cavy"))
+            ), defaultOp)
+            .add(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), defaultOp)
+            .build();
         assertThat(query, Matchers.equalTo(expectedQuery));

         // complex
@@ -427,8 +426,8 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStringQueryBuilder> {
             .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "that")), BooleanClause.Occur.MUST))
             .add(new BooleanClause(new GraphQuery(
                 new BooleanQuery.Builder()
-                    .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), defaultOp))
-                    .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), defaultOp))
+                    .add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST)
+                    .add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST)
                     .build(),
                 new TermQuery(new Term(STRING_FIELD_NAME, "cavy"))
             ), BooleanClause.Occur.MUST_NOT))

View File

@@ -172,6 +172,7 @@ public class MatchQueryIT extends ESIntegTestCase {
         assertSearchHits(searchResponse, "1", "2", "3", "7", "8");
     }

+    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/23102")
     public void testCommonTerms() throws ExecutionException, InterruptedException {
         String route = "commonTermsTest";
         List<IndexRequestBuilder> builders = getDocs();

View File

@@ -36,7 +36,6 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHits;

-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Comparator;
@@ -188,11 +187,7 @@ public class InternalTopHitsTests extends InternalAggregationTestCase<InternalTopHits> {
         @SuppressWarnings("rawtypes")
         FieldComparator[] comparators = new FieldComparator[testInstancesSortFields.length];
         for (int i = 0; i < testInstancesSortFields.length; i++) {
-            try {
-                comparators[i] = testInstancesSortFields[i].getComparator(0, 0);
-            } catch (IOException e) {
-                throw new RuntimeException(e);
-            }
+            comparators[i] = testInstancesSortFields[i].getComparator(0, 0);
         }
         return (lhs, rhs) -> {
             FieldDoc l = (FieldDoc) lhs;

View File

@@ -1 +0,0 @@
-d56305d2ee8b2484262b1704d802470e6d8f8a8f

View File

@@ -0,0 +1 @@
+89f1c501f5f6504c53527da76aa18796c2b56492

View File

@@ -28,11 +28,13 @@ import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.elasticsearch.common.ParsingException;
@@ -194,12 +196,7 @@ public class PercolatorFieldMapper extends FieldMapper {
         }

         Query createCandidateQuery(IndexReader indexReader) throws IOException {
-            List<Term> extractedTerms = new ArrayList<>();
-            // include extractionResultField:failed, because docs with this term have no extractedTermsField
-            // and otherwise we would fail to return these docs. Docs that failed query term extraction
-            // always need to be verified by MemoryIndex:
-            extractedTerms.add(new Term(extractionResultField.name(), EXTRACTION_FAILED));
+            List<BytesRef> extractedTerms = new ArrayList<>();
             LeafReader reader = indexReader.leaves().get(0).reader();
             Fields fields = reader.fields();
             for (String field : fields) {
@@ -215,10 +212,19 @@ public class PercolatorFieldMapper extends FieldMapper {
                     builder.append(fieldBr);
                     builder.append(FIELD_VALUE_SEPARATOR);
                     builder.append(term);
-                    extractedTerms.add(new Term(queryTermsField.name(), builder.toBytesRef()));
+                    extractedTerms.add(builder.toBytesRef());
                 }
             }
-            return new TermsQuery(extractedTerms);
+            Query extractionSuccess = new TermInSetQuery(queryTermsField.name(), extractedTerms);
+            // include extractionResultField:failed, because docs with this term have no extractedTermsField
+            // and otherwise we would fail to return these docs. Docs that failed query term extraction
+            // always need to be verified by MemoryIndex:
+            Query extractionFailure = new TermQuery(new Term(extractionResultField.name(), EXTRACTION_FAILED));
+            return new BooleanQuery.Builder()
+                    .add(extractionSuccess, Occur.SHOULD)
+                    .add(extractionFailure, Occur.SHOULD)
+                    .build();
         }
     }
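Note: Lucene 6.5 replaces org.apache.lucene.queries.TermsQuery with TermInSetQuery, which binds all terms to a single field; the cross-field "extraction failed" marker can therefore no longer ride along in the same term list and becomes a second SHOULD clause. A minimal sketch of the resulting query shape, with field names and terms invented:

    List<BytesRef> terms = Arrays.asList(new BytesRef("f\u0000foo"), new BytesRef("f\u0000bar"));
    Query extractionSuccess = new TermInSetQuery("query_terms_field", terms);
    Query extractionFailure = new TermQuery(new Term("extraction_result_field", "failed"));
    Query candidates = new BooleanQuery.Builder()
            .add(extractionSuccess, Occur.SHOULD)
            .add(extractionFailure, Occur.SHOULD)
            .build();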

View File

@@ -22,7 +22,6 @@ import org.apache.lucene.index.PrefixCodedTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.BlendedTermQuery;
 import org.apache.lucene.queries.CommonTermsQuery;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.BoostQuery;
@@ -32,6 +31,7 @@ import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SynonymQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.spans.SpanFirstQuery;
 import org.apache.lucene.search.spans.SpanNearQuery;
@@ -62,7 +62,7 @@ public final class QueryAnalyzer {
         map.put(ConstantScoreQuery.class, constantScoreQuery());
         map.put(BoostQuery.class, boostQuery());
         map.put(TermQuery.class, termQuery());
-        map.put(TermsQuery.class, termsQuery());
+        map.put(TermInSetQuery.class, termInSetQuery());
         map.put(CommonTermsQuery.class, commonTermsQuery());
         map.put(BlendedTermQuery.class, blendedTermQuery());
         map.put(PhraseQuery.class, phraseQuery());
@@ -145,11 +145,11 @@ public final class QueryAnalyzer {
         });
     }

-    static Function<Query, Result> termsQuery() {
+    static Function<Query, Result> termInSetQuery() {
         return query -> {
-            TermsQuery termsQuery = (TermsQuery) query;
+            TermInSetQuery termInSetQuery = (TermInSetQuery) query;
             Set<Term> terms = new HashSet<>();
-            PrefixCodedTerms.TermIterator iterator = termsQuery.getTermData().iterator();
+            PrefixCodedTerms.TermIterator iterator = termInSetQuery.getTermData().iterator();
             for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
                 terms.add(new Term(iterator.field(), term));
             }

View File

@@ -26,12 +26,13 @@ import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.PrefixCodedTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.memory.MemoryIndex;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.join.ScoreMode;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Version;
@@ -207,10 +208,13 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
         IndexReader indexReader = memoryIndex.createSearcher().getIndexReader();

-        TermsQuery termsQuery = (TermsQuery) fieldType.createCandidateQuery(indexReader);
+        BooleanQuery candidateQuery = (BooleanQuery) fieldType.createCandidateQuery(indexReader);
+        assertEquals(2, candidateQuery.clauses().size());
+        assertEquals(Occur.SHOULD, candidateQuery.clauses().get(0).getOccur());
+        TermInSetQuery termsQuery = (TermInSetQuery) candidateQuery.clauses().get(0).getQuery();
         PrefixCodedTerms terms = termsQuery.getTermData();
-        assertThat(terms.size(), equalTo(15L));
+        assertThat(terms.size(), equalTo(14L));
         PrefixCodedTerms.TermIterator termIterator = terms.iterator();
         assertTermIterator(termIterator, "_field3\u0000me", fieldType.queryTermsField.name());
         assertTermIterator(termIterator, "_field3\u0000unhide", fieldType.queryTermsField.name());
@@ -226,7 +230,10 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
         assertTermIterator(termIterator, "field2\u0000some", fieldType.queryTermsField.name());
         assertTermIterator(termIterator, "field2\u0000text", fieldType.queryTermsField.name());
         assertTermIterator(termIterator, "field4\u0000123", fieldType.queryTermsField.name());
-        assertTermIterator(termIterator, EXTRACTION_FAILED, fieldType.extractionResultField.name());
+
+        assertEquals(Occur.SHOULD, candidateQuery.clauses().get(1).getOccur());
+        assertEquals(new TermQuery(new Term(fieldType.extractionResultField.name(), EXTRACTION_FAILED)),
+                candidateQuery.clauses().get(1).getQuery());
     }

     private void assertTermIterator(PrefixCodedTerms.TermIterator termIterator, String expectedValue, String expectedField) {

View File

@@ -21,7 +21,6 @@ package org.elasticsearch.percolator;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.BlendedTermQuery;
 import org.apache.lucene.queries.CommonTermsQuery;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.BoostQuery;
@@ -31,6 +30,7 @@ import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.SynonymQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.search.spans.SpanFirstQuery;
@@ -71,7 +71,7 @@ public class QueryAnalyzerTests extends ESTestCase {
     }

     public void testExtractQueryMetadata_termsQuery() {
-        TermsQuery termsQuery = new TermsQuery("_field", new BytesRef("_term1"), new BytesRef("_term2"));
+        TermInSetQuery termsQuery = new TermInSetQuery("_field", new BytesRef("_term1"), new BytesRef("_term2"));
         Result result = analyze(termsQuery);
         assertThat(result.verified, is(true));
         List<Term> terms = new ArrayList<>(result.terms);
@@ -81,18 +81,6 @@ public class QueryAnalyzerTests extends ESTestCase {
         assertThat(terms.get(0).text(), equalTo("_term1"));
         assertThat(terms.get(1).field(), equalTo("_field"));
         assertThat(terms.get(1).text(), equalTo("_term2"));
-
-        // test with different fields
-        termsQuery = new TermsQuery(new Term("_field1", "_term1"), new Term("_field2", "_term2"));
-        result = analyze(termsQuery);
-        assertThat(result.verified, is(true));
-        terms = new ArrayList<>(result.terms);
-        Collections.sort(terms);
-        assertThat(terms.size(), equalTo(2));
-        assertThat(terms.get(0).field(), equalTo("_field1"));
-        assertThat(terms.get(0).text(), equalTo("_term1"));
-        assertThat(terms.get(1).field(), equalTo("_field2"));
-        assertThat(terms.get(1).text(), equalTo("_term2"));
     }

     public void testExtractQueryMetadata_phraseQuery() {
public void testExtractQueryMetadata_phraseQuery() { public void testExtractQueryMetadata_phraseQuery() {

View File

@@ -1 +0,0 @@
-dae7aa1d7ccb6eaa32d7208d25fe772c029113bd

View File

@@ -0,0 +1 @@
+e430aa3efe4883c74edc01711871870c907f37ca

View File

@@ -1 +0,0 @@
-8acb2fd78d2a4612d677e353b056c89fe700a73a

View File

@@ -0,0 +1 @@
+f4340c16ce417a688b5b20f6b6624f51683247bd

View File

@@ -1 +0,0 @@
-5f40ded59cc0a57d2a9fe9d9b9ff6d5dbdb319e6

View File

@@ -0,0 +1 @@
+ccf0f76f1249bc0027e9ebe01953e3663e52a5dc

View File

@@ -1 +0,0 @@
-1aff866b1c191914301af25f818309f7ceb76cd3

View File

@@ -0,0 +1 @@
+8cfa9d27ea3d36524d69b78601fe9b9c6d4b9628

View File

@@ -1 +0,0 @@
-52fcba7d7abde7d299ba31b1c5194fca3b1625da

View File

@@ -0,0 +1 @@
+eb7e1cdab96c107ca256cef75e149139f9b62044

View File

@@ -1 +0,0 @@
-405aeb0b03eca645434cbd23aed31bb74feaece8

View File

@@ -0,0 +1 @@
+d0a56789aa72751547c0001a0e950c387b245181

View File

@@ -20,6 +20,7 @@
 package org.elasticsearch;

 import org.apache.lucene.analysis.en.PorterStemFilterFactory;
+import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilterFactory;
 import org.apache.lucene.analysis.reverse.ReverseStringFilterFactory;
 import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory;
 import org.apache.lucene.analysis.util.CharFilterFactory;
@@ -249,6 +250,7 @@ public class AnalysisFactoryTestCase extends ESTestCase {
         .put("type", KeepTypesFilterFactory.class)
         .put("uppercase", UpperCaseTokenFilterFactory.class)
         .put("worddelimiter", WordDelimiterTokenFilterFactory.class)
+        .put("worddelimitergraph", WordDelimiterGraphFilterFactory.class)
         .put("flattengraph", FlattenGraphTokenFilterFactory.class)

         // TODO: these tokenfilters are not yet exposed: useful?