Upgrade to lucene-8.2.0-snapshot-860e0be5378 (#44171) (#44184)

Upgrades the Lucene library to lucene-8.2.0-snapshot-860e0be5378.
Nick Knize 2019-07-11 09:17:22 -05:00 committed by GitHub
parent 66a9b721f5
commit 374030a53f
62 changed files with 70 additions and 61 deletions

View File

@@ -1,5 +1,5 @@
 elasticsearch = 7.4.0
-lucene = 8.1.0
+lucene = 8.2.0-snapshot-860e0be5378
 bundled_jdk = 12.0.1+12@69cfe15208a647278a19ef0990eea691

View File

@@ -5,8 +5,8 @@ bare_version never includes -alpha or -beta
 :bare_version: 7.4.0
 :major-version: 7.x
 :prev-major-version: 6.x
-:lucene_version: 8.1.0
-:lucene_version_path: 8_1_0
+:lucene_version: 8.2.0
+:lucene_version_path: 8_2_0
 :branch: 7.x
 :jdk: 1.8.0_131
 :jdk_major: 8

View File

@@ -1 +0,0 @@
-0c98e3b9d25f27ab05ac643cfb28756daa516bc7

View File

@@ -0,0 +1 @@
+779a843cfa50fc09eb0cbadebe1ae2de35fb80b4

View File

@@ -1 +0,0 @@
-d61364290eb1c28696e62b7df3a7d041d3be2fa5

View File

@@ -0,0 +1 @@
+9ec124ed1efe19b6a6ebdc9ce299d2a3726cf040

View File

@@ -1 +0,0 @@
-7f78b18890a0a0e74a8249806a6cfcabd2fae304

View File

@@ -0,0 +1 @@
+86c02792e73efdc762e2e65ea7e78912ca01ab65

View File

@@ -1 +0,0 @@
-bfc6b5d67a792aa23ee134fe93307696aad94223

View File

@@ -0,0 +1 @@
+7fd7096cbee02f0c888df1bf391e0fe2d12422e6

View File

@@ -1 +0,0 @@
-6fac1ff799b86f872b67e7fad55120d338daa86f

View File

@@ -0,0 +1 @@
+9d22bef4e7aa7b651eda01ad99e608018da9527d

View File

@@ -1 +0,0 @@
-72941af5e1bfb012aec04dd518a2deb43402702c

View File

@@ -0,0 +1 @@
+1600d60203bf6b53244668352f4026653497abd1

View File

@@ -1 +0,0 @@
-0ac885595cfdc0267d7d9cb843c22dabf7215ff0

View File

@@ -0,0 +1 @@
+2902e1e58cf5d8bbb2ed98d85b1dcddff3b46cbe

View File

@@ -1 +0,0 @@
-e260cff7f48e350e1ec037dec1c260ce05ddb53e

View File

@@ -0,0 +1 @@
+d85bddbdc8a890bd90b572b7fe8e8f944af7fc70

View File

@@ -1 +0,0 @@
-262f20cb2786cdf7015a4ba1a64ce90ff2d746f5

View File

@@ -0,0 +1 @@
+ceed9fd2c88f62b9f039e28eef57f5d95e71c578

View File

@@ -1 +0,0 @@
-c5610306f8eff182b399b9aed7a60b82668a8395

View File

@@ -0,0 +1 @@
+24feeb6db1bc38b1a7aa31eaa052b5af323f206c

View File

@@ -1 +0,0 @@
-46d614acdeb42f4661e91347100217bc72aae11e

View File

@@ -0,0 +1 @@
+d5f5b26c967dce353db5b69f52a22ab1c39e3ce6

View File

@@ -1 +0,0 @@
-443f63d9038eea0601b493fa37fc599d74b035eb

View File

@@ -0,0 +1 @@
+45d506e861bcfe5dcffeab89e08f046934b540cb

View File

@@ -1 +0,0 @@
-e3e52591f8d44a4e1006ced4dd4a67f7a572990a

View File

@@ -0,0 +1 @@
+0a6c96eff6c944ec576873e1d1936fd6b265afd4

View File

@@ -1 +0,0 @@
-2e885b1e3e55f94ccc2744f85738563a577a4e21

View File

@@ -0,0 +1 @@
+9b9fba1279c24d2daac0a8c54228c0bbc4375d60

View File

@@ -1 +0,0 @@
-e58d0092da1c4744627d57d022f4e07d8b80d11b

View File

@@ -0,0 +1 @@
+d723326c2d973928a07868ec97282dea2f3ae511

View File

@@ -1 +0,0 @@
-07833aee2c5feb6fa1a16a21d27c8f15c01d0b4c

View File

@@ -0,0 +1 @@
+5c5422ee3afeebbe6114ef989888e15a7fd7af0e

View File

@@ -1 +0,0 @@
-63096d40298b8b8245a602d344b57bfa14b929fd

View File

@@ -0,0 +1 @@
+5006b9e0ef1bc9c9857742c898ab79eb276891c5

View File

@@ -1 +0,0 @@
-9bb4fb3c7035a877e4a87ed86870894509d26d65

View File

@@ -0,0 +1 @@
+a2e1ef30071d5f5da5b2c515455c70919e131d43

View File

@@ -1 +0,0 @@
-1033737c97703516134ba4c99d41724729854df4

View File

@@ -0,0 +1 @@
+921849a9546d7582b530415ceb718896bde3f017

View File

@@ -1 +0,0 @@
-968d2fb35b0c2e68ac07c1ec187ab38a74b6602a

View File

@@ -0,0 +1 @@
+81aecdc5655a43de5de16d27e7c9f79dd22e338a

View File

@@ -1 +0,0 @@
-551b7fa327645d3fd59ae1321320153b2f858766

View File

@@ -0,0 +1 @@
+268880a91f20eed94c5e48e53d50b7b9a32680d0

View File

@@ -1 +0,0 @@
-45e63df708be458e95d9da3e6054189c50c30dff

View File

@@ -0,0 +1 @@
+72169e3d35627df0c6f713288a7a32d02fb13117

View File

@@ -1 +0,0 @@
-d5cd0e619b473e132f03e3577d1b422f050f99c0

View File

@@ -0,0 +1 @@
+3b33ee4801404570028e8431ccb327e37a20ddfa

View File

@@ -137,10 +137,10 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter {
         Set<HighlightFlag> highlightFlags = getFlags(field);
         PhraseHelper phraseHelper = getPhraseHelper(field, query, highlightFlags);
         CharacterRunAutomaton[] automata = getAutomata(field, query, highlightFlags);
-        OffsetSource offsetSource = getOptimizedOffsetSource(field, terms, phraseHelper, automata);
+        UHComponents components = new UHComponents(field, fieldMatcher, query, terms, phraseHelper, automata, false, highlightFlags);
+        OffsetSource offsetSource = getOptimizedOffsetSource(components);
         BreakIterator breakIterator = new SplittingBreakIterator(getBreakIterator(field),
             UnifiedHighlighter.MULTIVAL_SEP_CHAR);
-        UHComponents components = new UHComponents(field, fieldMatcher, query, terms, phraseHelper, automata, highlightFlags);
         FieldOffsetStrategy strategy = getOffsetStrategy(offsetSource, components);
         return new CustomFieldHighlighter(field, strategy, breakIteratorLocale, breakIterator,
             getScorer(field), maxPassages, (noMatchSize > 0 ? 1 : 0), getFormatter(field), noMatchSize, fieldValue);
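
Note: in Lucene 8.2 the UnifiedHighlighter derives the optimized offset source from a UHComponents bundle rather than from the individual field/terms/automata arguments, so the components object now has to be built first; the extra boolean is the new hasUnrecognizedQueryPart flag. A minimal sketch of the new call order, reusing the names from the hunk above (not part of the commit):

    // Lucene 8.1: OffsetSource offsetSource = getOptimizedOffsetSource(field, terms, phraseHelper, automata);
    // Lucene 8.2: bundle the pieces first, then derive the offset source from them.
    UHComponents components = new UHComponents(field, fieldMatcher, query, terms,
        phraseHelper, automata, false /* hasUnrecognizedQueryPart */, highlightFlags);
    OffsetSource offsetSource = getOptimizedOffsetSource(components);
    FieldOffsetStrategy strategy = getOffsetStrategy(offsetSource, components);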

View File

@@ -104,7 +104,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_7_2_0 = new Version(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0);
     public static final Version V_7_2_1 = new Version(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0);
     public static final Version V_7_3_0 = new Version(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0);
-    public static final Version V_7_4_0 = new Version(7040099, org.apache.lucene.util.Version.LUCENE_8_1_0);
+    public static final Version V_7_4_0 = new Version(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0);
     public static final Version CURRENT = V_7_4_0;
     private static final ImmutableOpenIntMap<Version> idToVersion;
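
Note: each Version constant pins an Elasticsearch release to the Lucene version it ships, and the pairing is readable at runtime. A small illustrative sketch, not part of the commit (it assumes the public luceneVersion field on org.elasticsearch.Version, which the class exposes):

    import org.elasticsearch.Version;

    public class ShowLuceneVersion {
        public static void main(String[] args) {
            // After this commit, 7.4.0 reports Lucene 8.2.0 instead of 8.1.0.
            System.out.println(Version.V_7_4_0 + " -> Lucene " + Version.V_7_4_0.luceneVersion);
        }
    }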

View File

@@ -19,7 +19,7 @@
 package org.elasticsearch.common.geo;
-import org.apache.lucene.document.LatLonShape.QueryRelation;
+import org.apache.lucene.document.ShapeField.QueryRelation;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
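
Note: Lucene 8.2 moved the relation enum from LatLonShape.QueryRelation to ShapeField.QueryRelation; the constants (INTERSECTS, WITHIN, DISJOINT) are unchanged, so only the import moves. A hedged sketch of a caller after the move (field name and box coordinates are illustrative):

    import org.apache.lucene.document.LatLonShape;
    import org.apache.lucene.document.ShapeField.QueryRelation;
    import org.apache.lucene.search.Query;

    public class BoxQueryExample {
        public static void main(String[] args) {
            // Same factory method as before; only the enum's owning class changed.
            Query q = LatLonShape.newBoxQuery("geometry", QueryRelation.INTERSECTS,
                40.0, 41.0, -74.5, -73.5);
            System.out.println(q);
        }
    }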

View File

@@ -34,7 +34,7 @@ import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.intervals.IntervalsSource;
+import org.apache.lucene.queries.intervals.IntervalsSource;
 import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.util.BytesRef;
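
Note: the intervals API was relocated wholesale in Lucene 8.2 from org.apache.lucene.search.intervals to org.apache.lucene.queries.intervals (the classes now ship in the lucene-queries module), so this and the following files only rewrite imports. A minimal before/after sketch, not part of the commit:

    // Lucene 8.1:
    // import org.apache.lucene.search.intervals.Intervals;
    // import org.apache.lucene.search.intervals.IntervalsSource;

    // Lucene 8.2 -- same classes and methods, new package:
    import org.apache.lucene.queries.intervals.Intervals;
    import org.apache.lucene.queries.intervals.IntervalsSource;

    public class IntervalsImportExample {
        public static void main(String[] args) {
            IntervalsSource source = Intervals.term("lucene");
            System.out.println(source);
        }
    }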

View File

@@ -44,8 +44,8 @@ import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SynonymQuery;
 import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.intervals.Intervals;
-import org.apache.lucene.search.intervals.IntervalsSource;
+import org.apache.lucene.queries.intervals.Intervals;
+import org.apache.lucene.queries.intervals.IntervalsSource;
 import org.apache.lucene.search.spans.FieldMaskingSpanQuery;
 import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
 import org.apache.lucene.search.spans.SpanNearQuery;
@@ -425,7 +425,7 @@ public class TextFieldMapper extends FieldMapper {
         public IntervalsSource intervals(BytesRef term) {
             if (term.length > maxChars) {
-                return Intervals.prefix(term.utf8ToString());
+                return Intervals.prefix(term);
             }
             if (term.length >= minChars) {
                 return Intervals.fixField(name(), Intervals.term(term));
@@ -435,7 +435,7 @@ public class TextFieldMapper extends FieldMapper {
                 sb.append("?");
             }
             String wildcardTerm = sb.toString();
-            return Intervals.or(Intervals.fixField(name(), Intervals.wildcard(wildcardTerm)), Intervals.term(term));
+            return Intervals.or(Intervals.fixField(name(), Intervals.wildcard(new BytesRef(wildcardTerm))), Intervals.term(term));
         }
         @Override
@@ -679,7 +679,7 @@ public class TextFieldMapper extends FieldMapper {
             if (prefixFieldType != null) {
                 return prefixFieldType.intervals(normalizedTerm);
             }
-            return Intervals.prefix(normalizedTerm.utf8ToString()); // TODO make Intervals.prefix() take a BytesRef
+            return Intervals.prefix(normalizedTerm);
         }
         IntervalBuilder builder = new IntervalBuilder(name(), analyzer == null ? searchAnalyzer() : analyzer);
         return builder.analyzeText(text, maxGaps, ordered);
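
Note: Intervals.prefix() and Intervals.wildcard() accept a BytesRef in Lucene 8.2 (resolving the TODO removed above), so normalized terms no longer need a utf8ToString() round trip. A minimal sketch, not part of the commit:

    import org.apache.lucene.queries.intervals.Intervals;
    import org.apache.lucene.queries.intervals.IntervalsSource;
    import org.apache.lucene.util.BytesRef;

    public class BytesRefIntervalsExample {
        public static void main(String[] args) {
            // Lucene 8.1 took Strings: Intervals.prefix(term.utf8ToString())
            IntervalsSource prefix = Intervals.prefix(new BytesRef("luc"));
            IntervalsSource wildcard = Intervals.wildcard(new BytesRef("lu?ene"));
            System.out.println(prefix + " " + wildcard);
        }
    }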

View File

@@ -29,9 +29,9 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.MatchesIterator;
 import org.apache.lucene.search.QueryVisitor;
-import org.apache.lucene.search.intervals.IntervalIterator;
-import org.apache.lucene.search.intervals.Intervals;
-import org.apache.lucene.search.intervals.IntervalsSource;
+import org.apache.lucene.queries.intervals.IntervalIterator;
+import org.apache.lucene.queries.intervals.Intervals;
+import org.apache.lucene.queries.intervals.IntervalsSource;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.graph.GraphTokenStreamFiniteStrings;

View File

@@ -19,7 +19,7 @@
 package org.elasticsearch.index.query;
-import org.apache.lucene.search.intervals.IntervalIterator;
+import org.apache.lucene.queries.intervals.IntervalIterator;
 import org.elasticsearch.script.ScriptContext;
 /**

View File

@@ -21,7 +21,7 @@ package org.elasticsearch.index.query;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.intervals.IntervalQuery;
+import org.apache.lucene.queries.intervals.IntervalQuery;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

View File

@@ -20,10 +20,10 @@
 package org.elasticsearch.index.query;
 import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.search.intervals.FilteredIntervalsSource;
-import org.apache.lucene.search.intervals.IntervalIterator;
-import org.apache.lucene.search.intervals.Intervals;
-import org.apache.lucene.search.intervals.IntervalsSource;
+import org.apache.lucene.queries.intervals.FilteredIntervalsSource;
+import org.apache.lucene.queries.intervals.IntervalIterator;
+import org.apache.lucene.queries.intervals.Intervals;
+import org.apache.lucene.queries.intervals.IntervalsSource;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.ParseField;
@@ -585,12 +585,12 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
                 }
                 BytesRef normalizedTerm = analyzer.normalize(useField, pattern);
-                // TODO Intervals.wildcard() should take BytesRef
-                source = Intervals.fixField(useField, Intervals.wildcard(normalizedTerm.utf8ToString()));
+                source = Intervals.fixField(useField, Intervals.wildcard(normalizedTerm));
             }
             else {
                 checkPositions(fieldType);
                 BytesRef normalizedTerm = analyzer.normalize(fieldType.name(), pattern);
-                source = Intervals.wildcard(normalizedTerm.utf8ToString());
+                source = Intervals.wildcard(normalizedTerm);
             }
             return source;
         }

View File

@@ -23,8 +23,8 @@ import org.apache.lucene.analysis.CachingTokenFilter;
 import org.apache.lucene.analysis.CannedTokenStream;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.search.intervals.Intervals;
-import org.apache.lucene.search.intervals.IntervalsSource;
+import org.apache.lucene.queries.intervals.Intervals;
+import org.apache.lucene.queries.intervals.IntervalsSource;
 import org.elasticsearch.test.ESTestCase;
 import java.io.IOException;

View File

@@ -22,8 +22,9 @@ package org.elasticsearch.index.query;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.intervals.IntervalQuery;
-import org.apache.lucene.search.intervals.Intervals;
+import org.apache.lucene.queries.intervals.IntervalQuery;
+import org.apache.lucene.queries.intervals.Intervals;
+import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.compress.CompressedXContent;
@@ -395,7 +396,7 @@ public class IntervalQueryBuilderTests extends AbstractQueryTestCase<IntervalQue
         String json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " +
             "\"prefix\" : { \"prefix\" : \"term\" } } } }";
         IntervalQueryBuilder builder = (IntervalQueryBuilder) parseQuery(json);
-        Query expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.prefix("term"));
+        Query expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.prefix(new BytesRef("term")));
         assertEquals(expected, builder.toQuery(createShardContext()));
         String no_positions_json = "{ \"intervals\" : { \"" + NO_POSITIONS_FIELD + "\": { " +
@@ -422,7 +423,7 @@ public class IntervalQueryBuilderTests extends AbstractQueryTestCase<IntervalQue
             "\"prefix\" : { \"prefix\" : \"t\" } } } }";
         builder = (IntervalQueryBuilder) parseQuery(short_prefix_json);
         expected = new IntervalQuery(PREFIXED_FIELD, Intervals.or(
-            Intervals.fixField(PREFIXED_FIELD + "._index_prefix", Intervals.wildcard("t?")),
+            Intervals.fixField(PREFIXED_FIELD + "._index_prefix", Intervals.wildcard(new BytesRef("t?"))),
             Intervals.term("t")));
         assertEquals(expected, builder.toQuery(createShardContext()));
@@ -454,7 +455,7 @@ public class IntervalQueryBuilderTests extends AbstractQueryTestCase<IntervalQue
             "\"wildcard\" : { \"pattern\" : \"Te?m\" } } } }";
         IntervalQueryBuilder builder = (IntervalQueryBuilder) parseQuery(json);
-        Query expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.wildcard("te?m"));
+        Query expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.wildcard(new BytesRef("te?m")));
         assertEquals(expected, builder.toQuery(createShardContext()));
         String no_positions_json = "{ \"intervals\" : { \"" + NO_POSITIONS_FIELD + "\": { " +
@@ -468,14 +469,14 @@ public class IntervalQueryBuilderTests extends AbstractQueryTestCase<IntervalQue
             "\"wildcard\" : { \"pattern\" : \"Te?m\", \"analyzer\" : \"keyword\" } } } }";
         builder = (IntervalQueryBuilder) parseQuery(keyword_json);
-        expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.wildcard("Te?m"));
+        expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.wildcard(new BytesRef("Te?m")));
         assertEquals(expected, builder.toQuery(createShardContext()));
         String fixed_field_json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " +
             "\"wildcard\" : { \"pattern\" : \"Te?m\", \"use_field\" : \"masked_field\" } } } }";
         builder = (IntervalQueryBuilder) parseQuery(fixed_field_json);
-        expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.fixField(MASKED_FIELD, Intervals.wildcard("te?m")));
+        expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.fixField(MASKED_FIELD, Intervals.wildcard(new BytesRef("te?m"))));
         assertEquals(expected, builder.toQuery(createShardContext()));
         String fixed_field_json_no_positions = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " +
@@ -489,7 +490,8 @@ public class IntervalQueryBuilderTests extends AbstractQueryTestCase<IntervalQue
             "\"wildcard\" : { \"pattern\" : \"Te?m\", \"use_field\" : \"masked_field\", \"analyzer\" : \"keyword\" } } } }";
         builder = (IntervalQueryBuilder) parseQuery(fixed_field_analyzer_json);
-        expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.fixField(MASKED_FIELD, Intervals.wildcard("Te?m")));
+        expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.fixField(MASKED_FIELD,
+            Intervals.wildcard(new BytesRef("Te?m"))));
         assertEquals(expected, builder.toQuery(createShardContext()));
     }

View File

@@ -37,6 +37,7 @@ import java.util.Set;
 import java.util.TreeSet;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;
 import static java.util.Collections.emptyMap;
@@ -281,19 +282,25 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase {
     }
     public void testTokenizers() {
-        Set<String> missing = new TreeSet<String>(org.apache.lucene.analysis.util.TokenizerFactory.availableTokenizers());
+        Set<String> missing = new TreeSet<String>();
+        missing.addAll(org.apache.lucene.analysis.util.TokenizerFactory.availableTokenizers()
+            .stream().map(key -> key.toLowerCase(Locale.ROOT)).collect(Collectors.toSet()));
         missing.removeAll(getTokenizers().keySet());
         assertTrue("new tokenizers found, please update KNOWN_TOKENIZERS: " + missing.toString(), missing.isEmpty());
     }
     public void testCharFilters() {
-        Set<String> missing = new TreeSet<String>(org.apache.lucene.analysis.util.CharFilterFactory.availableCharFilters());
+        Set<String> missing = new TreeSet<String>();
+        missing.addAll(org.apache.lucene.analysis.util.CharFilterFactory.availableCharFilters()
+            .stream().map(key -> key.toLowerCase(Locale.ROOT)).collect(Collectors.toSet()));
         missing.removeAll(getCharFilters().keySet());
         assertTrue("new charfilters found, please update KNOWN_CHARFILTERS: " + missing.toString(), missing.isEmpty());
     }
     public void testTokenFilters() {
-        Set<String> missing = new TreeSet<String>(org.apache.lucene.analysis.util.TokenFilterFactory.availableTokenFilters());
+        Set<String> missing = new TreeSet<String>();
+        missing.addAll(org.apache.lucene.analysis.util.TokenFilterFactory.availableTokenFilters()
+            .stream().map(key -> key.toLowerCase(Locale.ROOT)).collect(Collectors.toSet()));
         missing.removeAll(getTokenFilters().keySet());
         assertTrue("new tokenfilters found, please update KNOWN_TOKENFILTERS: " + missing.toString(), missing.isEmpty());
     }
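
Note: the test changes above account for Lucene 8.2 reporting analysis factories under their mixed-case SPI names, while Elasticsearch keys its KNOWN_* maps by lowercase name, hence the toLowerCase(Locale.ROOT) mapping before diffing. An isolated sketch of that normalization, not part of the commit (the factory names are illustrative stand-ins for the SPI output):

    import java.util.Locale;
    import java.util.Set;
    import java.util.TreeSet;
    import java.util.stream.Collectors;

    public class NormalizeFactoryNames {
        public static void main(String[] args) {
            // Stand-in for TokenizerFactory.availableTokenizers() and friends.
            Set<String> spiNames = Set.of("Standard", "Whitespace", "EdgeNGram");
            Set<String> missing = new TreeSet<>(spiNames.stream()
                .map(name -> name.toLowerCase(Locale.ROOT))
                .collect(Collectors.toSet()));
            missing.removeAll(Set.of("standard", "whitespace")); // known keys, lowercase
            System.out.println(missing); // [edgengram] would need adding to KNOWN_TOKENIZERS
        }
    }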

View File

@@ -1 +0,0 @@
-46d614acdeb42f4661e91347100217bc72aae11e

View File

@@ -0,0 +1 @@
+d5f5b26c967dce353db5b69f52a22ab1c39e3ce6