Upgrade to lucene 8.4.0-snapshot-662c455. (#50016) (#50039)

Lucene 8.4 is about to be released, so we should check that it doesn't cause problems with Elasticsearch.
Adrien Grand 2019-12-10 18:04:58 +01:00 committed by GitHub
parent 1a6e5bf220
commit 87e72156ce
63 changed files with 66 additions and 67 deletions

View File

@@ -1,5 +1,5 @@
 elasticsearch = 7.6.0
-lucene = 8.4.0-snapshot-e648d601efb
+lucene = 8.4.0-snapshot-662c455
 bundled_jdk_vendor = adoptopenjdk
 bundled_jdk = 13.0.1+9

View File

@@ -90,13 +90,13 @@ both index and query time.
 "value": 2,
 "relation": "eq"
 },
-"max_score": 0.47000363,
+"max_score": 0.4700036,
 "hits": [
 {
 "_index": "index",
 "_type": "_doc",
 "_id": "1",
-"_score": 0.47000363,
+"_score": 0.4700036,
 "_source": {
 "foo": "BÀR"
 }
@@ -105,7 +105,7 @@ both index and query time.
 "_index": "index",
 "_type": "_doc",
 "_id": "2",
-"_score": 0.47000363,
+"_score": 0.4700036,
 "_source": {
 "foo": "bar"
 }

View File

@@ -107,12 +107,12 @@ The API returns the following response:
 "_id":"0",
 "matched":true,
 "explanation":{
-"value":1.6943597,
+"value":1.6943598,
 "description":"weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:",
 "details":[
 {
-"value":1.6943597,
-"description":"score(freq=1.0), product of:",
+"value":1.6943598,
+"description":"score(freq=1.0), computed as boost * idf * tf from:",
 "details":[
 {
 "value":2.2,
@@ -136,7 +136,7 @@ The API returns the following response:
 ]
 },
 {
-"value":0.5555555,
+"value":0.5555556,
 "description":"tf, computed as freq / (freq + k1 * (1 - b + b * dl / avgdl)) from:",
 "details":[
 {
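As a sanity check on the reworded explanation, the decomposition can be recomputed from the numbers visible in this hunk. A minimal Java sketch, assuming the elided idf detail is about 1.3862944 (inferred from the other values; it is not shown in the snippet):

class Bm25Check {
    public static void main(String[] args) {
        // score = boost * idf * tf, per the new explanation string.
        float boost = 2.2f;      // the "boost" detail shown above
        float idf = 1.3862944f;  // assumption: inferred as value / (boost * tf)
        float tf = 0.5555556f;   // freq / (freq + k1 * (1 - b + b * dl / avgdl))
        System.out.println(boost * idf * tf); // ~1.6943598, matching the new "value"
    }
}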

View File

@@ -118,13 +118,13 @@ The API returns the following response:
 "value": 1,
 "relation": "eq"
 },
-"max_score": 1.3862944,
+"max_score": 1.3862942,
 "hits" : [
 {
 "_index" : "twitter",
 "_type" : "_doc",
 "_id" : "0",
-"_score": 1.3862944,
+"_score": 1.3862942,
 "_source" : {
 "user" : "kimchy",
 "message": "trying out Elasticsearch",

View File

@@ -840,13 +840,13 @@ Response:
 "value": 1,
 "relation": "eq"
 },
-"max_score": 1.601195,
+"max_score": 1.6011951,
 "hits": [
 {
 "_index": "twitter",
 "_type": "_doc",
 "_id": "1",
-"_score": 1.601195,
+"_score": 1.6011951,
 "_source": {
 "user": "test",
 "message": "some message with the number 1",
@@ -898,13 +898,13 @@ Response:
 "value": 1,
 "relation": "eq"
 },
-"max_score": 1.601195,
+"max_score": 1.6011951,
 "hits": [
 {
 "_index": "twitter",
 "_type": "_doc",
 "_id": "1",
-"_score": 1.601195,
+"_score": 1.6011951,
 "_source": {
 "user": "test",
 "message": "some message with the number 1",

View File

@@ -384,13 +384,13 @@ Which would look like:
 "value": 1,
 "relation": "eq"
 },
-"max_score": 0.6931472,
+"max_score": 0.6931471,
 "hits": [
 {
 "_index": "test",
 "_type": "_doc",
 "_id": "1",
-"_score": 0.6931472,
+"_score": 0.6931471,
 "_source": ...,
 "inner_hits": {
 "comments.votes": { <1>
@@ -399,7 +399,7 @@ Which would look like:
 "value": 1,
 "relation": "eq"
 },
-"max_score": 0.6931472,
+"max_score": 0.6931471,
 "hits": [
 {
 "_index": "test",
@@ -413,7 +413,7 @@ Which would look like:
 "offset": 0
 }
 },
-"_score": 0.6931472,
+"_score": 0.6931471,
 "_source": {
 "value": 1,
 "voter": "kimchy"

View File

@@ -362,13 +362,13 @@ The API returns the following response:
 "value" : 1,
 "relation" : "eq"
 },
-"max_score" : 1.3862944,
+"max_score" : 1.3862942,
 "hits" : [
 {
 "_index" : "twitter",
 "_type" : "_doc",
 "_id" : "0",
-"_score" : 1.3862944,
+"_score" : 1.3862942,
 "_source" : {
 "date" : "2009-11-15T14:12:12",
 "likes" : 0,

View File

@@ -134,13 +134,13 @@ The API returns the following response:
 "value": 1,
 "relation": "eq"
 },
-"max_score": 1.3862944,
+"max_score": 1.3862942,
 "hits" : [
 {
 "_index" : "twitter",
 "_type" : "_doc",
 "_id" : "0",
-"_score": 1.3862944,
+"_score": 1.3862942,
 "_source" : {
 "user" : "kimchy",
 "date" : "2009-11-15T14:12:12",

View File

@@ -0,0 +1 @@
+4041db9db7c394584571b45812734732912ef8e2

View File

@@ -1 +0,0 @@
-43b9178f582373f4fcee61837404c0cc8636043e

View File

@@ -0,0 +1 @@
+d5bddd6b7660439e29bbce26ded283931c756d75

View File

@@ -1 +0,0 @@
-8ee342fa6e6306e56b583251639a661250fada46

View File

@@ -0,0 +1 @@
+4303858c346c51bbbc68c32eb25f7f372b09331c

View File

@@ -1 +0,0 @@
-7e31f2a38d1434eb50781efc65b0e028f08d7821

View File

@@ -0,0 +1 @@
+b1a9182ed1b92a121c1587fe9710aa7a41f3f77a

View File

@@ -1 +0,0 @@
-9079d81a8ea2c7190ef09ca06a987d1cab2fdf17

View File

@@ -0,0 +1 @@
+4df747b25286baecf5e790bf76bc40038c059691

View File

@@ -1 +0,0 @@
-f253f59d4e8bb6e55eb307b011ddb81ba0ebab92

View File

@@ -0,0 +1 @@
+88d3f8f9134b95884f3b80280b09aa2513b71297

View File

@@ -1 +0,0 @@
-36547378493e6e84f63dc744df8d414cb2add1a4

View File

@@ -0,0 +1 @@
+9ddccf575ee03a1329c8d1eb2e4ee7a6e3f3f56f

View File

@@ -1 +0,0 @@
-8b15a376efa7d4289b697144f34a819a9f8772f1

View File

@@ -0,0 +1 @@
+e115e562a42c12a3292fb138607855c1fdfb0772

View File

@@ -1 +0,0 @@
-d1bc4170e6981ca9af71d7a4ce46a3feb2f7b613

View File

@@ -0,0 +1 @@
+061fb94ab616492721f8868dcaec3fbc989733be

View File

@@ -1 +0,0 @@
-1cb225781b19e758d216987e363b77fa4b041174

View File

@@ -0,0 +1 @@
+503f3d516889a99e1c0e2dbdba7bf9cc9900c54c

View File

@@ -1 +0,0 @@
-cbbf849e24ef0cc61312579acf6d6c5b72c99cf5

View File

@@ -0,0 +1 @@
+8ca36adea0a904ec725d57f509a62652a53ecff8

View File

@@ -1 +0,0 @@
-aa74590851b6fcf536976f75448be52f6ca18a4a

View File

@@ -0,0 +1 @@
+f176fdcf8fc574f4cb1c549aaa4da0301afd34ba

View File

@@ -1 +0,0 @@
-1bd113010c183168d79fbc10a6b590fdacc3fa35

View File

@@ -0,0 +1 @@
+db5ea7b647309e5d29fa92bcbb6b11286d11436d

View File

@@ -1 +0,0 @@
-4e44a435e14d12113ca9193182a302677fda155e

View File

@@ -0,0 +1 @@
+36329bc2ea6a5640d4128206221456656de7bbe2

View File

@@ -1 +0,0 @@
-eb8eacd015ef81ef2055ada357a92c9751308ef1

View File

@@ -0,0 +1 @@
+083f492781b3d2c1d470bd1439c875ebf74a14eb

View File

@@ -1 +0,0 @@
-4dc565203bb1eab0222c52215891e207e7032209

View File

@@ -0,0 +1 @@
+9cd5ea7bc08d93053ca993bd6fc1c9cd0a1b91fd

View File

@@ -1 +0,0 @@
-ef596e6d2a7ac9c7dfc6196dad75dc719c81ce85

View File

@@ -0,0 +1 @@
+89e39f65d1c42b5849ccf3a8e6cc9b3b277c08a6

View File

@@ -1 +0,0 @@
-b0c963e68dd71444f09336258c8f63425514426a

View File

@@ -0,0 +1 @@
+651f6a0075ee30b814c8b56020d95155424c0e67

View File

@@ -1 +0,0 @@
-bfab3e9b0467662a8ff969da215dc4a999b73076

View File

@@ -0,0 +1 @@
+935968488cc2bbcd3ced9c254f690e7c90447d9e

View File

@@ -1 +0,0 @@
-dadfc90e4cd032f8a4db5cc1e0bdddecea635edb

View File

@@ -0,0 +1 @@
+0bbdd0002d8d87e54b5caff6c77a1627bf449d38

View File

@@ -1 +0,0 @@
-e72dd79d30781e4d05bc8397ae61d0b51d7ad522

View File

@@ -0,0 +1 @@
+255b547571dcec118ff1a0560bb16e259f96b76a

View File

@@ -1 +0,0 @@
-e6b6dbd0526287f25d98d7fe354d5e290c875b8a

View File

@@ -0,0 +1 @@
+739af6d9876f6aa7f2a3d46fa3f236a5d6ee3653

View File

@@ -1 +0,0 @@
-6351edfc6dde2aefd8f6d8ef33ae5a6e08f88321

View File

@@ -0,0 +1 @@
+20fa11a541a7ca3a50caa443a9abf0276b1194ea

View File

@@ -1 +0,0 @@
-921dd4ab493b9d70a0b1bf7b0fe8a6790b7e8036

View File

@@ -31,7 +31,6 @@ import org.apache.lucene.search.spans.SpanOrQuery;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.automaton.CharacterRunAutomaton;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
@@ -136,7 +135,7 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter {
 BytesRef[] terms = filterExtractedTerms(fieldMatcher, allTerms);
 Set<HighlightFlag> highlightFlags = getFlags(field);
 PhraseHelper phraseHelper = getPhraseHelper(field, query, highlightFlags);
-CharacterRunAutomaton[] automata = getAutomata(field, query, highlightFlags);
+LabelledCharArrayMatcher[] automata = getAutomata(field, query, highlightFlags);
 UHComponents components = new UHComponents(field, fieldMatcher, query, terms, phraseHelper, automata, false , highlightFlags);
 OffsetSource offsetSource = getOptimizedOffsetSource(components);
 BreakIterator breakIterator = new SplittingBreakIterator(getBreakIterator(field),

View File

@@ -107,7 +107,7 @@ import java.util.Map;
 public class Lucene {
 public static final String LATEST_DOC_VALUES_FORMAT = "Lucene70";
 public static final String LATEST_POSTINGS_FORMAT = "Lucene50";
-public static final String LATEST_CODEC = "Lucene80";
+public static final String LATEST_CODEC = "Lucene84";
 static {
 Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class);

View File

@@ -22,7 +22,7 @@ package org.elasticsearch.index.codec;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
-import org.apache.lucene.codecs.lucene80.Lucene80Codec;
+import org.apache.lucene.codecs.lucene84.Lucene84Codec;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.index.mapper.MapperService;
@@ -47,8 +47,8 @@ public class CodecService {
 public CodecService(@Nullable MapperService mapperService, Logger logger) {
 final MapBuilder<String, Codec> codecs = MapBuilder.<String, Codec>newMapBuilder();
 if (mapperService == null) {
-codecs.put(DEFAULT_CODEC, new Lucene80Codec());
-codecs.put(BEST_COMPRESSION_CODEC, new Lucene80Codec(Mode.BEST_COMPRESSION));
+codecs.put(DEFAULT_CODEC, new Lucene84Codec());
+codecs.put(BEST_COMPRESSION_CODEC, new Lucene84Codec(Mode.BEST_COMPRESSION));
 } else {
 codecs.put(DEFAULT_CODEC,
 new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger));
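The default codec moves from Lucene80Codec to Lucene84Codec here. The name the map is keyed on also resolves through Lucene's SPI; a minimal sketch, assuming only a plain Lucene 8.4 classpath:

import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.lucene84.Lucene84Codec;

class CodecLookup {
    public static void main(String[] args) {
        Codec byName = Codec.forName("Lucene84"); // SPI lookup by name
        Codec direct = new Lucene84Codec();       // direct construction, as above
        System.out.println(byName.getName() + " / " + direct.getName()); // Lucene84 / Lucene84
    }
}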

View File

@@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
-import org.apache.lucene.codecs.lucene80.Lucene80Codec;
+import org.apache.lucene.codecs.lucene84.Lucene84Codec;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.index.mapper.CompletionFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -37,7 +37,7 @@ import org.elasticsearch.index.mapper.MapperService;
 * per index in real time via the mapping API. If no specific postings format is
 * configured for a specific field the default postings format is used.
 */
-public class PerFieldMappingPostingFormatCodec extends Lucene80Codec {
+public class PerFieldMappingPostingFormatCodec extends Lucene84Codec {
 private final Logger logger;
 private final MapperService mapperService;
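PerFieldMappingPostingFormatCodec now extends Lucene84Codec and relies on its per-field hook to pick postings formats from the mappings. A minimal sketch of that pattern, with a hypothetical field-name rule standing in for the MapperService lookup:

import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene84.Lucene84Codec;
import org.apache.lucene.search.suggest.document.Completion84PostingsFormat;

class PerFieldSketchCodec extends Lucene84Codec {
    private final PostingsFormat completions = new Completion84PostingsFormat();

    @Override
    public PostingsFormat getPostingsFormatForField(String field) {
        // Hypothetical "_suggest" rule; the real codec consults MapperService instead.
        return field.endsWith("_suggest") ? completions : super.getPostingsFormatForField(field);
    }
}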

View File

@@ -24,7 +24,7 @@ import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.suggest.document.Completion50PostingsFormat;
+import org.apache.lucene.search.suggest.document.Completion84PostingsFormat;
 import org.apache.lucene.search.suggest.document.CompletionAnalyzer;
 import org.apache.lucene.search.suggest.document.CompletionQuery;
 import org.apache.lucene.search.suggest.document.FuzzyCompletionQuery;
@@ -265,7 +265,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
 */
 public static synchronized PostingsFormat postingsFormat() {
 if (postingsFormat == null) {
-postingsFormat = new Completion50PostingsFormat();
+postingsFormat = new Completion84PostingsFormat();
 }
 return postingsFormat;
 }

View File

@@ -80,7 +80,7 @@ public class IndicesQueryCache implements QueryCache, Closeable {
 logger.debug("using [node] query cache with size [{}] max filter count [{}]",
 size, count);
 if (INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.get(settings)) {
-cache = new ElasticsearchLRUQueryCache(count, size.getBytes(), context -> true);
+cache = new ElasticsearchLRUQueryCache(count, size.getBytes(), context -> true, 1f);
 } else {
 cache = new ElasticsearchLRUQueryCache(count, size.getBytes());
 }
@@ -250,8 +250,8 @@ public class IndicesQueryCache implements QueryCache, Closeable {
 private class ElasticsearchLRUQueryCache extends LRUQueryCache {
-ElasticsearchLRUQueryCache(int maxSize, long maxRamBytesUsed, Predicate<LeafReaderContext> leavesToCache) {
-super(maxSize, maxRamBytesUsed, leavesToCache);
+ElasticsearchLRUQueryCache(int maxSize, long maxRamBytesUsed, Predicate<LeafReaderContext> leavesToCache, float skipFactor) {
+super(maxSize, maxRamBytesUsed, leavesToCache, skipFactor);
 }
 ElasticsearchLRUQueryCache(int maxSize, long maxRamBytesUsed) {
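Lucene 8.4 grows this LRUQueryCache constructor by a skipFactor argument, which the patch threads through and sets to 1f on the cache-all-segments branch. A minimal standalone sketch of wiring such a cache up; the sizes are illustrative, not Elasticsearch's defaults:

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LRUQueryCache;

class QueryCacheSetup {
    public static void main(String[] args) {
        LRUQueryCache cache = new LRUQueryCache(
            10_000,             // illustrative max cached query count
            32L * 1024 * 1024,  // illustrative 32 MB RAM budget
            context -> true,    // cache on every segment
            1f);                // skipFactor, as the patch passes
        IndexSearcher.setDefaultQueryCache(cache);
    }
}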

View File

@@ -19,11 +19,16 @@
 package org.elasticsearch.index.codec;
+import static org.hamcrest.Matchers.instanceOf;
+import java.io.IOException;
+import java.util.Collections;
 import org.apache.logging.log4j.LogManager;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
-import org.apache.lucene.codecs.lucene80.Lucene80Codec;
+import org.apache.lucene.codecs.lucene84.Lucene84Codec;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
@@ -42,19 +47,14 @@ import org.elasticsearch.plugins.MapperPlugin;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.IndexSettingsModule;
-import java.io.IOException;
-import java.util.Collections;
-import static org.hamcrest.Matchers.instanceOf;
 @SuppressCodecs("*") // we test against default codec so never get a random one here!
 public class CodecTests extends ESTestCase {
 public void testResolveDefaultCodecs() throws Exception {
 CodecService codecService = createCodecService();
 assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class));
-assertThat(codecService.codec("default"), instanceOf(Lucene80Codec.class));
-assertThat(codecService.codec("Lucene80"), instanceOf(Lucene80Codec.class));
+assertThat(codecService.codec("default"), instanceOf(Lucene84Codec.class));
+assertThat(codecService.codec("Lucene84"), instanceOf(Lucene84Codec.class));
 }
 public void testDefault() throws Exception {

View File

@@ -0,0 +1 @@
+8ca36adea0a904ec725d57f509a62652a53ecff8

View File

@@ -1 +0,0 @@
-aa74590851b6fcf536976f75448be52f6ca18a4a