Upgrade to a Lucene 8 snapshot (#33310)

The main benefit of the upgrade for users is the search optimization for top-scored documents when the total hit count is not needed. However, this optimization is not activated in this change; a separate issue is open to discuss how it should be integrated smoothly.
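As a rough illustration only (not enabled by this change), the Lucene 8 API exposes the optimization through a total-hits threshold on the collector: once the threshold is crossed, the collector may skip non-competitive documents and report the hit count as a lower bound. Field and term names below are made up for the sketch.

import java.io.IOException;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopScoreDocCollector;

class TopHitsSketch {
    static TopDocs topTen(IndexSearcher searcher) throws IOException {
        // Collect the 10 best-scoring hits; count total hits accurately only up to 1000.
        TopScoreDocCollector collector = TopScoreDocCollector.create(10, 1_000);
        searcher.search(new TermQuery(new Term("body", "elasticsearch")), collector);
        // Once the threshold is crossed, totalHits.relation is GREATER_THAN_OR_EQUAL_TO
        // instead of an exact EQUAL_TO count.
        return collector.topDocs();
    }
}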
Some comments about the change:
* Tests that can produce negative scores have been adapted, but negative scores still need to be forbidden completely: #33309

Closes #32899
Jim Ferenczi 2018-09-06 14:42:06 +02:00 committed by GitHub
parent 9b6bbc0182
commit 7ad71f906a
293 changed files with 2147 additions and 1400 deletions

View File

@ -539,9 +539,9 @@ class BuildPlugin implements Plugin<Project> {
from generatePOMTask.destination
into "${project.buildDir}/distributions"
rename {
generatePOMTask.ext.pomFileName == null ?
"${project.archivesBaseName}-${project.version}.pom" :
generatePOMTask.ext.pomFileName
generatePOMTask.ext.pomFileName == null ?
"${project.archivesBaseName}-${project.version}.pom" :
generatePOMTask.ext.pomFileName
}
}
}

View File

@ -1,5 +1,5 @@
elasticsearch = 7.0.0-alpha1
lucene = 7.5.0-snapshot-13b9e28f9d
lucene = 8.0.0-snapshot-4d78db26be
# optional dependencies
spatial4j = 0.7

View File

@ -1034,7 +1034,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertTrue(explainResponse.isExists());
assertTrue(explainResponse.isMatch());
assertTrue(explainResponse.hasExplanation());
assertThat(explainResponse.getExplanation().getValue(), greaterThan(0.0f));
assertThat(explainResponse.getExplanation().getValue().floatValue(), greaterThan(0.0f));
assertNull(explainResponse.getGetResult());
}
{

View File

@ -21,7 +21,7 @@ package org.elasticsearch.plugins;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.apache.lucene.search.spell.LevensteinDistance;
import org.apache.lucene.search.spell.LevenshteinDistance;
import org.apache.lucene.util.CollectionUtil;
import org.bouncycastle.bcpg.ArmoredInputStream;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
@ -355,7 +355,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
/** Returns all the official plugin names that look similar to pluginId. **/
private List<String> checkMisspelledPlugin(String pluginId) {
LevensteinDistance ld = new LevensteinDistance();
LevenshteinDistance ld = new LevenshteinDistance();
List<Tuple<Float, String>> scoredKeys = new ArrayList<>();
for (String officialPlugin : OFFICIAL_PLUGINS) {
float distance = ld.getDistance(pluginId, officialPlugin);

View File

@ -1,7 +1,7 @@
:version: 7.0.0-alpha1
:major-version: 7.x
:lucene_version: 7.5.0
:lucene_version_path: 7_5_0
:lucene_version: 8.0.0
:lucene_version_path: 8_0_0
:branch: master
:jdk: 1.8.0_131
:jdk_major: 8

View File

@ -38,7 +38,6 @@ PUT phonetic_sample
"my_analyzer": {
"tokenizer": "standard",
"filter": [
"standard",
"lowercase",
"my_metaphone"
]

View File

@ -320,7 +320,7 @@ Top hits response snippet with a nested hit, which resides in the first slot of
"by_nested": {
"hits": {
"total": 1,
"max_score": 0.2876821,
"max_score": 0.3616575,
"hits": [
{
"_index": "sales",
@ -330,7 +330,7 @@ Top hits response snippet with a nested hit, which resides in the first slot of
"field": "comments", <1>
"offset": 0 <2>
},
"_score": 0.2876821,
"_score": 0.3616575,
"_source": {
"comment": "This car could have better brakes", <3>
"username": "baddriver007"

View File

@ -273,7 +273,6 @@ Tokenizer::
* <<analysis-standard-tokenizer,Standard Tokenizer>>
Token Filters::
* <<analysis-standard-tokenfilter,Standard Token Filter>>
* <<analysis-lowercase-tokenfilter,Lower Case Token Filter>>
* <<analysis-stop-tokenfilter,Stop Token Filter>> (disabled by default)
@ -292,7 +291,6 @@ PUT /standard_example
"rebuilt_standard": {
"tokenizer": "standard",
"filter": [
"standard",
"lowercase" <1>
]
}

View File

@ -9,8 +9,6 @@ or add tokens (eg synonyms).
Elasticsearch has a number of built in token filters which can be
used to build <<analysis-custom-analyzer,custom analyzers>>.
include::tokenfilters/standard-tokenfilter.asciidoc[]
include::tokenfilters/asciifolding-tokenfilter.asciidoc[]
include::tokenfilters/flatten-graph-tokenfilter.asciidoc[]

View File

@ -15,7 +15,7 @@ PUT /asciifold_example
"analyzer" : {
"default" : {
"tokenizer" : "standard",
"filter" : ["standard", "asciifolding"]
"filter" : ["asciifolding"]
}
}
}
@ -37,7 +37,7 @@ PUT /asciifold_example
"analyzer" : {
"default" : {
"tokenizer" : "standard",
"filter" : ["standard", "my_ascii_folding"]
"filter" : ["my_ascii_folding"]
}
},
"filter" : {

View File

@ -16,7 +16,7 @@ PUT /elision_example
"analyzer" : {
"default" : {
"tokenizer" : "standard",
"filter" : ["standard", "elision"]
"filter" : ["elision"]
}
},
"filter" : {

View File

@ -26,7 +26,7 @@ PUT /keep_types_example
"analyzer" : {
"my_analyzer" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "extract_numbers"]
"filter" : ["lowercase", "extract_numbers"]
}
},
"filter" : {
@ -87,7 +87,7 @@ PUT /keep_types_exclude_example
"analyzer" : {
"my_analyzer" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "remove_numbers"]
"filter" : ["lowercase", "remove_numbers"]
}
},
"filter" : {

View File

@ -27,11 +27,11 @@ PUT /keep_words_example
"analyzer" : {
"example_1" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "words_till_three"]
"filter" : ["lowercase", "words_till_three"]
},
"example_2" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "words_in_file"]
"filter" : ["lowercase", "words_in_file"]
}
},
"filter" : {

View File

@ -19,7 +19,7 @@ PUT /my_index
"analyzer" : {
"my_analyzer" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "my_snow"]
"filter" : ["lowercase", "my_snow"]
}
},
"filter" : {

View File

@ -1,15 +0,0 @@
[[analysis-standard-tokenfilter]]
=== Standard Token Filter
A token filter of type `standard` that normalizes tokens extracted with
the
<<analysis-standard-tokenizer,Standard
Tokenizer>>.
[TIP]
==================================================
The `standard` token filter currently does nothing. It remains as a placeholder
in case some filtering function needs to be added in a future version.
==================================================

View File

@ -13,7 +13,7 @@ PUT /my_index
"analyzer" : {
"my_analyzer" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "my_stemmer"]
"filter" : ["lowercase", "my_stemmer"]
}
},
"filter" : {

View File

@ -143,13 +143,13 @@ GET index/_search
},
"hits": {
"total": 1,
"max_score": 0.80259144,
"max_score": 0.8025915,
"hits": [
{
"_index": "index",
"_type": "_doc",
"_id": "1",
"_score": 0.80259144,
"_score": 0.8025915,
"_source": {
"body": "Ski resort"
}
@ -200,13 +200,13 @@ GET index/_search
},
"hits": {
"total": 1,
"max_score": 0.80259144,
"max_score": 0.8025915,
"hits": [
{
"_index": "index",
"_type": "_doc",
"_id": "1",
"_score": 0.80259144,
"_score": 0.8025915,
"_source": {
"body": "Ski resort"
}

View File

@ -295,27 +295,27 @@ Which yields:
"details": []
},
{
"value": 2.0,
"value": 2,
"description": "field.docCount",
"details": []
},
{
"value": 4.0,
"value": 4,
"description": "field.sumDocFreq",
"details": []
},
{
"value": 5.0,
"value": 5,
"description": "field.sumTotalTermFreq",
"details": []
},
{
"value": 1.0,
"value": 1,
"description": "term.docFreq",
"details": []
},
{
"value": 2.0,
"value": 2,
"description": "term.totalTermFreq",
"details": []
},
@ -325,7 +325,7 @@ Which yields:
"details": []
},
{
"value": 3.0,
"value": 3,
"description": "doc.length",
"details": []
}
@ -469,27 +469,27 @@ GET /index/_search?explain=true
"details": []
},
{
"value": 2.0,
"value": 2,
"description": "field.docCount",
"details": []
},
{
"value": 4.0,
"value": 4,
"description": "field.sumDocFreq",
"details": []
},
{
"value": 5.0,
"value": 5,
"description": "field.sumTotalTermFreq",
"details": []
},
{
"value": 1.0,
"value": 1,
"description": "term.docFreq",
"details": []
},
{
"value": 2.0,
"value": 2,
"description": "term.totalTermFreq",
"details": []
},
@ -499,7 +499,7 @@ GET /index/_search?explain=true
"details": []
},
{
"value": 3.0,
"value": 3,
"description": "doc.length",
"details": []
}

View File

@ -446,7 +446,6 @@ PUT my_queries1
"type": "custom",
"tokenizer": "standard",
"filter": [
"standard",
"lowercase",
"wildcard_edge_ngram"
]
@ -597,7 +596,6 @@ PUT my_queries2
"type": "custom",
"tokenizer": "standard",
"filter": [
"standard",
"lowercase",
"reverse",
"wildcard_edge_ngram"
@ -607,7 +605,6 @@ PUT my_queries2
"type": "custom",
"tokenizer": "standard",
"filter": [
"standard",
"lowercase",
"reverse"
]

View File

@ -22,3 +22,7 @@ The `delimited_payload_filter` was deprecated and renamed to `delimited_payload`
Using it in indices created before 7.0 will issue deprecation warnings. Using the old
name in new indices created in 7.0 will throw an error. Use the new name `delimited_payload`
instead.
==== `standard` filter has been removed
The `standard` token filter has been removed because it doesn't change anything in the stream.
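For illustration only (not part of this diff): since the filter was a no-op, an analyzer built from a standard tokenizer plus a lowercase filter produces the same tokens as the old standard + lowercase chain. The class name below is hypothetical.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;

// Hypothetical analyzer: same output as the pre-8.0 "standard" + "lowercase" filter chain.
public final class LowercaseStandardAnalyzer extends Analyzer {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
        Tokenizer source = new StandardTokenizer();
        // Previously: new LowerCaseFilter(new StandardFilter(source)); StandardFilter changed nothing.
        TokenStream result = new LowerCaseFilter(source);
        return new TokenStreamComponents(source, result);
    }
}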

View File

@ -555,3 +555,8 @@ See <<commands>>.
See <<api-definitions>>.
[role="exclude",id="analysis-standard-tokenfilter"]
=== Standard filter removed
The standard token filter has been removed.

View File

@ -30,62 +30,67 @@ This will yield the following result:
[source,js]
--------------------------------------------------
{
"_index": "twitter",
"_type": "_doc",
"_id": "0",
"matched": true,
"explanation": {
"value": 1.6943599,
"description": "weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:",
"details": [
"_index":"twitter",
"_type":"_doc",
"_id":"0",
"matched":true,
"explanation":{
"value":1.6943597,
"description":"weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:",
"details":[
{
"value": 1.6943599,
"description": "score(doc=0,freq=1.0 = termFreq=1.0\n), product of:",
"details": [
"value":1.6943597,
"description":"score(freq=1.0), product of:",
"details":[
{
"value": 1.3862944,
"description": "idf, computed as log(1 + (docCount - docFreq + 0.5) / (docFreq + 0.5)) from:",
"details": [
{
"value": 1.0,
"description": "docFreq",
"details": []
},
{
"value": 5.0,
"description": "docCount",
"details": []
}
]
"value":2.2,
"description":"scaling factor, k1 + 1",
"details":[]
},
{
"value": 1.2222223,
"description": "tfNorm, computed as (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength)) from:",
"details": [
{
"value":1.3862944,
"description":"idf, computed as log(1 + (N - n + 0.5) / (n + 0.5)) from:",
"details":[
{
"value": 1.0,
"description": "termFreq=1.0",
"details": []
"value":1,
"description":"n, number of documents containing term",
"details":[]
},
{
"value": 1.2,
"description": "parameter k1",
"details": []
"value":5,
"description":"N, total number of documents with field",
"details":[]
}
]
},
{
"value":0.5555555,
"description":"tf, computed as freq / (freq + k1 * (1 - b + b * dl / avgdl)) from:",
"details":[
{
"value":1.0,
"description":"freq, occurrences of term within document",
"details":[]
},
{
"value": 0.75,
"description": "parameter b",
"details": []
"value":1.2,
"description":"k1, term saturation parameter",
"details":[]
},
{
"value": 5.4,
"description": "avgFieldLength",
"details": []
"value":0.75,
"description":"b, length normalization parameter",
"details":[]
},
{
"value": 3.0,
"description": "fieldLength",
"details": []
"value":3.0,
"description":"dl, length of field",
"details":[]
},
{
"value":5.4,
"description":"avgdl, average length of field",
"details":[]
}
]
}

View File

@ -72,7 +72,11 @@ This will yield the following result:
"next_doc": 53876,
"next_doc_count": 5,
"advance": 0,
"advance_count": 0
"advance_count": 0,
"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
},
"children": [
{
@ -91,7 +95,11 @@ This will yield the following result:
"next_doc": 10111,
"next_doc_count": 5,
"advance": 0,
"advance_count": 0
"advance_count": 0,
"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
}
},
{
@ -110,7 +118,11 @@ This will yield the following result:
"next_doc": 2852,
"next_doc_count": 5,
"advance": 0,
"advance_count": 0
"advance_count": 0,
"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
}
}
]
@ -288,7 +300,11 @@ The `breakdown` component lists detailed timing statistics about low-level Lucen
"next_doc": 53876,
"next_doc_count": 5,
"advance": 0,
"advance_count": 0
"advance_count": 0,
"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
}
--------------------------------------------------
// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.$_path",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:some message:number",\n"time_in_nanos": $body.$_path,/]
@ -548,7 +564,11 @@ And the response:
"score_count": 1,
"build_scorer": 377872,
"advance": 0,
"advance_count": 0
"advance_count": 0,
"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
}
},
{
@ -567,7 +587,11 @@ And the response:
"score_count": 1,
"build_scorer": 112551,
"advance": 0,
"advance_count": 0
"advance_count": 0,
"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
}
}
],

View File

@ -265,19 +265,19 @@ Response not included in text but tested for completeness sake.
...,
"hits": {
"total": 1,
"max_score": 1.0444683,
"max_score": 1.0444684,
"hits": [
{
"_index": "test",
"_type": "_doc",
"_id": "1",
"_score": 1.0444683,
"_score": 1.0444684,
"_source": ...,
"inner_hits": {
"comments": { <1>
"hits": {
"total": 1,
"max_score": 1.0444683,
"max_score": 1.0444684,
"hits": [
{
"_index": "test",
@ -287,7 +287,7 @@ Response not included in text but tested for completeness sake.
"field": "comments",
"offset": 1
},
"_score": 1.0444683,
"_score": 1.0444684,
"fields": {
"comments.text.keyword": [
"words words words"

View File

@ -33,12 +33,12 @@ PUT test
"trigram": {
"type": "custom",
"tokenizer": "standard",
"filter": ["standard", "shingle"]
"filter": ["shingle"]
},
"reverse": {
"type": "custom",
"tokenizer": "standard",
"filter": ["standard", "reverse"]
"filter": ["reverse"]
}
},
"filter": {

View File

@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations.matrix.stats;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ScoreMode;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ObjectArray;
@ -61,8 +62,8 @@ final class MatrixStatsAggregator extends MetricsAggregator {
}
@Override
public boolean needsScores() {
return (valuesSources == null) ? false : valuesSources.needsScores();
public ScoreMode scoreMode() {
return (valuesSources != null && valuesSources.needsScores()) ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES;
}
@Override

View File

@ -19,6 +19,7 @@
package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@ -35,7 +36,7 @@ public class ChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider<Stand
ChineseAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
// old index: best effort
analyzer = new StandardAnalyzer();
analyzer = new StandardAnalyzer(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
analyzer.setVersion(version);
}

View File

@ -44,7 +44,6 @@ import org.apache.lucene.analysis.core.DecimalDigitFilter;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.core.LetterTokenizer;
import org.apache.lucene.analysis.core.LowerCaseTokenizer;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.UpperCaseFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.cz.CzechAnalyzer;
@ -325,7 +324,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
() -> new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true,
CharArraySet.EMPTY_SET)));
analyzers.add(new PreBuiltAnalyzerProviderFactory("snowball", CachingStrategy.LUCENE,
() -> new SnowballAnalyzer("English", StopAnalyzer.ENGLISH_STOP_WORDS_SET)));
() -> new SnowballAnalyzer("English", EnglishAnalyzer.ENGLISH_STOP_WORDS_SET)));
// Language analyzers:
analyzers.add(new PreBuiltAnalyzerProviderFactory("arabic", CachingStrategy.LUCENE, ArabicAnalyzer::new));
@ -336,7 +335,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
analyzers.add(new PreBuiltAnalyzerProviderFactory("bulgarian", CachingStrategy.LUCENE, BulgarianAnalyzer::new));
analyzers.add(new PreBuiltAnalyzerProviderFactory("catalan", CachingStrategy.LUCENE, CatalanAnalyzer::new));
// chinese analyzer: only for old indices, best effort
analyzers.add(new PreBuiltAnalyzerProviderFactory("chinese", CachingStrategy.ONE, StandardAnalyzer::new));
analyzers.add(new PreBuiltAnalyzerProviderFactory("chinese", CachingStrategy.ONE,
() -> new StandardAnalyzer(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET)));
analyzers.add(new PreBuiltAnalyzerProviderFactory("cjk", CachingStrategy.LUCENE, CJKAnalyzer::new));
analyzers.add(new PreBuiltAnalyzerProviderFactory("czech", CachingStrategy.LUCENE, CzechAnalyzer::new));
analyzers.add(new PreBuiltAnalyzerProviderFactory("danish", CachingStrategy.LUCENE, DanishAnalyzer::new));
@ -408,14 +408,14 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER)));
filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer())));
filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, input ->
new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE)));
new EdgeNGramTokenFilter(input, 1)));
filters.add(PreConfiguredTokenFilter.singletonWithVersion("edgeNGram", false, (reader, version) -> {
if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) {
DEPRECATION_LOGGER.deprecatedAndMaybeLog("edgeNGram_deprecation",
"The [edgeNGram] token filter name is deprecated and will be removed in a future version. "
+ "Please change the filter name to [edge_ngram] instead.");
}
return new EdgeNGramTokenFilter(reader, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE);
return new EdgeNGramTokenFilter(reader, 1);
}));
filters.add(PreConfiguredTokenFilter.singleton("elision", true,
input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES)));
@ -432,14 +432,14 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
new LimitTokenCountFilter(input,
LimitTokenCountFilterFactory.DEFAULT_MAX_TOKEN_COUNT,
LimitTokenCountFilterFactory.DEFAULT_CONSUME_ALL_TOKENS)));
filters.add(PreConfiguredTokenFilter.singleton("ngram", false, NGramTokenFilter::new));
filters.add(PreConfiguredTokenFilter.singleton("ngram", false, reader -> new NGramTokenFilter(reader, 1, 2, false)));
filters.add(PreConfiguredTokenFilter.singletonWithVersion("nGram", false, (reader, version) -> {
if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) {
DEPRECATION_LOGGER.deprecatedAndMaybeLog("nGram_deprecation",
"The [nGram] token filter name is deprecated and will be removed in a future version. "
+ "Please change the filter name to [ngram] instead.");
}
return new NGramTokenFilter(reader);
return new NGramTokenFilter(reader, 1, 2, false);
}));
filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new));
filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new));
@ -462,7 +462,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
filters.add(PreConfiguredTokenFilter.singleton("sorani_normalization", true, SoraniNormalizationFilter::new));
filters.add(PreConfiguredTokenFilter.singleton("stemmer", false, PorterStemFilter::new));
// The stop filter is in lucene-core but the English stop words set is in lucene-analyzers-common
filters.add(PreConfiguredTokenFilter.singleton("stop", false, input -> new StopFilter(input, StopAnalyzer.ENGLISH_STOP_WORDS_SET)));
filters.add(PreConfiguredTokenFilter.singleton("stop", false,
input -> new StopFilter(input, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET)));
filters.add(PreConfiguredTokenFilter.singleton("trim", true, TrimFilter::new));
filters.add(PreConfiguredTokenFilter.singleton("truncate", false, input -> new TruncateTokenFilter(input, 10)));
filters.add(PreConfiguredTokenFilter.singleton("type_as_payload", false, TypeAsPayloadTokenFilter::new));

View File

@ -21,7 +21,6 @@ package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@ -41,8 +40,8 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
this.minGram = settings.getAsInt("min_gram", 1);
this.maxGram = settings.getAsInt("max_gram", 2);
this.side = parseSide(settings.get("side", "front"));
}
@ -63,7 +62,8 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
result = new ReverseStringFilter(result);
}
result = new EdgeNGramTokenFilter(result, minGram, maxGram);
// TODO: Expose preserveOriginal
result = new EdgeNGramTokenFilter(result, minGram, maxGram, false);
// side=BACK is not supported anymore but applying ReverseStringFilter up-front and after the token filter has the same effect
if (side == SIDE_BACK) {

View File

@ -39,8 +39,8 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
NGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
int maxAllowedNgramDiff = indexSettings.getMaxNgramDiff();
this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
this.minGram = settings.getAsInt("min_gram", 1);
this.maxGram = settings.getAsInt("max_gram", 2);
int ngramDiff = maxGram - minGram;
if (ngramDiff > maxAllowedNgramDiff) {
if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) {
@ -57,6 +57,7 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
@Override
public TokenStream create(TokenStream tokenStream) {
return new NGramTokenFilter(tokenStream, minGram, maxGram);
// TODO: Expose preserveOriginal
return new NGramTokenFilter(tokenStream, minGram, maxGram, false);
}
}

View File

@ -27,11 +27,10 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
/** Filters {@link StandardTokenizer} with {@link StandardFilter}, {@link
/** Filters {@link StandardTokenizer} with {@link
* LowerCaseFilter}, {@link StopFilter} and {@link SnowballFilter}.
*
* Available stemmers are listed in org.tartarus.snowball.ext. The name of a
@ -57,8 +56,7 @@ public final class SnowballAnalyzer extends Analyzer {
stopSet = CharArraySet.unmodifiableSet(CharArraySet.copy(stopWords));
}
/** Constructs a {@link StandardTokenizer} filtered by a {@link
StandardFilter}, a {@link LowerCaseFilter}, a {@link StopFilter},
/** Constructs a {@link StandardTokenizer} filtered by a {@link LowerCaseFilter}, a {@link StopFilter},
and a {@link SnowballFilter} */
@Override
public TokenStreamComponents createComponents(String fieldName) {

View File

@ -19,8 +19,8 @@
package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.de.GermanAnalyzer;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.analysis.fr.FrenchAnalyzer;
import org.apache.lucene.analysis.nl.DutchAnalyzer;
import org.elasticsearch.common.settings.Settings;
@ -42,7 +42,7 @@ import static java.util.Collections.unmodifiableMap;
* Configuration of language is done with the "language" attribute or the analyzer.
* Also supports additional stopwords via "stopwords" attribute
* <p>
* The SnowballAnalyzer comes with a StandardFilter, LowerCaseFilter, StopFilter
* The SnowballAnalyzer comes with a LowerCaseFilter, StopFilter
* and the SnowballFilter.
*
*
@ -52,7 +52,7 @@ public class SnowballAnalyzerProvider extends AbstractIndexAnalyzerProvider<Snow
static {
Map<String, CharArraySet> defaultLanguageStopwords = new HashMap<>();
defaultLanguageStopwords.put("English", StopAnalyzer.ENGLISH_STOP_WORDS_SET);
defaultLanguageStopwords.put("English", EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
defaultLanguageStopwords.put("Dutch", DutchAnalyzer.getDefaultStopSet());
defaultLanguageStopwords.put("German", GermanAnalyzer.getDefaultStopSet());
defaultLanguageStopwords.put("German2", GermanAnalyzer.getDefaultStopSet());

View File

@ -25,8 +25,7 @@ import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
@ -36,7 +35,7 @@ public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
*/
@Deprecated
public StandardHtmlStripAnalyzer() {
super(StopAnalyzer.ENGLISH_STOP_WORDS_SET);
super(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
}
StandardHtmlStripAnalyzer(CharArraySet stopwords) {
@ -46,8 +45,7 @@ public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
@Override
protected TokenStreamComponents createComponents(final String fieldName) {
final Tokenizer src = new StandardTokenizer();
TokenStream tok = new StandardFilter(src);
tok = new LowerCaseFilter(tok);
TokenStream tok = new LowerCaseFilter(src);
if (!stopwords.isEmpty()) {
tok = new StopFilter(tok, stopwords);
}

View File

@ -20,7 +20,7 @@ package org.elasticsearch.analysis.common;
*/
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.test.ESTokenStreamTestCase;
@ -44,7 +44,7 @@ public class PatternAnalyzerTests extends ESTokenStreamTestCase {
// split on non-letter pattern, lowercase, english stopwords
PatternAnalyzer b = new PatternAnalyzer(Pattern.compile("\\W+"), true,
StopAnalyzer.ENGLISH_STOP_WORDS_SET);
EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
assertAnalyzesTo(b, "The quick brown Fox,the abcd1234 (56.78) dc.",
new String[] { "quick", "brown", "fox", "abcd1234", "56", "78", "dc" });
}
@ -61,7 +61,7 @@ public class PatternAnalyzerTests extends ESTokenStreamTestCase {
// Split on whitespace patterns, lowercase, english stopwords
PatternAnalyzer b = new PatternAnalyzer(Pattern.compile("\\s+"), true,
StopAnalyzer.ENGLISH_STOP_WORDS_SET);
EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
assertAnalyzesTo(b, "The quick brown Fox,the abcd1234 (56.78) dc.",
new String[] { "quick", "brown", "fox,the", "abcd1234", "(56.78)", "dc." });
}
@ -78,7 +78,7 @@ public class PatternAnalyzerTests extends ESTokenStreamTestCase {
// split on comma, lowercase, english stopwords
PatternAnalyzer b = new PatternAnalyzer(Pattern.compile(","), true,
StopAnalyzer.ENGLISH_STOP_WORDS_SET);
EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
assertAnalyzesTo(b, "Here,Are,some,Comma,separated,words,",
new String[] { "here", "some", "comma", "separated", "words" });
}
@ -109,7 +109,7 @@ public class PatternAnalyzerTests extends ESTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
Analyzer a = new PatternAnalyzer(Pattern.compile(","), true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
Analyzer a = new PatternAnalyzer(Pattern.compile(","), true, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER);
}

View File

@ -20,7 +20,7 @@ package org.elasticsearch.analysis.common;
*/
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.elasticsearch.test.ESTokenStreamTestCase;
public class SnowballAnalyzerTests extends ESTokenStreamTestCase {
@ -33,7 +33,7 @@ public class SnowballAnalyzerTests extends ESTokenStreamTestCase {
public void testStopwords() throws Exception {
Analyzer a = new SnowballAnalyzer("English",
StandardAnalyzer.STOP_WORDS_SET);
EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
assertAnalyzesTo(a, "the quick brown fox jumped",
new String[]{"quick", "brown", "fox", "jump"});
}

View File

@ -1 +0,0 @@
fded6bb485b8b01bb2a9280162fd14d4d3ce4510

View File

@ -0,0 +1 @@
5f469e925dde5dff81b9d56f465a8babb56cd26b

View File

@ -26,6 +26,7 @@ import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.RAMDirectory;
@ -550,7 +551,7 @@ public class PainlessExecuteAction extends Action<PainlessExecuteAction.Response
Query luceneQuery = request.contextSetup.query.rewrite(context).toQuery(context);
IndexSearcher indexSearcher = new IndexSearcher(leafReaderContext.reader());
luceneQuery = indexSearcher.rewrite(luceneQuery);
Weight weight = indexSearcher.createWeight(luceneQuery, true, 1f);
Weight weight = indexSearcher.createWeight(luceneQuery, ScoreMode.COMPLETE, 1f);
Scorer scorer = weight.scorer(indexSearcher.getIndexReader().leaves().get(0));
// Consume the first (and only) match.
int docID = scorer.iterator().nextDoc();

View File

@ -49,6 +49,11 @@ public class ScoreTests extends ScriptTestCase {
public float score() throws IOException {
return 2.5f;
}
@Override
public float getMaxScore(int upTo) throws IOException {
return 2.5f;
}
},
true));
}
@ -60,6 +65,11 @@ public class ScoreTests extends ScriptTestCase {
public float score() throws IOException {
throw new AssertionError("score() should not be called");
}
@Override
public float getMaxScore(int upTo) throws IOException {
return Float.MAX_VALUE;
}
},
true));
}
@ -75,6 +85,11 @@ public class ScoreTests extends ScriptTestCase {
}
throw new AssertionError("score() should not be called twice");
}
@Override
public float getMaxScore(int upTo) throws IOException {
return 4.5f;
}
},
true));
}

View File

@ -25,6 +25,7 @@ import org.elasticsearch.painless.spi.Whitelist;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptedMetricAggContexts;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@ -74,6 +75,11 @@ public class ScriptedMetricAggContextsTests extends ScriptTestCase {
@Override
public DocIdSetIterator iterator() { return null; }
@Override
public float getMaxScore(int upTo) throws IOException {
return 0.5f;
}
};
ScriptedMetricAggContexts.MapScript.LeafFactory leafFactory = factory.newFactory(params, state, null);

View File

@ -89,7 +89,7 @@ public class SimilarityScriptTests extends ScriptTestCase {
.add(new TermQuery(new Term("match", "yes")), Occur.FILTER)
.build(), 3.2f);
TopDocs topDocs = searcher.search(query, 1);
assertEquals(1, topDocs.totalHits);
assertEquals(1, topDocs.totalHits.value);
assertEquals((float) (3.2 * 2 / 3), topDocs.scoreDocs[0].score, 0);
w.close();
dir.close();
@ -128,7 +128,7 @@ public class SimilarityScriptTests extends ScriptTestCase {
.add(new TermQuery(new Term("match", "yes")), Occur.FILTER)
.build(), 3.2f);
TopDocs topDocs = searcher.search(query, 1);
assertEquals(1, topDocs.totalHits);
assertEquals(1, topDocs.totalHits.value);
assertEquals((float) (3.2 * 2 / 3), topDocs.scoreDocs[0].score, 0);
w.close();
dir.close();

View File

@ -161,7 +161,7 @@
"script_score": {
"script": {
"lang": "painless",
"source": "-doc['num1'].value"
"source": "3 - doc['num1'].value"
}
}
}]

View File

@ -24,6 +24,7 @@ import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
@ -78,8 +79,8 @@ public class ParentToChildrenAggregator extends BucketsAggregator implements Sin
throws IOException {
super(name, factories, context, parent, pipelineAggregators, metaData);
// these two filters are cached in the parser
this.childFilter = context.searcher().createNormalizedWeight(childFilter, false);
this.parentFilter = context.searcher().createNormalizedWeight(parentFilter, false);
this.childFilter = context.searcher().createWeight(context.searcher().rewrite(childFilter), ScoreMode.COMPLETE_NO_SCORES, 1f);
this.parentFilter = context.searcher().createWeight(context.searcher().rewrite(parentFilter), ScoreMode.COMPLETE_NO_SCORES, 1f);
this.parentOrdToBuckets = context.bigArrays().newLongArray(maxOrd, false);
this.parentOrdToBuckets.fill(0, maxOrd, -1);
this.parentOrdToOtherBuckets = new LongObjectPagedHashMap<>(context.bigArrays());

View File

@ -23,16 +23,21 @@ import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopDocsCollector;
import org.apache.lucene.search.TopFieldCollector;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.search.MaxScoreCollector;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.index.query.InnerHitContextBuilder;
@ -92,14 +97,14 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder {
}
@Override
public TopDocs[] topDocs(SearchHit[] hits) throws IOException {
public TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException {
Weight innerHitQueryWeight = createInnerHitQueryWeight();
TopDocs[] result = new TopDocs[hits.length];
TopDocsAndMaxScore[] result = new TopDocsAndMaxScore[hits.length];
for (int i = 0; i < hits.length; i++) {
SearchHit hit = hits[i];
String joinName = getSortedDocValue(joinFieldMapper.name(), context, hit.docId());
if (joinName == null) {
result[i] = Lucene.EMPTY_TOP_DOCS;
result[i] = new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN);
continue;
}
@ -107,7 +112,7 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder {
ParentIdFieldMapper parentIdFieldMapper =
joinFieldMapper.getParentIdFieldMapper(typeName, fetchChildInnerHits == false);
if (parentIdFieldMapper == null) {
result[i] = Lucene.EMPTY_TOP_DOCS;
result[i] = new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN);
continue;
}
@ -125,29 +130,41 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder {
q = context.mapperService().fullName(IdFieldMapper.NAME).termQuery(parentId, qsc);
}
Weight weight = context.searcher().createNormalizedWeight(q, false);
Weight weight = context.searcher().createWeight(context.searcher().rewrite(q), ScoreMode.COMPLETE_NO_SCORES, 1f);
if (size() == 0) {
TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) {
intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx);
}
result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, Float.NaN);
result[i] = new TopDocsAndMaxScore(
new TopDocs(new TotalHits(totalHitCountCollector.getTotalHits(), TotalHits.Relation.EQUAL_TO),
Lucene.EMPTY_SCORE_DOCS), Float.NaN);
} else {
int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
TopDocsCollector<?> topDocsCollector;
MaxScoreCollector maxScoreCollector = null;
if (sort() != null) {
topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores(), true);
topDocsCollector = TopFieldCollector.create(sort().sort, topN, Integer.MAX_VALUE);
if (trackScores()) {
maxScoreCollector = new MaxScoreCollector();
}
} else {
topDocsCollector = TopScoreDocCollector.create(topN);
topDocsCollector = TopScoreDocCollector.create(topN, Integer.MAX_VALUE);
maxScoreCollector = new MaxScoreCollector();
}
try {
for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) {
intersect(weight, innerHitQueryWeight, topDocsCollector, ctx);
intersect(weight, innerHitQueryWeight, MultiCollector.wrap(topDocsCollector, maxScoreCollector), ctx);
}
} finally {
clearReleasables(Lifetime.COLLECTION);
}
result[i] = topDocsCollector.topDocs(from(), size());
TopDocs topDocs = topDocsCollector.topDocs(from(), size());
float maxScore = Float.NaN;
if (maxScoreCollector != null) {
maxScore = maxScoreCollector.getMaxScore();
}
result[i] = new TopDocsAndMaxScore(topDocs, maxScore);
}
}
return result;

View File

@ -26,11 +26,14 @@ import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.ScorerSupplier;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Bits;
import org.elasticsearch.common.CheckedFunction;
@ -53,14 +56,17 @@ final class PercolateQuery extends Query implements Accountable {
private final Query candidateMatchesQuery;
private final Query verifiedMatchesQuery;
private final IndexSearcher percolatorIndexSearcher;
private final Query nonNestedDocsFilter;
PercolateQuery(String name, QueryStore queryStore, List<BytesReference> documents,
Query candidateMatchesQuery, IndexSearcher percolatorIndexSearcher, Query verifiedMatchesQuery) {
Query candidateMatchesQuery, IndexSearcher percolatorIndexSearcher,
Query nonNestedDocsFilter, Query verifiedMatchesQuery) {
this.name = name;
this.documents = Objects.requireNonNull(documents);
this.candidateMatchesQuery = Objects.requireNonNull(candidateMatchesQuery);
this.queryStore = Objects.requireNonNull(queryStore);
this.percolatorIndexSearcher = Objects.requireNonNull(percolatorIndexSearcher);
this.nonNestedDocsFilter = nonNestedDocsFilter;
this.verifiedMatchesQuery = Objects.requireNonNull(verifiedMatchesQuery);
}
@ -68,16 +74,17 @@ final class PercolateQuery extends Query implements Accountable {
public Query rewrite(IndexReader reader) throws IOException {
Query rewritten = candidateMatchesQuery.rewrite(reader);
if (rewritten != candidateMatchesQuery) {
return new PercolateQuery(name, queryStore, documents, rewritten, percolatorIndexSearcher, verifiedMatchesQuery);
return new PercolateQuery(name, queryStore, documents, rewritten, percolatorIndexSearcher,
nonNestedDocsFilter, verifiedMatchesQuery);
} else {
return this;
}
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
final Weight verifiedMatchesWeight = verifiedMatchesQuery.createWeight(searcher, false, boost);
final Weight candidateMatchesWeight = candidateMatchesQuery.createWeight(searcher, false, boost);
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
final Weight verifiedMatchesWeight = verifiedMatchesQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
final Weight candidateMatchesWeight = candidateMatchesQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
return new Weight(this) {
@Override
public void extractTerms(Set<Term> set) {
@ -91,7 +98,7 @@ final class PercolateQuery extends Query implements Accountable {
int result = twoPhaseIterator.approximation().advance(docId);
if (result == docId) {
if (twoPhaseIterator.matches()) {
if (needsScores) {
if (scoreMode.needsScores()) {
CheckedFunction<Integer, Query, IOException> percolatorQueries = queryStore.getQueries(leafReaderContext);
Query query = percolatorQueries.apply(docId);
Explanation detail = percolatorIndexSearcher.explain(query, 0);
@ -112,9 +119,9 @@ final class PercolateQuery extends Query implements Accountable {
return null;
}
final CheckedFunction<Integer, Query, IOException> queries = queryStore.getQueries(leafReaderContext);
if (needsScores) {
return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) {
final CheckedFunction<Integer, Query, IOException> percolatorQueries = queryStore.getQueries(leafReaderContext);
if (scoreMode.needsScores()) {
return new BaseScorer(this, approximation) {
float score;
@ -122,8 +129,14 @@ final class PercolateQuery extends Query implements Accountable {
boolean matchDocId(int docId) throws IOException {
Query query = percolatorQueries.apply(docId);
if (query != null) {
if (nonNestedDocsFilter != null) {
query = new BooleanQuery.Builder()
.add(query, Occur.MUST)
.add(nonNestedDocsFilter, Occur.FILTER)
.build();
}
TopDocs topDocs = percolatorIndexSearcher.search(query, 1);
if (topDocs.totalHits > 0) {
if (topDocs.scoreDocs.length > 0) {
score = topDocs.scoreDocs[0].score;
return true;
} else {
@ -142,7 +155,7 @@ final class PercolateQuery extends Query implements Accountable {
} else {
ScorerSupplier verifiedDocsScorer = verifiedMatchesWeight.scorerSupplier(leafReaderContext);
Bits verifiedDocsBits = Lucene.asSequentialAccessBits(leafReaderContext.reader().maxDoc(), verifiedDocsScorer);
return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) {
return new BaseScorer(this, approximation) {
@Override
public float score() throws IOException {
@ -159,7 +172,16 @@ final class PercolateQuery extends Query implements Accountable {
return true;
}
Query query = percolatorQueries.apply(docId);
return query != null && Lucene.exists(percolatorIndexSearcher, query);
if (query == null) {
return false;
}
if (nonNestedDocsFilter != null) {
query = new BooleanQuery.Builder()
.add(query, Occur.MUST)
.add(nonNestedDocsFilter, Occur.FILTER)
.build();
}
return Lucene.exists(percolatorIndexSearcher, query);
}
};
}
@ -182,6 +204,10 @@ final class PercolateQuery extends Query implements Accountable {
return percolatorIndexSearcher;
}
boolean excludesNestedDocs() {
return nonNestedDocsFilter != null;
}
List<BytesReference> getDocuments() {
return documents;
}
@ -241,15 +267,10 @@ final class PercolateQuery extends Query implements Accountable {
abstract static class BaseScorer extends Scorer {
final Scorer approximation;
final CheckedFunction<Integer, Query, IOException> percolatorQueries;
final IndexSearcher percolatorIndexSearcher;
BaseScorer(Weight weight, Scorer approximation, CheckedFunction<Integer, Query, IOException> percolatorQueries,
IndexSearcher percolatorIndexSearcher) {
BaseScorer(Weight weight, Scorer approximation) {
super(weight);
this.approximation = approximation;
this.percolatorQueries = percolatorQueries;
this.percolatorIndexSearcher = percolatorIndexSearcher;
}
@Override
@ -279,6 +300,10 @@ final class PercolateQuery extends Query implements Accountable {
abstract boolean matchDocId(int docId) throws IOException;
@Override
public float getMaxScore(int upTo) throws IOException {
return Float.MAX_VALUE;
}
}
}

View File

@ -29,10 +29,9 @@ import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.join.BitSetProducer;
@ -56,7 +55,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContent;
@ -605,13 +603,19 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
}
};
final IndexSearcher docSearcher;
final boolean excludeNestedDocuments;
if (docs.size() > 1 || docs.get(0).docs().size() > 1) {
assert docs.size() != 1 || docMapper.hasNestedObjects();
docSearcher = createMultiDocumentSearcher(analyzer, docs);
excludeNestedDocuments = docMapper.hasNestedObjects() && docs.stream()
.map(ParsedDocument::docs)
.mapToInt(List::size)
.anyMatch(size -> size > 1);
} else {
MemoryIndex memoryIndex = MemoryIndex.fromDocument(docs.get(0).rootDoc(), analyzer, true, false);
docSearcher = memoryIndex.createSearcher();
docSearcher.setQueryCache(null);
excludeNestedDocuments = false;
}
PercolatorFieldMapper.FieldType pft = (PercolatorFieldMapper.FieldType) fieldType;
@ -621,7 +625,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
percolateShardContext,
pft.mapUnmappedFieldsAsText);
return pft.percolateQuery(name, queryStore, documents, docSearcher, context.indexVersionCreated());
return pft.percolateQuery(name, queryStore, documents, docSearcher, excludeNestedDocuments, context.indexVersionCreated());
}
public String getField() {
@ -653,17 +657,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
DirectoryReader directoryReader = DirectoryReader.open(indexWriter);
assert directoryReader.leaves().size() == 1 : "Expected single leaf, but got [" + directoryReader.leaves().size() + "]";
final IndexSearcher slowSearcher = new IndexSearcher(directoryReader) {
@Override
public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(query, BooleanClause.Occur.MUST);
bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT);
return super.createNormalizedWeight(bq.build(), needsScores);
}
};
final IndexSearcher slowSearcher = new IndexSearcher(directoryReader);
slowSearcher.setQueryCache(null);
return slowSearcher;
} catch (IOException e) {
@ -738,7 +732,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
final Weight weight = searcher.createNormalizedWeight(query, false);
final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
final Scorer s = weight.scorer(context);
if (s != null) {

View File

@ -50,6 +50,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.hash.MurmurHash3;
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -244,7 +245,7 @@ public class PercolatorFieldMapper extends FieldMapper {
}
Query percolateQuery(String name, PercolateQuery.QueryStore queryStore, List<BytesReference> documents,
IndexSearcher searcher, Version indexVersion) throws IOException {
IndexSearcher searcher, boolean excludeNestedDocuments, Version indexVersion) throws IOException {
IndexReader indexReader = searcher.getIndexReader();
Tuple<BooleanQuery, Boolean> t = createCandidateQuery(indexReader, indexVersion);
Query candidateQuery = t.v1();
@ -261,7 +262,11 @@ public class PercolatorFieldMapper extends FieldMapper {
} else {
verifiedMatchesQuery = new MatchNoDocsQuery("multiple or nested docs or CoveringQuery could not be used");
}
return new PercolateQuery(name, queryStore, documents, candidateQuery, searcher, verifiedMatchesQuery);
Query filter = null;
if (excludeNestedDocuments) {
filter = Queries.newNonNestedFilter(indexVersion);
}
return new PercolateQuery(name, queryStore, documents, candidateQuery, searcher, filter, verifiedMatchesQuery);
}
Tuple<BooleanQuery, Boolean> createCandidateQuery(IndexReader indexReader, Version indexVersion) throws IOException {

View File

@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
@ -74,7 +75,8 @@ final class PercolatorMatchedSlotSubFetchPhase implements FetchSubPhase {
// See https://issues.apache.org/jira/browse/LUCENE-8055
// for now we just use version 6.0 version to find nested parent
final Version version = Version.V_6_0_0; //context.mapperService().getIndexSettings().getIndexVersionCreated();
Weight weight = percolatorIndexSearcher.createNormalizedWeight(Queries.newNonNestedFilter(version), false);
Weight weight = percolatorIndexSearcher.createWeight(percolatorIndexSearcher.rewrite(Queries.newNonNestedFilter(version)),
ScoreMode.COMPLETE_NO_SCORES, 1f);
Scorer s = weight.scorer(percolatorIndexSearcher.getIndexReader().leaves().get(0));
int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc();
BitSet rootDocs = BitSet.of(s.iterator(), memoryIndexMaxDoc);
@ -96,7 +98,7 @@ final class PercolatorMatchedSlotSubFetchPhase implements FetchSubPhase {
}
TopDocs topDocs = percolatorIndexSearcher.search(query, memoryIndexMaxDoc, new Sort(SortField.FIELD_DOC));
if (topDocs.totalHits == 0) {
if (topDocs.totalHits.value == 0) {
// This hit didn't match with a percolate query,
// likely to happen when percolating multiple documents
continue;

View File

@ -61,6 +61,7 @@ import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
@ -595,51 +596,52 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
Version v = Version.V_6_1_0;
MemoryIndex memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new IntPoint("int_field", 3)), new WhitespaceAnalyzer());
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
Query query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
Query query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")),
percolateSearcher, false, v);
TopDocs topDocs = shardSearcher.search(query, 1);
assertEquals(1L, topDocs.totalHits);
assertEquals(1L, topDocs.totalHits.value);
assertEquals(1, topDocs.scoreDocs.length);
assertEquals(0, topDocs.scoreDocs[0].doc);
memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new LongPoint("long_field", 7L)), new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 1);
assertEquals(1L, topDocs.totalHits);
assertEquals(1L, topDocs.totalHits.value);
assertEquals(1, topDocs.scoreDocs.length);
assertEquals(1, topDocs.scoreDocs[0].doc);
memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new HalfFloatPoint("half_float_field", 12)),
new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 1);
assertEquals(1L, topDocs.totalHits);
assertEquals(1L, topDocs.totalHits.value);
assertEquals(1, topDocs.scoreDocs.length);
assertEquals(2, topDocs.scoreDocs[0].doc);
memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new FloatPoint("float_field", 17)), new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 1);
assertEquals(1, topDocs.totalHits);
assertEquals(1, topDocs.totalHits.value);
assertEquals(1, topDocs.scoreDocs.length);
assertEquals(3, topDocs.scoreDocs[0].doc);
memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new DoublePoint("double_field", 21)), new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 1);
assertEquals(1, topDocs.totalHits);
assertEquals(1, topDocs.totalHits.value);
assertEquals(1, topDocs.scoreDocs.length);
assertEquals(4, topDocs.scoreDocs[0].doc);
memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new InetAddressPoint("ip_field",
forString("192.168.0.4"))), new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 1);
assertEquals(1, topDocs.totalHits);
assertEquals(1, topDocs.totalHits.value);
assertEquals(1, topDocs.scoreDocs.length);
assertEquals(5, topDocs.scoreDocs[0].doc);
}
@ -777,16 +779,16 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
memoryIndex.addField("field", "value1", new WhitespaceAnalyzer());
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore,
Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(3L, topDocs.totalHits);
Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(3L, topDocs.totalHits.value);
assertEquals(3, topDocs.scoreDocs.length);
assertEquals(0, topDocs.scoreDocs[0].doc);
assertEquals(1, topDocs.scoreDocs[1].doc);
assertEquals(4, topDocs.scoreDocs[2].doc);
topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10);
assertEquals(3L, topDocs.totalHits);
assertEquals(3L, topDocs.totalHits.value);
assertEquals(3, topDocs.scoreDocs.length);
assertEquals(0, topDocs.scoreDocs[0].doc);
assertEquals(1, topDocs.scoreDocs[1].doc);
@ -810,9 +812,9 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
memoryIndex.addField("field", "value", new WhitespaceAnalyzer());
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore,
Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(2L, topDocs.totalHits);
Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(2L, topDocs.totalHits.value);
assertEquals(2, topDocs.scoreDocs.length);
assertEquals(0, topDocs.scoreDocs[0].doc);
assertEquals(2, topDocs.scoreDocs[1].doc);
@ -860,17 +862,18 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
try (IndexReader ir = DirectoryReader.open(directory)){
IndexSearcher percolateSearcher = new IndexSearcher(ir);
PercolateQuery query = (PercolateQuery)
fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")),
percolateSearcher, false, v);
BooleanQuery candidateQuery = (BooleanQuery) query.getCandidateMatchesQuery();
assertThat(candidateQuery.clauses().get(0).getQuery(), instanceOf(CoveringQuery.class));
TopDocs topDocs = shardSearcher.search(query, 10);
assertEquals(2L, topDocs.totalHits);
assertEquals(2L, topDocs.totalHits.value);
assertEquals(2, topDocs.scoreDocs.length);
assertEquals(0, topDocs.scoreDocs[0].doc);
assertEquals(2, topDocs.scoreDocs[1].doc);
topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10);
assertEquals(2L, topDocs.totalHits);
assertEquals(2L, topDocs.totalHits.value);
assertEquals(2, topDocs.scoreDocs.length);
assertEquals(0, topDocs.scoreDocs[0].doc);
assertEquals(2, topDocs.scoreDocs[1].doc);
@ -890,18 +893,19 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
try (IndexReader ir = DirectoryReader.open(directory)){
IndexSearcher percolateSearcher = new IndexSearcher(ir);
PercolateQuery query = (PercolateQuery)
fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")),
percolateSearcher, false, v);
BooleanQuery candidateQuery = (BooleanQuery) query.getCandidateMatchesQuery();
assertThat(candidateQuery.clauses().get(0).getQuery(), instanceOf(TermInSetQuery.class));
TopDocs topDocs = shardSearcher.search(query, 10);
assertEquals(2L, topDocs.totalHits);
assertEquals(2L, topDocs.totalHits.value);
assertEquals(2, topDocs.scoreDocs.length);
assertEquals(1, topDocs.scoreDocs[0].doc);
assertEquals(2, topDocs.scoreDocs[1].doc);
topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10);
assertEquals(2L, topDocs.totalHits);
assertEquals(2L, topDocs.totalHits.value);
assertEquals(2, topDocs.scoreDocs.length);
assertEquals(1, topDocs.scoreDocs[0].doc);
assertEquals(2, topDocs.scoreDocs[1].doc);
@ -951,9 +955,9 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value1 value2 value3", new WhitespaceAnalyzer());
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(2L, topDocs.totalHits);
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(2L, topDocs.totalHits.value);
assertEquals(0, topDocs.scoreDocs[0].doc);
assertEquals(1, topDocs.scoreDocs[1].doc);
}
@ -985,25 +989,25 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value1 value4 value5", new WhitespaceAnalyzer());
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(1L, topDocs.totalHits);
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(1L, topDocs.totalHits.value);
assertEquals(0, topDocs.scoreDocs[0].doc);
memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value1 value2", new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(1L, topDocs.totalHits);
query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(1L, topDocs.totalHits.value);
assertEquals(0, topDocs.scoreDocs[0].doc);
memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value3", new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(1L, topDocs.totalHits);
query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(1L, topDocs.totalHits.value);
assertEquals(0, topDocs.scoreDocs[0].doc);
}
@ -1036,9 +1040,9 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
document.add(new IntPoint("int_field", 7));
MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(1L, topDocs.totalHits);
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(1L, topDocs.totalHits.value);
assertEquals(0, topDocs.scoreDocs[0].doc);
}
@ -1046,7 +1050,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
boolean requireScore = randomBoolean();
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
Query percolateQuery = fieldType.percolateQuery("_name", queryStore,
Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT);
Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT);
Query query = requireScore ? percolateQuery : new ConstantScoreQuery(percolateQuery);
TopDocs topDocs = shardSearcher.search(query, 100);
@ -1055,7 +1059,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
TopDocs controlTopDocs = shardSearcher.search(controlQuery, 100);
try {
assertThat(topDocs.totalHits, equalTo(controlTopDocs.totalHits));
assertThat(topDocs.totalHits.value, equalTo(controlTopDocs.totalHits.value));
assertThat(topDocs.scoreDocs.length, equalTo(controlTopDocs.scoreDocs.length));
for (int j = 0; j < topDocs.scoreDocs.length; j++) {
assertThat(topDocs.scoreDocs[j].doc, equalTo(controlTopDocs.scoreDocs[j].doc));
@ -1130,7 +1134,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
IndexSearcher shardSearcher) throws IOException {
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
Query percolateQuery = fieldType.percolateQuery("_name", queryStore,
Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT);
Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT);
return shardSearcher.search(percolateQuery, 10);
}
@ -1174,7 +1178,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) {
final IndexSearcher percolatorIndexSearcher = memoryIndex.createSearcher();
return new Weight(this) {
@ -1210,8 +1214,8 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
try {
Query query = leaf.apply(doc);
TopDocs topDocs = percolatorIndexSearcher.search(query, 1);
if (topDocs.totalHits > 0) {
if (needsScores) {
if (topDocs.scoreDocs.length > 0) {
if (scoreMode.needsScores()) {
_score[0] = topDocs.scoreDocs[0].score;
}
return true;
@ -1239,6 +1243,11 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
public float score() throws IOException {
return _score[0];
}
@Override
public float getMaxScore(int upTo) throws IOException {
return _score[0];
}
};
}
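Two Lucene 8 moves show up in the hunks above: Query.createWeight now receives a ScoreMode rather than a needsScores boolean (hence the scoreMode.needsScores() check), and every Scorer has to implement getMaxScore(int upTo), an upper bound Lucene can use to skip blocks of non-competitive documents. A self-contained Scorer sketch under those assumptions; it is illustrative only and not taken from this change:

import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

final class FixedScoreScorer extends Scorer {
    private final DocIdSetIterator disi;
    private final float score;

    FixedScoreScorer(Weight weight, DocIdSetIterator disi, float score) {
        super(weight);          // a Scorer still ties back to the Weight that created it
        this.disi = disi;
        this.score = score;
    }

    @Override
    public int docID() {
        return disi.docID();
    }

    @Override
    public DocIdSetIterator iterator() {
        return disi;
    }

    @Override
    public float score() throws IOException {
        return score;
    }

    @Override
    public float getMaxScore(int upTo) throws IOException {
        return score;           // best score this scorer can produce up to document upTo
    }
}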


@ -19,12 +19,6 @@
package org.elasticsearch.percolator;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
@ -40,8 +34,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.Rewriteable;
@ -63,7 +55,6 @@ import java.util.Map;
import java.util.Set;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.sameInstance;
public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQueryBuilder> {
@ -72,8 +63,8 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQ
PercolateQueryBuilder.DOCUMENTS_FIELD.getPreferredName()
};
private static String queryField = "field";
private static String aliasField = "alias";
protected static String queryField = "field";
protected static String aliasField = "alias";
private static String docType;
private String indexedDocumentIndex;
@ -249,48 +240,6 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQ
() -> parseQuery("{\"percolate\" : { \"document\": {}, \"documents\": [{}, {}], \"field\":\"" + queryField + "\"}}"));
}
public void testCreateNestedDocumentSearcher() throws Exception {
int numNestedDocs = randomIntBetween(2, 8);
List<ParseContext.Document> docs = new ArrayList<>(numNestedDocs);
for (int i = 0; i < numNestedDocs; i++) {
docs.add(new ParseContext.Document());
}
Collection<ParsedDocument> parsedDocument = Collections.singleton(
new ParsedDocument(null, null, "_id", "_type", null, docs, null, null, null));
Analyzer analyzer = new WhitespaceAnalyzer();
IndexSearcher indexSearcher = PercolateQueryBuilder.createMultiDocumentSearcher(analyzer, parsedDocument);
assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(numNestedDocs));
// ensure that any query get modified so that the nested docs are never included as hits:
Query query = new MatchAllDocsQuery();
BooleanQuery result = (BooleanQuery) indexSearcher.createNormalizedWeight(query, true).getQuery();
assertThat(result.clauses().size(), equalTo(2));
assertThat(result.clauses().get(0).getQuery(), sameInstance(query));
assertThat(result.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST));
assertThat(result.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
}
public void testCreateMultiDocumentSearcher() throws Exception {
int numDocs = randomIntBetween(2, 8);
List<ParsedDocument> docs = new ArrayList<>();
for (int i = 0; i < numDocs; i++) {
docs.add(new ParsedDocument(null, null, "_id", "_type", null,
Collections.singletonList(new ParseContext.Document()), null, null, null));
}
Analyzer analyzer = new WhitespaceAnalyzer();
IndexSearcher indexSearcher = PercolateQueryBuilder.createMultiDocumentSearcher(analyzer, docs);
assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(numDocs));
// ensure that any query get modified so that the nested docs are never included as hits:
Query query = new MatchAllDocsQuery();
BooleanQuery result = (BooleanQuery) indexSearcher.createNormalizedWeight(query, true).getQuery();
assertThat(result.clauses().size(), equalTo(2));
assertThat(result.clauses().get(0).getQuery(), sameInstance(query));
assertThat(result.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST));
assertThat(result.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
}
private static BytesReference randomSource(Set<String> usedFields) {
try {
// If we create two source that have the same field, but these fields have different kind of values (str vs. lng) then
@ -352,4 +301,5 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQ
assertEquals(query.getCandidateMatchesQuery(), aliasQuery.getCandidateMatchesQuery());
assertEquals(query.getVerifiedMatchesQuery(), aliasQuery.getVerifiedMatchesQuery());
}
}


@ -117,9 +117,9 @@ public class PercolateQueryTests extends ESTestCase {
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
// no scoring, wrapping it in a constant score query:
Query query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("a")),
new TermQuery(new Term("select", "a")), percolateSearcher, new MatchNoDocsQuery("")));
new TermQuery(new Term("select", "a")), percolateSearcher, null, new MatchNoDocsQuery("")));
TopDocs topDocs = shardSearcher.search(query, 10);
assertThat(topDocs.totalHits, equalTo(1L));
assertThat(topDocs.totalHits.value, equalTo(1L));
assertThat(topDocs.scoreDocs.length, equalTo(1));
assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
Explanation explanation = shardSearcher.explain(query, 0);
@ -127,9 +127,9 @@ public class PercolateQueryTests extends ESTestCase {
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score));
query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("b")),
new TermQuery(new Term("select", "b")), percolateSearcher, new MatchNoDocsQuery("")));
new TermQuery(new Term("select", "b")), percolateSearcher, null, new MatchNoDocsQuery("")));
topDocs = shardSearcher.search(query, 10);
assertThat(topDocs.totalHits, equalTo(3L));
assertThat(topDocs.totalHits.value, equalTo(3L));
assertThat(topDocs.scoreDocs.length, equalTo(3));
assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
explanation = shardSearcher.explain(query, 1);
@ -147,14 +147,14 @@ public class PercolateQueryTests extends ESTestCase {
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[2].score));
query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("c")),
new MatchAllDocsQuery(), percolateSearcher, new MatchAllDocsQuery()));
new MatchAllDocsQuery(), percolateSearcher, null, new MatchAllDocsQuery()));
topDocs = shardSearcher.search(query, 10);
assertThat(topDocs.totalHits, equalTo(4L));
assertThat(topDocs.totalHits.value, equalTo(4L));
query = new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")),
new TermQuery(new Term("select", "b")), percolateSearcher, new MatchNoDocsQuery(""));
new TermQuery(new Term("select", "b")), percolateSearcher, null, new MatchNoDocsQuery(""));
topDocs = shardSearcher.search(query, 10);
assertThat(topDocs.totalHits, equalTo(3L));
assertThat(topDocs.totalHits.value, equalTo(3L));
assertThat(topDocs.scoreDocs.length, equalTo(3));
assertThat(topDocs.scoreDocs[0].doc, equalTo(3));
explanation = shardSearcher.explain(query, 3);


@ -0,0 +1,57 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.percolator;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import java.io.IOException;
public class PercolateWithNestedQueryBuilderTests extends PercolateQueryBuilderTests {
@Override
protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
super.initializeAdditionalMappings(mapperService);
mapperService.merge("_doc", new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(
"_doc", "some_nested_object", "type=nested"))), MapperService.MergeReason.MAPPING_UPDATE);
}
public void testDetectsNestedDocuments() throws IOException {
QueryShardContext shardContext = createShardContext();
PercolateQueryBuilder builder = new PercolateQueryBuilder(queryField,
new BytesArray("{ \"foo\": \"bar\" }"), XContentType.JSON);
QueryBuilder rewrittenBuilder = rewriteAndFetch(builder, shardContext);
PercolateQuery query = (PercolateQuery) rewrittenBuilder.toQuery(shardContext);
assertFalse(query.excludesNestedDocs());
builder = new PercolateQueryBuilder(queryField,
new BytesArray("{ \"foo\": \"bar\", \"some_nested_object\": [ { \"baz\": 42 } ] }"), XContentType.JSON);
rewrittenBuilder = rewriteAndFetch(builder, shardContext);
query = (PercolateQuery) rewrittenBuilder.toQuery(shardContext);
assertTrue(query.excludesNestedDocs());
}
}


@ -46,7 +46,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase {
public void testHitsExecutionNeeded() {
PercolateQuery percolateQuery = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")),
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery());
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery());
PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(Settings.EMPTY,
emptyMap());
SearchContext searchContext = Mockito.mock(SearchContext.class);
@ -60,7 +60,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase {
public void testLocatePercolatorQuery() {
PercolateQuery percolateQuery = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")),
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery());
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery());
assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(new MatchAllDocsQuery()).size(), equalTo(0));
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER);
@ -94,7 +94,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase {
assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(disjunctionMaxQuery).get(0), sameInstance(percolateQuery));
PercolateQuery percolateQuery2 = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")),
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery());
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery());
bq = new BooleanQuery.Builder();
bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER);
assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()).size(), equalTo(0));


@ -30,6 +30,7 @@ import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.search.SearchHit;
@ -58,7 +59,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value", new WhitespaceAnalyzer());
PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(),
new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery());
new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery());
PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits);
assertNotNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
@ -72,7 +73,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value1", new WhitespaceAnalyzer());
PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(),
new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery());
new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery());
PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits);
assertNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
@ -85,7 +86,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value", new WhitespaceAnalyzer());
PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(),
new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery());
new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery());
PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits);
assertNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
@ -100,7 +101,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
scoreDocs[i] = new ScoreDoc(i, 1f);
}
TopDocs topDocs = new TopDocs(scoreDocs.length, scoreDocs, 1f);
TopDocs topDocs = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs);
IntStream stream = PercolatorMatchedSlotSubFetchPhase.convertTopDocsToSlots(topDocs, null);
int[] result = stream.toArray();
@ -117,7 +118,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
scoreDocs[2] = new ScoreDoc(8, 1f);
scoreDocs[3] = new ScoreDoc(11, 1f);
scoreDocs[4] = new ScoreDoc(14, 1f);
TopDocs topDocs = new TopDocs(scoreDocs.length, scoreDocs, 1f);
TopDocs topDocs = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs);
FixedBitSet bitSet = new FixedBitSet(15);
bitSet.set(2);
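The TopDocs constructions above change because Lucene 8 drops the max score from TopDocs; the constructor now takes a TotalHits plus the ScoreDoc array. A small illustrative helper, assuming only the Lucene classes already imported in this test:

import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;

static TopDocs topDocsOf(ScoreDoc... scoreDocs) {
    // the count is exact here because we know precisely how many hits we hand over
    return new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs);
}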


@ -1 +0,0 @@
a010e852be8d56efe1906e6da5292e4541239724


@ -0,0 +1 @@
97a3758487272ba4d15720b0ca15b0f980310c89


@ -12,7 +12,7 @@
analyzer:
my_analyzer:
tokenizer: standard
filter: ["standard", "lowercase", "my_collator"]
filter: ["lowercase", "my_collator"]
filter:
my_collator:
type: icu_collation


@ -1 +0,0 @@
88e0ed90d433a9088528485cd4f59311735d92a4


@ -0,0 +1 @@
12ed739794cd317754684308ddc5bdbdcc46cdde


@ -1 +0,0 @@
0daec9ac3c4bba5f91b1bc413c651b7a98313982


@ -0,0 +1 @@
4da6e5c17a17f0a9a99b518ea9985ea06996b63b


@ -1 +0,0 @@
f5af81eec04c1da0d6969cff18f360ff379b1bf7


@ -0,0 +1 @@
a36b2db18a2a22966ab0bf9fced775f22dd7029d


@ -13,7 +13,7 @@
analyzer:
my_analyzer:
tokenizer: standard
filter: ["standard", "lowercase", "my_metaphone"]
filter: ["lowercase", "my_metaphone"]
filter:
my_metaphone:
type: phonetic


@ -13,7 +13,7 @@
analyzer:
my_analyzer:
tokenizer: standard
filter: ["standard", "lowercase", "my_metaphone"]
filter: ["lowercase", "my_metaphone"]
filter:
my_metaphone:
type: phonetic


@ -13,7 +13,7 @@
analyzer:
my_analyzer:
tokenizer: standard
filter: ["standard", "lowercase", "beider_morse"]
filter: ["lowercase", "beider_morse"]
filter:
beider_morse:
type: phonetic


@ -12,7 +12,7 @@
analyzer:
my_analyzer:
tokenizer: standard
filter: ["standard", "lowercase", "my_metaphone"]
filter: ["lowercase", "my_metaphone"]
filter:
my_metaphone:
type: phonetic


@ -13,7 +13,7 @@
analyzer:
my_analyzer:
tokenizer: standard
filter: ["standard", "lowercase", "daitch_mokotoff"]
filter: ["lowercase", "daitch_mokotoff"]
filter:
daitch_mokotoff:
type: phonetic
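These YAML fixtures all drop "standard" from their filter chains because the standard token filter, long a no-op, does not survive the Lucene 8 upgrade, while the standard tokenizer itself is unchanged. For illustration only, a rough Java equivalent of such a chain built with Lucene's CustomAnalyzer (the phonetic and collation filters in the fixtures are plugin-provided and omitted here):

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.custom.CustomAnalyzer;

static Analyzer standardLowercaseAnalyzer() throws IOException {
    return CustomAnalyzer.builder()
        .withTokenizer("standard")      // StandardTokenizer is still available
        .addTokenFilter("lowercase")    // the "standard" filter entry is simply removed
        .build();
}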


@ -1 +0,0 @@
9e649088ee298293aa95a05391dff9cb0582648e


@ -0,0 +1 @@
5f1d360a47d2fd166e970d17c46b284830e64258


@ -1 +0,0 @@
47fb370054ba7413d050f13c177edf01180c31ca


@ -0,0 +1 @@
b07883b5e988d1d991503aa49d9b59059518825d


@ -1 +0,0 @@
bc0708acbac195772b67b5ad2e9c4683d27ff450


@ -0,0 +1 @@
1b46b3ee62932de7ba7b670820a13eb973ec5777


@ -21,6 +21,7 @@ package org.elasticsearch.example.rescore;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.search.rescore.RescoreContext;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
@ -68,7 +69,7 @@ public class ExampleRescoreBuilderTests extends AbstractWireSerializingTestCase<
String fieldFactor = null;
ExampleRescoreBuilder builder = new ExampleRescoreBuilder(factor, fieldFactor).windowSize(2);
RescoreContext context = builder.buildContext(null);
TopDocs docs = new TopDocs(10, new ScoreDoc[3], 0);
TopDocs docs = new TopDocs(new TotalHits(10, TotalHits.Relation.EQUAL_TO), new ScoreDoc[3]);
docs.scoreDocs[0] = new ScoreDoc(0, 1.0f);
docs.scoreDocs[1] = new ScoreDoc(1, 1.0f);
docs.scoreDocs[2] = new ScoreDoc(2, 1.0f);


@ -1 +0,0 @@
c547b30525ad80d0ceeaa40c2d3a901c7e76fd46


@ -0,0 +1 @@
fa8e0fbef3e3fcf49ace4a4153580070def770eb


@ -1 +0,0 @@
9c327295d54d5abd2684e00c3aefe58aa1caace7


@ -0,0 +1 @@
3d636541581e338a1be7e3e176aac73d7ae0b323


@ -1 +0,0 @@
73dd7703a94ec2357581f65ee7c1c4d618ff310f


@ -0,0 +1 @@
126faacb28d1b8cc1ab81d702973d057892120d1


@ -1 +0,0 @@
1c3802fa30990a1758f2df19d17fe2c95fc45870


@ -0,0 +1 @@
abd514ec02837f48b8c478287fde7cc5d6439ada


@ -1 +0,0 @@
8d7abdbb7900d7e6a76c391d8be07217c0d882ca


@ -0,0 +1 @@
778e87a263184b8ddcbb4ef9d244467933f32993


@ -1 +0,0 @@
011f78ae9d9a386fcf20ceea29ba30e75fb512e8


@ -0,0 +1 @@
96aff29ad966204c73f8dd98d8116f09e34b6ebd


@ -1 +0,0 @@
c3dd461a7cebdcacc77304660218513e10f89adb


@ -0,0 +1 @@
e72e2accebb1277c57dfe21bc011195eed91dbfd


@ -1 +0,0 @@
d63101181708d78eccc441b0d1193dd91d1a0bf1


@ -0,0 +1 @@
bf25587ebf6823781f5d7acffd7d65c46c21cb27


@ -1 +0,0 @@
22e56fbd44d6a47d7dddbdda3c17ce22ad0a6680


@ -0,0 +1 @@
6cad42923bcb6e1c6060ae1cbab574646e8c808e


@ -1 +0,0 @@
36b38a1d71045f5bee5dc40526f8d57084dbdc00


@ -0,0 +1 @@
e5841d7e877e51bbd2d325709353f5ab7e94b49a


@ -1 +0,0 @@
21eb8b111bcb94f4abb8c6402dfd10f51ecc0b38


@ -0,0 +1 @@
fefe17f6ac0c7d505c5051e96d0f4916fec2bf9e


@ -1 +0,0 @@
d60081c5641ed21aea82d5d0976b40e1f184c8e5


@ -0,0 +1 @@
22b0a9d9fb675f7c82a7a2b18f593f3278b40f11


@ -1 +0,0 @@
2d42b373546aa8923d25e4e9a673dd186064f9bd

Some files were not shown because too many files have changed in this diff.