Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-02-25 22:36:20 +00:00)
Merge remote-tracking branch 'es/master' into ccr
* es/master: (68 commits)
  Allow using distance measure in the geo context precision (#29273)
  Disable failing query in QueryBuilderBWCIT.
  Fixed quote_field_suffix in query_string (#29332)
  Use fixture to test repository-url module (#29355)
  Remove undocumented action.master.force_local setting (#29351)
  Enhance error for out of bounds byte size settings (#29338)
  Fix QueryAnalyzerTests.
  Fix HasChildQueryBuilderTests to not use the `classic` similarity.
  [Docs] Correct javadoc of GetIndexRequest (#29364)
  Make TransportRankEvalAction members final
  Add awaits fix for a query analyzer test
  Check presence of multi-types before validating new mapping (#29316)
  Add awaits fix for HasChildQueryBuilderTests
  Remove silent batch mode from install plugin (#29359)
  Align cat thread pool info to thread pool config (#29195)
  Track Lucene operations in engine explicitly (#29357)
  Build: Fix Java9 MR build (#29312)
  Reindex: Fix error in delete-by-query rest spec (#29318)
  Improve similarity integration. (#29187)
  Fix some query extraction bugs. (#29283)
  ...
Commit: 1f306c321e

.ci/java-versions.properties (new file, 8 lines)
@@ -0,0 +1,8 @@
# This file is used with all of the non-matrix tests in Jenkins.

# This .properties file defines the versions of Java with which to
# build and test Elasticsearch for this branch. Valid Java versions
# are 'java' or 'openjdk' followed by the major release number.

ES_BUILD_JAVA=java10
ES_RUNTIME_JAVA=java8
.ci/matrix-build-javas.yml (new file, 9 lines)

@@ -0,0 +1,9 @@
# This file is used as part of a matrix build in Jenkins where the
# values below are included as an axis of the matrix.

# This axis of the build matrix represents the versions of Java with
# which Elasticsearch will be built. Valid Java versions are 'java'
# or 'openjdk' followed by the major release number.

ES_BUILD_JAVA:
- java10
.ci/matrix-java-exclusions.yml (new file, 14 lines)

@@ -0,0 +1,14 @@
# This file is used as part of a matrix build in Jenkins where the
# values below are excluded from the test matrix.

# The yaml mapping below represents a single intersection on the build
# matrix where a test *should not* be run. The value of the exclude
# key is a list of maps.

# In this example all of the combinations defined in the matrix will
# run except for the test that builds with java10 and runs with java8.
# exclude:
# - ES_BUILD_JAVA: java10
# ES_RUNTIME_JAVA: java8

exclude:
.ci/matrix-runtime-javas.yml (new file, 10 lines)

@@ -0,0 +1,10 @@
# This file is used as part of a matrix build in Jenkins where the
# values below are included as an axis of the matrix.

# This axis of the build matrix represents the versions of Java on
# which Elasticsearch will be tested. Valid Java versions are 'java'
# or 'openjdk' followed by the major release number.

ES_RUNTIME_JAVA:
- java8
- java10
@@ -296,7 +296,6 @@ e.g. -Dtests.rest.suite=index,get,create/10_with_id
* `tests.rest.blacklist`: comma separated globs that identify tests that are
blacklisted and need to be skipped
e.g. -Dtests.rest.blacklist=index/*/Index document,get/10_basic/*
* `tests.rest.spec`: REST spec path (default /rest-api-spec/api)

Note that the REST tests, like all the integration tests, can be run against an external
cluster by specifying the `tests.cluster` property, which if present needs to contain a
@@ -477,12 +476,12 @@ branch. Finally, on a release branch, it will test against the most recent relea
=== BWC Testing against a specific remote/branch

Sometimes a backward compatibility change spans two versions. A common case is a new functionality
that needs a BWC bridge in and an unreleased versioned of a release branch (for example, 5.x).
that needs a BWC bridge in an unreleased versioned of a release branch (for example, 5.x).
To test the changes, you can instruct Gradle to build the BWC version from a another remote/branch combination instead of
pulling the release branch from GitHub. You do so using the `tests.bwc.remote` and `tests.bwc.refspec` system properties:
pulling the release branch from GitHub. You do so using the `tests.bwc.remote` and `tests.bwc.refspec.BRANCH` system properties:

-------------------------------------------------
./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x
./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x
-------------------------------------------------

The branch needs to be available on the remote that the BWC makes of the
@@ -497,7 +496,7 @@ will need to:
will contain your change.
. Create a branch called `index_req_bwc_5.x` off `5.x`. This will contain your bwc layer.
. Push both branches to your remote repository.
. Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x`.
. Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x`.

== Test coverage analysis

@@ -196,6 +196,7 @@ subprojects {
"org.elasticsearch:elasticsearch-cli:${version}": ':server:cli',
"org.elasticsearch:elasticsearch-core:${version}": ':libs:elasticsearch-core',
"org.elasticsearch:elasticsearch-nio:${version}": ':libs:elasticsearch-nio',
"org.elasticsearch:elasticsearch-x-content:${version}": ':libs:x-content',
"org.elasticsearch:elasticsearch-secure-sm:${version}": ':libs:secure-sm',
"org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest',
"org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer',
@@ -94,7 +94,7 @@ dependencies {
compile 'com.netflix.nebula:gradle-info-plugin:3.0.3'
compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r'
compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
compile 'de.thetaphi:forbiddenapis:2.4.1'
compile 'de.thetaphi:forbiddenapis:2.5'
compile 'org.apache.rat:apache-rat:0.11'
compile "org.elasticsearch:jna:4.5.1"
}
@@ -311,8 +311,8 @@ class BuildPlugin implements Plugin<Project> {
/** Adds repositories used by ES dependencies */
static void configureRepositories(Project project) {
RepositoryHandler repos = project.repositories
if (System.getProperty("repos.mavenlocal") != null) {
// with -Drepos.mavenlocal=true we can force checking the local .m2 repo which is
if (System.getProperty("repos.mavenLocal") != null) {
// with -Drepos.mavenLocal=true we can force checking the local .m2 repo which is
// useful for development ie. bwc tests where we install stuff in the local repository
// such that we don't have to pass hardcoded files to gradle
repos.mavenLocal()
@@ -551,7 +551,7 @@ class BuildPlugin implements Plugin<Project> {
if (project.licenseFile == null || project.noticeFile == null) {
throw new GradleException("Must specify license and notice file for project ${project.path}")
}
jarTask.into('META-INF') {
jarTask.metaInf {
from(project.licenseFile.parent) {
include project.licenseFile.name
}
|
@ -95,7 +95,7 @@ public class PluginBuildPlugin extends BuildPlugin {
|
||||
// we "upgrade" these optional deps to provided for plugins, since they will run
|
||||
// with a full elasticsearch server that includes optional deps
|
||||
compileOnly "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
|
||||
compileOnly "com.vividsolutions:jts:${project.versions.jts}"
|
||||
compileOnly "org.locationtech.jts:jts-core:${project.versions.jts}"
|
||||
compileOnly "org.apache.logging.log4j:log4j-api:${project.versions.log4j}"
|
||||
compileOnly "org.apache.logging.log4j:log4j-core:${project.versions.log4j}"
|
||||
compileOnly "org.elasticsearch:jna:${project.versions.jna}"
|
||||
|
@ -18,6 +18,7 @@
|
||||
*/
|
||||
package org.elasticsearch.gradle.precommit
|
||||
|
||||
import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
|
||||
import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.Task
|
||||
@@ -83,17 +84,14 @@ class PrecommitTasks {
getClass().getResource('/forbidden/es-all-signatures.txt')]
suppressAnnotations = ['**.SuppressForbidden']
}
Task mainForbidden = project.tasks.findByName('forbiddenApisMain')
if (mainForbidden != null) {
mainForbidden.configure {
signaturesURLs += getClass().getResource('/forbidden/es-server-signatures.txt')
}
}
Task testForbidden = project.tasks.findByName('forbiddenApisTest')
if (testForbidden != null) {
testForbidden.configure {
signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt')
signaturesURLs += getClass().getResource('/forbidden/http-signatures.txt')
project.tasks.withType(CheckForbiddenApis) {
// we do not use the += operator to add signatures, as conventionMappings of Gradle do not work when it's configured using withType:
if (name.endsWith('Test')) {
signaturesURLs = project.forbiddenApis.signaturesURLs +
[ getClass().getResource('/forbidden/es-test-signatures.txt'), getClass().getResource('/forbidden/http-signatures.txt') ]
} else {
signaturesURLs = project.forbiddenApis.signaturesURLs +
[ getClass().getResource('/forbidden/es-server-signatures.txt') ]
}
}
Task forbiddenApis = project.tasks.findByName('forbiddenApis')
@@ -144,21 +142,15 @@ class PrecommitTasks {
]
toolVersion = 7.5
}
for (String taskName : ['checkstyleMain', 'checkstyleJava9', 'checkstyleTest']) {
Task task = project.tasks.findByName(taskName)
if (task != null) {
project.tasks['check'].dependsOn.remove(task)
checkstyleTask.dependsOn(task)
task.dependsOn(copyCheckstyleConf)
task.inputs.file(checkstyleSuppressions)
task.reports {
html.enabled false
}
}
}

project.tasks.withType(Checkstyle) {
dependsOn(copyCheckstyleConf)
project.tasks.withType(Checkstyle) { task ->
project.tasks[JavaBasePlugin.CHECK_TASK_NAME].dependsOn.remove(task)
checkstyleTask.dependsOn(task)
task.dependsOn(copyCheckstyleConf)
task.inputs.file(checkstyleSuppressions)
task.reports {
html.enabled false
}
}

return checkstyleTask
@@ -494,7 +494,7 @@ class ClusterFormationTasks {
 * the short name requiring the path to already exist.
 */
final Object esPluginUtil = "${-> node.binPath().resolve('elasticsearch-plugin').toString()}"
final Object[] args = [esPluginUtil, 'install', file]
final Object[] args = [esPluginUtil, 'install', '--batch', file]
return configureExecTask(name, project, setup, node, args)
}
@@ -248,9 +248,7 @@
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]EsExecutors.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]ThreadBarrier.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]ThreadContext.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]XContentFactory.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]XContentHelper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]smile[/\\]SmileXContent.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]Discovery.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]DiscoverySettings.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ZenDiscovery.java" checks="LineLength" />
@@ -1,9 +1,9 @@
elasticsearch = 7.0.0-alpha1
lucene = 7.2.1
lucene = 7.3.0-snapshot-98a6b3d

# optional dependencies
spatial4j = 0.6
jts = 1.13
spatial4j = 0.7
jts = 1.15.0
jackson = 2.8.10
snakeyaml = 1.17
# when updating log4j, please update also docs/java-api/index.asciidoc
@@ -0,0 +1,350 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.get.MultiGetItemResponse;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import static org.hamcrest.Matchers.both;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThanOrEqualTo;

public class BulkProcessorIT extends ESRestHighLevelClientTestCase {

private static BulkProcessor.Builder initBulkProcessorBuilder(BulkProcessor.Listener listener) {
return BulkProcessor.builder(highLevelClient()::bulkAsync, listener);
}

public void testThatBulkProcessorCountIsCorrect() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
BulkProcessorTestListener listener = new BulkProcessorTestListener(latch);

int numDocs = randomIntBetween(10, 100);
try (BulkProcessor processor = initBulkProcessorBuilder(listener)
//let's make sure that the bulk action limit trips, one single execution will index all the documents
.setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs)
.setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
.build()) {

MultiGetRequest multiGetRequest = indexDocs(processor, numDocs);

latch.await();

assertThat(listener.beforeCounts.get(), equalTo(1));
assertThat(listener.afterCounts.get(), equalTo(1));
assertThat(listener.bulkFailures.size(), equalTo(0));
assertResponseItems(listener.bulkItems, numDocs);
assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs);
}
}

public void testBulkProcessorFlush() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
BulkProcessorTestListener listener = new BulkProcessorTestListener(latch);

int numDocs = randomIntBetween(10, 100);

try (BulkProcessor processor = initBulkProcessorBuilder(listener)
//let's make sure that this bulk won't be automatically flushed
.setConcurrentRequests(randomIntBetween(0, 10)).setBulkActions(numDocs + randomIntBetween(1, 100))
.setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) {

MultiGetRequest multiGetRequest = indexDocs(processor, numDocs);

assertThat(latch.await(randomInt(500), TimeUnit.MILLISECONDS), equalTo(false));
//we really need an explicit flush as none of the bulk thresholds was reached
processor.flush();
latch.await();

assertThat(listener.beforeCounts.get(), equalTo(1));
assertThat(listener.afterCounts.get(), equalTo(1));
assertThat(listener.bulkFailures.size(), equalTo(0));
assertResponseItems(listener.bulkItems, numDocs);
assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs);
}
}

public void testBulkProcessorConcurrentRequests() throws Exception {
int bulkActions = randomIntBetween(10, 100);
int numDocs = randomIntBetween(bulkActions, bulkActions + 100);
int concurrentRequests = randomIntBetween(0, 7);

int expectedBulkActions = numDocs / bulkActions;

final CountDownLatch latch = new CountDownLatch(expectedBulkActions);
int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1;
final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions);

BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch);

MultiGetRequest multiGetRequest;

try (BulkProcessor processor = initBulkProcessorBuilder(listener)
.setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions)
//set interval and size to high values
.setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) {

multiGetRequest = indexDocs(processor, numDocs);

latch.await();

assertThat(listener.beforeCounts.get(), equalTo(expectedBulkActions));
assertThat(listener.afterCounts.get(), equalTo(expectedBulkActions));
assertThat(listener.bulkFailures.size(), equalTo(0));
assertThat(listener.bulkItems.size(), equalTo(numDocs - numDocs % bulkActions));
}

closeLatch.await();

assertThat(listener.beforeCounts.get(), equalTo(totalExpectedBulkActions));
assertThat(listener.afterCounts.get(), equalTo(totalExpectedBulkActions));
assertThat(listener.bulkFailures.size(), equalTo(0));
assertThat(listener.bulkItems.size(), equalTo(numDocs));

Set<String> ids = new HashSet<>();
for (BulkItemResponse bulkItemResponse : listener.bulkItems) {
assertThat(bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false));
assertThat(bulkItemResponse.getIndex(), equalTo("test"));
assertThat(bulkItemResponse.getType(), equalTo("test"));
//with concurrent requests > 1 we can't rely on the order of the bulk requests
assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(numDocs)));
//we do want to check that we don't get duplicate ids back
assertThat(ids.add(bulkItemResponse.getId()), equalTo(true));
}

assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs);
}

public void testBulkProcessorWaitOnClose() throws Exception {
BulkProcessorTestListener listener = new BulkProcessorTestListener();

int numDocs = randomIntBetween(10, 100);
BulkProcessor processor = initBulkProcessorBuilder(listener)
//let's make sure that the bulk action limit trips, one single execution will index all the documents
.setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs)
.setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(randomIntBetween(1, 10),
RandomPicks.randomFrom(random(), ByteSizeUnit.values())))
.build();

MultiGetRequest multiGetRequest = indexDocs(processor, numDocs);
assertThat(processor.awaitClose(1, TimeUnit.MINUTES), is(true));
if (randomBoolean()) { // check if we can call it multiple times
if (randomBoolean()) {
assertThat(processor.awaitClose(1, TimeUnit.MINUTES), is(true));
} else {
processor.close();
}
}

assertThat(listener.beforeCounts.get(), greaterThanOrEqualTo(1));
assertThat(listener.afterCounts.get(), greaterThanOrEqualTo(1));
for (Throwable bulkFailure : listener.bulkFailures) {
logger.error("bulk failure", bulkFailure);
}
assertThat(listener.bulkFailures.size(), equalTo(0));
assertResponseItems(listener.bulkItems, numDocs);
assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs);
}

public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception {

String createIndexBody = "{\n" +
" \"settings\" : {\n" +
" \"index\" : {\n" +
" \"blocks.write\" : true\n" +
" }\n" +
" }\n" +
" \n" +
"}";

NStringEntity entity = new NStringEntity(createIndexBody, ContentType.APPLICATION_JSON);
Response response = client().performRequest("PUT", "/test-ro", Collections.emptyMap(), entity);
assertThat(response.getStatusLine().getStatusCode(), equalTo(200));

int bulkActions = randomIntBetween(10, 100);
int numDocs = randomIntBetween(bulkActions, bulkActions + 100);
int concurrentRequests = randomIntBetween(0, 10);

int expectedBulkActions = numDocs / bulkActions;

final CountDownLatch latch = new CountDownLatch(expectedBulkActions);
int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1;
final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions);

int testDocs = 0;
int testReadOnlyDocs = 0;
MultiGetRequest multiGetRequest = new MultiGetRequest();
BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch);

try (BulkProcessor processor = initBulkProcessorBuilder(listener)
.setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions)
//set interval and size to high values
.setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) {

for (int i = 1; i <= numDocs; i++) {
if (randomBoolean()) {
testDocs++;
processor.add(new IndexRequest("test", "test", Integer.toString(testDocs))
.source(XContentType.JSON, "field", "value"));
multiGetRequest.add("test", "test", Integer.toString(testDocs));
} else {
testReadOnlyDocs++;
processor.add(new IndexRequest("test-ro", "test", Integer.toString(testReadOnlyDocs))
.source(XContentType.JSON, "field", "value"));
}
}
}

closeLatch.await();

assertThat(listener.beforeCounts.get(), equalTo(totalExpectedBulkActions));
assertThat(listener.afterCounts.get(), equalTo(totalExpectedBulkActions));
assertThat(listener.bulkFailures.size(), equalTo(0));
assertThat(listener.bulkItems.size(), equalTo(testDocs + testReadOnlyDocs));

Set<String> ids = new HashSet<>();
Set<String> readOnlyIds = new HashSet<>();
for (BulkItemResponse bulkItemResponse : listener.bulkItems) {
assertThat(bulkItemResponse.getIndex(), either(equalTo("test")).or(equalTo("test-ro")));
assertThat(bulkItemResponse.getType(), equalTo("test"));
if (bulkItemResponse.getIndex().equals("test")) {
assertThat(bulkItemResponse.isFailed(), equalTo(false));
//with concurrent requests > 1 we can't rely on the order of the bulk requests
assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(testDocs)));
//we do want to check that we don't get duplicate ids back
assertThat(ids.add(bulkItemResponse.getId()), equalTo(true));
} else {
assertThat(bulkItemResponse.isFailed(), equalTo(true));
//with concurrent requests > 1 we can't rely on the order of the bulk requests
assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(testReadOnlyDocs)));
//we do want to check that we don't get duplicate ids back
assertThat(readOnlyIds.add(bulkItemResponse.getId()), equalTo(true));
}
}

assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), testDocs);
}

private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) throws Exception {
MultiGetRequest multiGetRequest = new MultiGetRequest();
for (int i = 1; i <= numDocs; i++) {
if (randomBoolean()) {
processor.add(new IndexRequest("test", "test", Integer.toString(i))
.source(XContentType.JSON, "field", randomRealisticUnicodeOfLengthBetween(1, 30)));
} else {
final String source = "{ \"index\":{\"_index\":\"test\",\"_type\":\"test\",\"_id\":\"" + Integer.toString(i) + "\"} }\n"
+ Strings.toString(JsonXContent.contentBuilder()
.startObject().field("field", randomRealisticUnicodeOfLengthBetween(1, 30)).endObject()) + "\n";
processor.add(new BytesArray(source), null, null, XContentType.JSON);
}
multiGetRequest.add("test", "test", Integer.toString(i));
}
return multiGetRequest;
}

private static void assertResponseItems(List<BulkItemResponse> bulkItemResponses, int numDocs) {
assertThat(bulkItemResponses.size(), is(numDocs));
int i = 1;
for (BulkItemResponse bulkItemResponse : bulkItemResponses) {
assertThat(bulkItemResponse.getIndex(), equalTo("test"));
assertThat(bulkItemResponse.getType(), equalTo("test"));
assertThat(bulkItemResponse.getId(), equalTo(Integer.toString(i++)));
assertThat("item " + i + " failed with cause: " + bulkItemResponse.getFailureMessage(),
bulkItemResponse.isFailed(), equalTo(false));
}
}

private static void assertMultiGetResponse(MultiGetResponse multiGetResponse, int numDocs) {
assertThat(multiGetResponse.getResponses().length, equalTo(numDocs));
int i = 1;
for (MultiGetItemResponse multiGetItemResponse : multiGetResponse) {
assertThat(multiGetItemResponse.getIndex(), equalTo("test"));
assertThat(multiGetItemResponse.getType(), equalTo("test"));
assertThat(multiGetItemResponse.getId(), equalTo(Integer.toString(i++)));
}
}

private static class BulkProcessorTestListener implements BulkProcessor.Listener {

private final CountDownLatch[] latches;
private final AtomicInteger beforeCounts = new AtomicInteger();
private final AtomicInteger afterCounts = new AtomicInteger();
private final List<BulkItemResponse> bulkItems = new CopyOnWriteArrayList<>();
private final List<Throwable> bulkFailures = new CopyOnWriteArrayList<>();

private BulkProcessorTestListener(CountDownLatch... latches) {
this.latches = latches;
}

@Override
public void beforeBulk(long executionId, BulkRequest request) {
beforeCounts.incrementAndGet();
}

@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
bulkItems.addAll(Arrays.asList(response.getItems()));
afterCounts.incrementAndGet();
for (CountDownLatch latch : latches) {
latch.countDown();
}
}

@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
bulkFailures.add(failure);
afterCounts.incrementAndGet();
for (CountDownLatch latch : latches) {
latch.countDown();
}
}
}

}
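The new test class above exercises the high-level REST client's BulkProcessor end to end. For orientation, the following is a minimal usage sketch distilled from those tests; the `client` variable, the index/type names, and the threshold values are illustrative assumptions rather than anything defined by this commit.

[source,java]
--------------------------------------------------
// Sketch only: assumes an already-initialized RestHighLevelClient named "client".
// Thresholds and names are illustrative; the tests above use randomized values.
BulkProcessor.Listener listener = new BulkProcessor.Listener() {
    @Override
    public void beforeBulk(long executionId, BulkRequest request) {
        // invoked just before a bulk request is executed
    }
    @Override
    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
        // invoked after a bulk request completes; the response carries per-item results
    }
    @Override
    public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
        // invoked when a whole bulk request fails
    }
};

BulkProcessor processor = BulkProcessor.builder(client::bulkAsync, listener)
        .setBulkActions(100)                                 // flush after 100 actions
        .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.MB))  // or after 1mb of request data
        .setFlushInterval(TimeValue.timeValueSeconds(5))     // or every 5 seconds
        .setConcurrentRequests(1)                            // allow one bulk request in flight
        .build();

processor.add(new IndexRequest("test", "test", "1").source(XContentType.JSON, "field", "value"));
processor.flush();                                           // force a flush if no threshold tripped
processor.awaitClose(1, TimeUnit.MINUTES);                   // drain outstanding requests on shutdown
--------------------------------------------------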
@@ -91,7 +91,7 @@ subprojects {

String buildMetadataKey = "bwc_refspec_${project.path.substring(1)}"
task checkoutBwcBranch(type: LoggedExec) {
String refspec = System.getProperty("tests.bwc.refspec", buildMetadata.get(buildMetadataKey, "${remote}/${bwcBranch}"))
String refspec = System.getProperty("tests.bwc.refspec.${bwcBranch}", buildMetadata.get(buildMetadataKey, "${remote}/${bwcBranch}"))
dependsOn fetchLatest
workingDir = checkoutDir
commandLine = ['git', 'checkout', refspec]
@@ -208,7 +208,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
@Override
protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
String pluginId = arguments.value(options);
boolean isBatch = options.has(batchOption) || System.console() == null;
final boolean isBatch = options.has(batchOption);
execute(terminal, pluginId, isBatch, env);
}

@@ -1,7 +1,7 @@
:version: 7.0.0-alpha1
:major-version: 7.x
:lucene_version: 7.2.1
:lucene_version_path: 7_2_1
:lucene_version: 7.3.0
:lucene_version_path: 7_3_0
:branch: master
:jdk: 1.8.0_131
:jdk_major: 8
@@ -12,13 +12,13 @@ to your classpath in order to use this type:
<dependency>
<groupId>org.locationtech.spatial4j</groupId>
<artifactId>spatial4j</artifactId>
<version>0.6</version> <1>
<version>0.7</version> <1>
</dependency>

<dependency>
<groupId>com.vividsolutions</groupId>
<artifactId>jts</artifactId>
<version>1.13</version> <2>
<groupId>org.locationtech.jts</groupId>
<artifactId>jts-core</artifactId>
<version>1.15.0</version> <2>
<exclusions>
<exclusion>
<groupId>xerces</groupId>
@@ -28,7 +28,7 @@ to your classpath in order to use this type:
</dependency>
-----------------------------------------------
<1> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.locationtech.spatial4j%22%20AND%20a%3A%22spatial4j%22[Maven Central]
<2> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22com.vividsolutions%22%20AND%20a%3A%22jts%22[Maven Central]
<2> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.locationtech.jts%22%20AND%20a%3A%22jts-core%22[Maven Central]

[source,java]
--------------------------------------------------
@@ -82,6 +82,9 @@ releases 2.0 and later do not support rivers.
[float]
==== Supported by Elasticsearch:

* https://github.com/elastic/ansible-elasticsearch[Ansible playbook for Elasticsearch]:
An officially supported ansible playbook for Elasticsearch. Tested with the latest version of 5.x and 6.x on Ubuntu 14.04/16.04, Debian 8, Centos 7.

* https://github.com/elastic/puppet-elasticsearch[Puppet]:
Elasticsearch puppet module.

@@ -40,6 +40,10 @@ NOTE: Bucketing aggregations can have sub-aggregations (bucketing or metric). Th
aggregations (one can nest an aggregation under a "parent" aggregation, which is itself a sub-aggregation of
another higher-level aggregation).

NOTE: Aggregations operate on the `double` representation of
the data. As a consequence, the result may be approximate when running on longs
whose absolute value is greater than `2^53`.

[float]
== Structuring Aggregations

@@ -113,14 +113,15 @@ in the table below.
|Field Name |Alias |Description
|`type` |`t` |The current (*) type of thread pool (`fixed` or `scaling`)
|`active` |`a` |The number of active threads in the current thread pool
|`size` |`s` |The number of threads in the current thread pool
|`pool_size` |`psz` |The number of threads in the current thread pool
|`queue` |`q` |The number of tasks in the queue for the current thread pool
|`queue_size` |`qs` |The maximum number of tasks permitted in the queue for the current thread pool
|`rejected` |`r` |The number of tasks rejected by the thread pool executor
|`largest` |`l` |The highest number of active threads in the current thread pool
|`completed` |`c` |The number of tasks completed by the thread pool executor
|`min` |`mi` |The configured minimum number of active threads allowed in the current thread pool
|`max` |`ma` |The configured maximum number of active threads allowed in the current thread pool
|`core` |`cr` |The configured core number of active threads allowed in the current thread pool
|`max` |`mx` |The configured maximum number of active threads allowed in the current thread pool
|`size` |`sz` |The configured fixed number of active threads allowed in the current thread pool
|`keep_alive` |`k` |The configured keep alive time for threads
|=======================================================================

@@ -118,8 +118,11 @@ POST test/_doc/1/_update

The update API also support passing a partial document,
which will be merged into the existing document (simple recursive merge,
inner merging of objects, replacing core "keys/values" and arrays). For
example:
inner merging of objects, replacing core "keys/values" and arrays).
To fully replace the existing document, the <<docs-index_,`index` API>> should
be used instead.
The following partial update adds a new field to the
existing document:

[source,js]
--------------------------------------------------
@@ -777,7 +777,7 @@ GET /bank/_search
// CONSOLE
// TEST[continued]

The difference here is that instead of passing `q=*` in the URI, we POST a JSON-style query request body to the `_search` API. We'll discuss this JSON query in the next section.
The difference here is that instead of passing `q=*` in the URI, we provide a JSON-style query request body to the `_search` API. We'll discuss this JSON query in the next section.

////
Hidden response just so we can assert that it is indeed the same but don't have
@@ -82,20 +82,6 @@ This similarity has the following options:

Type name: `BM25`

[float]
[[classic-similarity]]
==== Classic similarity

The classic similarity that is based on the TF/IDF model. This
similarity has the following option:

`discount_overlaps`::
Determines whether overlap tokens (Tokens with
0 position increment) are ignored when computing norm. By default this
is true, meaning overlap tokens do not count when computing norms.

Type name: `classic`

[float]
[[dfr]]
==== DFR similarity
@@ -541,7 +527,7 @@ PUT /index
"index": {
"similarity": {
"default": {
"type": "classic"
"type": "boolean"
}
}
}
@@ -563,7 +549,7 @@ PUT /index/_settings
"index": {
"similarity": {
"default": {
"type": "classic"
"type": "boolean"
}
}
}
@@ -93,12 +93,12 @@ which returns something similar to:
{
"commit" : {
"id" : "3M3zkw2GHMo2Y4h4/KFKCg==",
"generation" : 4,
"generation" : 3,
"user_data" : {
"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA",
"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ",
"local_checkpoint" : "-1",
"translog_generation" : "3",
"translog_generation" : "2",
"max_seq_no" : "-1",
"sync_id" : "AVvFY-071siAOuFGEO9P", <1>
"max_unsafe_auto_id_timestamp" : "-1"
@@ -1341,7 +1341,7 @@ Here is an example of a pipeline specifying custom pattern definitions:
{
"grok": {
"field": "message",
"patterns": ["my %{FAVORITE_DOG:dog} is colored %{RGB:color}"]
"patterns": ["my %{FAVORITE_DOG:dog} is colored %{RGB:color}"],
"pattern_definitions" : {
"FAVORITE_DOG" : "beagle",
"RGB" : "RED|GREEN|BLUE"
@@ -44,13 +44,9 @@ PUT my_index
"default_field": { <1>
"type": "text"
},
"classic_field": {
"type": "text",
"similarity": "classic" <2>
},
"boolean_sim_field": {
"type": "text",
"similarity": "boolean" <3>
"similarity": "boolean" <2>
}
}
}
@@ -59,5 +55,4 @@ PUT my_index
--------------------------------------------------
// CONSOLE
<1> The `default_field` uses the `BM25` similarity.
<2> The `classic_field` uses the `classic` similarity (ie TF/IDF).
<3> The `boolean_sim_field` uses the `boolean` similarity.
<2> The `boolean_sim_field` uses the `boolean` similarity.
@@ -154,12 +154,12 @@ are provided:
[float]
===== Accuracy

Geo_shape does not provide 100% accuracy and depending on how it is
configured it may return some false positives or false negatives for
certain queries. To mitigate this, it is important to select an
appropriate value for the tree_levels parameter and to adjust
expectations accordingly. For example, a point may be near the border of
a particular grid cell and may thus not match a query that only matches the
Geo_shape does not provide 100% accuracy and depending on how it is configured
it may return some false positives for `INTERSECTS`, `WITHIN` and `CONTAINS`
queries, and some false negatives for `DISJOINT` queries. To mitigate this, it
is important to select an appropriate value for the tree_levels parameter and
to adjust expectations accordingly. For example, a point may be near the border
of a particular grid cell and may thus not match a query that only matches the
cell right next to it -- even though the shape is very close to the point.

[float]
@@ -220,7 +220,7 @@ to Elasticsearch types:
|=======================================================================
|GeoJSON Type |WKT Type |Elasticsearch Type |Description

|`Point` |`POINT` |`point` |A single geographic coordinate.
|`Point` |`POINT` |`point` |A single geographic coordinate. Note: Elasticsearch uses WGS-84 coordinates only.
|`LineString` |`LINESTRING` |`linestring` |An arbitrary line given two or more points.
|`Polygon` |`POLYGON` |`polygon` |A _closed_ polygon whose first and last point
must match, thus requiring `n + 1` vertices to create an `n`-sided
@@ -378,22 +378,24 @@ POST /example/doc
// CONSOLE
// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836]

*IMPORTANT NOTE:* GeoJSON and WKT do not enforce a specific order for vertices
thus ambiguous polygons around the dateline and poles are possible. To alleviate
ambiguity the Open Geospatial Consortium (OGC)
http://www.opengeospatial.org/standards/sfa[Simple Feature Access] specification
defines the following vertex ordering:
*IMPORTANT NOTE:* WKT does not enforce a specific order for vertices thus
ambiguous polygons around the dateline and poles are possible.
https://tools.ietf.org/html/rfc7946#section-3.1.6[GeoJSON] mandates that the
outer polygon must be counterclockwise and interior shapes must be clockwise,
which agrees with the Open Geospatial Consortium (OGC)
http://www.opengeospatial.org/standards/sfa[Simple Feature Access]
specification for vertex ordering.

* Outer Ring - Counterclockwise
* Inner Ring(s) / Holes - Clockwise
Elasticsearch accepts both clockwise and counterclockwise polygons if they
appear not to cross the dateline (i.e. they cross less than 180° of longitude),
but for polygons that do cross the dateline (or for other polygons wider than
180°) Elasticsearch requires the vertex ordering to comply with the OGC and
GeoJSON specifications. Otherwise, an unintended polygon may be created and
unexpected query/filter results will be returned.

For polygons that do not cross the dateline, vertex order will not matter in
Elasticsearch. For polygons that do cross the dateline, Elasticsearch requires
vertex ordering to comply with the OGC specification. Otherwise, an unintended polygon
may be created and unexpected query/filter results will be returned.

The following provides an example of an ambiguous polygon. Elasticsearch will apply
OGC standards to eliminate ambiguity resulting in a polygon that crosses the dateline.
The following provides an example of an ambiguous polygon. Elasticsearch will
apply the GeoJSON standard to eliminate ambiguity resulting in a polygon that
crosses the dateline.

[source,js]
--------------------------------------------------
@@ -24,3 +24,16 @@ the index setting `index.mapping.nested_objects.limit`.
==== The `update_all_types` option has been removed

This option is useless now that all indices have at most one type.

=== The `classic` similarity has been removed

The `classic` similarity relied on coordination factors for scoring to be good
in presence of stopwords in the query. This feature has been removed from
Lucene, which means that the `classic` similarity now produces scores of lower
quality. It is advised to switch to `BM25` instead, which is widely accepted
as a better alternative.

=== Similarities fail when unsupported options are provided

An error will now be thrown when unknown configuration options are provided
to similarities. Such unknown parameters were ignored before.
@@ -39,7 +39,7 @@ from the outside. Defaults to the actual port assigned via `http.port`.
|`http.host` |Used to set the `http.bind_host` and the `http.publish_host` Defaults to `http.host` or `network.host`.

|`http.max_content_length` |The max content of an HTTP request. Defaults to
`100mb`. If set to greater than `Integer.MAX_VALUE`, it will be reset to 100mb.
`100mb`.

|`http.max_initial_line_length` |The max length of an HTTP URL. Defaults
to `4kb`
@@ -312,7 +312,7 @@ GET /_search

The example above creates a boolean query:

`(ny OR (new AND york)) city)`
`(ny OR (new AND york)) city`

that matches documents with the term `ny` or the conjunction `new AND york`.
By default the parameter `auto_generate_synonyms_phrase_query` is set to `true`.
@@ -23,11 +23,9 @@ search terms, but it is possible to specify other fields in the query syntax:

status:active

* where the `title` field contains `quick` or `brown`.
If you omit the OR operator the default operator will be used
* where the `title` field contains `quick` or `brown`

title:(quick OR brown)
title:(quick brown)

* where the `author` field contains the exact phrase `"john smith"`

@@ -36,7 +34,7 @@ search terms, but it is possible to specify other fields in the query syntax:
* where any of the fields `book.title`, `book.content` or `book.date` contains
`quick` or `brown` (note how we need to escape the `*` with a backslash):

book.\*:(quick brown)
book.\*:(quick OR brown)

* where the field `title` has any non-null value:

@@ -1,9 +1,7 @@
[[search-rank-eval]]
== Ranking Evaluation API

experimental[The ranking evaluation API is experimental and may be changed or removed completely in a future release,
as well as change in non-backwards compatible ways on minor versions updates. Elastic will take a best effort
approach to fix any issues, but experimental features are not subject to the support SLA of official GA features.]
experimental[The ranking evaluation API is experimental and may be changed or removed completely in a future release, as well as change in non-backwards compatible ways on minor versions updates. Elastic will take a best effort approach to fix any issues, but experimental features are not subject to the support SLA of official GA features.]

The ranking evaluation API allows to evaluate the quality of ranked search
results over a set of typical search queries. Given this set of queries and a
@@ -19,7 +17,7 @@ Users have a specific _information need_, e.g. they are looking for gift in a we
They usually enters some search terms into a search box or some other web form.
All of this information, together with meta information about the user (e.g. the browser, location, earlier preferences etc...) then gets translated into a query to the underlying search system.

The challenge for search engineers is to tweak this translation process from user entries to a concrete query in such a way, that the search results contain the most relevant information with respect to the users information_need.
The challenge for search engineers is to tweak this translation process from user entries to a concrete query in such a way, that the search results contain the most relevant information with respect to the users information need.
This can only be done if the search result quality is evaluated constantly across a representative test suite of typical user queries, so that improvements in the rankings for one particular query doesn't negatively effect the ranking for other types of queries.

In order to get started with search quality evaluation, three basic things are needed:
@@ -28,7 +26,7 @@ In order to get started with search quality evaluation, three basic things are n
. a collection of typical search requests that users enter into your system
. a set of document ratings that judge the documents relevance with respect to a search request+
It is important to note that one set of document ratings is needed per test query, and that
the relevance judgements are based on the _information_need_ of the user that entered the query.
the relevance judgements are based on the information need of the user that entered the query.

The ranking evaluation API provides a convenient way to use this information in a ranking evaluation request to calculate different search evaluation metrics. This gives a first estimation of your overall search quality and give you a measurement to optimize against when fine-tuning various aspect of the query generation in your application.

@@ -73,6 +73,19 @@ public final class Booleans {
throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed.");
}

private static boolean hasText(CharSequence str) {
if (str == null || str.length() == 0) {
return false;
}
int strLen = str.length();
for (int i = 0; i < strLen; i++) {
if (!Character.isWhitespace(str.charAt(i))) {
return true;
}
}
return false;
}

/**
*
* @param value text to parse.
@@ -80,14 +93,14 @@ public final class Booleans {
* @return see {@link #parseBoolean(String)}
*/
public static boolean parseBoolean(String value, boolean defaultValue) {
if (Strings.hasText(value)) {
if (hasText(value)) {
return parseBoolean(value);
}
return defaultValue;
}

public static Boolean parseBoolean(String value, Boolean defaultValue) {
if (Strings.hasText(value)) {
if (hasText(value)) {
return parseBoolean(value);
}
return defaultValue
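The change above swaps `Strings.hasText` for a private `hasText` helper so that `Booleans` no longer depends on `Strings`, while preserving the blank-input behaviour of the default-value overloads. A small hypothetical illustration of that behaviour:

[source,java]
--------------------------------------------------
// Hypothetical calls; values chosen only to show the default-value behaviour.
boolean a = Booleans.parseBoolean("true", false);                 // parses to true
boolean b = Booleans.parseBoolean("   ", true);                   // blank input falls back to the default: true
Boolean c = Booleans.parseBoolean((String) null, (Boolean) null); // null input returns the (null) default
--------------------------------------------------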
@@ -0,0 +1,70 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common;

/**
* Utility class for glob-like matching
*/
public class Glob {

/**
* Match a String against the given pattern, supporting the following simple
* pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy" matches (with an
* arbitrary number of pattern parts), as well as direct equality.
*
* @param pattern the pattern to match against
* @param str the String to match
* @return whether the String matches the given pattern
*/
public static boolean globMatch(String pattern, String str) {
if (pattern == null || str == null) {
return false;
}
int firstIndex = pattern.indexOf('*');
if (firstIndex == -1) {
return pattern.equals(str);
}
if (firstIndex == 0) {
if (pattern.length() == 1) {
return true;
}
int nextIndex = pattern.indexOf('*', firstIndex + 1);
if (nextIndex == -1) {
return str.endsWith(pattern.substring(1));
} else if (nextIndex == 1) {
// Double wildcard "**" - skipping the first "*"
return globMatch(pattern.substring(1), str);
}
String part = pattern.substring(1, nextIndex);
int partIndex = str.indexOf(part);
while (partIndex != -1) {
if (globMatch(pattern.substring(nextIndex), str.substring(partIndex + part.length()))) {
return true;
}
partIndex = str.indexOf(part, partIndex + 1);
}
return false;
}
return (str.length() >= firstIndex &&
pattern.substring(0, firstIndex).equals(str.substring(0, firstIndex)) &&
globMatch(pattern.substring(firstIndex), str.substring(firstIndex)));
}

}
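For reference, a few hypothetical calls showing the pattern styles that `Glob.globMatch` documents ("xxx*", "*xxx", "*xxx*", "xxx*yyy" and direct equality); the pattern and input strings are made up for illustration:

[source,java]
--------------------------------------------------
// Hypothetical examples of the pattern styles documented on globMatch.
Glob.globMatch("index-*", "index-2018-04-01");   // "xxx*"    -> true
Glob.globMatch("*-logs", "app-logs");            // "*xxx"    -> true
Glob.globMatch("*warn*", "some warning text");   // "*xxx*"   -> true
Glob.globMatch("app*logs", "app-2018-logs");     // "xxx*yyy" -> true
Glob.globMatch("exact", "exact");                // equality  -> true
Glob.globMatch("exact", "exactly");              //           -> false
--------------------------------------------------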
libs/x-content/build.gradle (new file, 85 lines)

@@ -0,0 +1,85 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks

/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

apply plugin: 'elasticsearch.build'
apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'

archivesBaseName = 'elasticsearch-x-content'

publishing {
publications {
nebula {
artifactId = archivesBaseName
}
}
}

dependencies {
compile "org.elasticsearch:elasticsearch-core:${version}"

compile "org.yaml:snakeyaml:${versions.snakeyaml}"
compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"

testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
testCompile "junit:junit:${versions.junit}"
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"

if (isEclipse == false || project.path == ":libs:x-content-tests") {
testCompile("org.elasticsearch.test:framework:${version}") {
exclude group: 'org.elasticsearch', module: 'elasticsearch-x-content'
}
}

}

forbiddenApisMain {
// x-content does not depend on server
// TODO: Need to decide how we want to handle for forbidden signatures with the changes to core
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
}

if (isEclipse) {
// in eclipse the project is under a fake root, we need to change around the source sets
sourceSets {
if (project.path == ":libs:x-content") {
main.java.srcDirs = ['java']
main.resources.srcDirs = ['resources']
} else {
test.java.srcDirs = ['java']
test.resources.srcDirs = ['resources']
}
}
}

thirdPartyAudit.excludes = [
// from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
'com.fasterxml.jackson.databind.ObjectMapper',
]

dependencyLicenses {
mapping from: /jackson-.*/, to: 'jackson'
}

jarHell.enabled = false
libs/x-content/src/main/eclipse-build.gradle (new file, 3 lines)

@@ -0,0 +1,3 @@

// this is just shell gradle file for eclipse to have separate projects for secure-sm src and tests
apply from: '../../build.gradle'
@@ -35,6 +35,8 @@ public class ParseField {
private String allReplacedWith = null;
private final String[] allNames;

private static final String[] EMPTY = new String[0];

/**
* @param name
* the primary name for this field. This will be returned by
@@ -46,7 +48,7 @@ public class ParseField {
public ParseField(String name, String... deprecatedNames) {
this.name = name;
if (deprecatedNames == null || deprecatedNames.length == 0) {
this.deprecatedNames = Strings.EMPTY_ARRAY;
this.deprecatedNames = EMPTY;
} else {
final HashSet<String> set = new HashSet<>();
Collections.addAll(set, deprecatedNames);
@@ -19,6 +19,8 @@

package org.elasticsearch.common.xcontent;

import org.elasticsearch.common.Booleans;

import java.io.IOException;
import java.util.Map;
@@ -19,6 +19,8 @@

package org.elasticsearch.common.xcontent;

import org.elasticsearch.common.Booleans;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@@ -19,8 +19,6 @@

package org.elasticsearch.common.xcontent;

import org.elasticsearch.common.util.CollectionUtils;

import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.Flushable;
@@ -35,6 +33,7 @@ import java.util.Collections;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
@@ -740,7 +739,9 @@ public final class XContentBuilder implements Closeable, Flushable {
//Path implements Iterable<Path> and causes endless recursion and a StackOverFlow if treated as an Iterable here
value((Path) value);
} else if (value instanceof Map) {
map((Map<String,?>) value, ensureNoSelfReferences);
@SuppressWarnings("unchecked")
final Map<String, ?> valueMap = (Map<String, ?>) value;
map(valueMap, ensureNoSelfReferences);
} else if (value instanceof Iterable) {
value((Iterable<?>) value, ensureNoSelfReferences);
} else if (value instanceof Object[]) {
@@ -799,7 +800,7 @@ public final class XContentBuilder implements Closeable, Flushable {
// checks that the map does not contain references to itself because
// iterating over map entries will cause a stackoverflow error
if (ensureNoSelfReferences) {
CollectionUtils.ensureNoSelfReferences(values);
ensureNoSelfReferences(values);
}

startObject();
@@ -828,7 +829,7 @@ public final class XContentBuilder implements Closeable, Flushable {
// checks that the iterable does not contain references to itself because
// iterating over entries will cause a stackoverflow error
if (ensureNoSelfReferences) {
CollectionUtils.ensureNoSelfReferences(values);
ensureNoSelfReferences(values);
}
startArray();
for (Object value : values) {
@@ -937,4 +938,39 @@ public final class XContentBuilder implements Closeable, Flushable {
throw new IllegalArgumentException(message);
}
}

private static void ensureNoSelfReferences(Object value) {
Iterable<?> it = convert(value);
if (it != null) {
ensureNoSelfReferences(it, value, Collections.newSetFromMap(new IdentityHashMap<>()));
}
}

private static Iterable<?> convert(Object value) {
if (value == null) {
return null;
}
if (value instanceof Map) {
return ((Map<?,?>) value).values();
} else if ((value instanceof Iterable) && (value instanceof Path == false)) {
return (Iterable<?>) value;
} else if (value instanceof Object[]) {
return Arrays.asList((Object[]) value);
} else {
return null;
}
}

private static void ensureNoSelfReferences(final Iterable<?> value, Object originalReference, final Set<Object> ancestors) {
if (value != null) {
if (ancestors.add(originalReference) == false) {
throw new IllegalArgumentException("Iterable object is self-referencing itself");
}
for (Object o : value) {
ensureNoSelfReferences(convert(o), o, ancestors);
}
ancestors.remove(originalReference);
}
}

}
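Aside (illustration, not part of this commit): the self-reference check above walks maps, iterables, and arrays while tracking ancestors in an identity-based set. A minimal standalone sketch of the same idea, using only java.util and hypothetical names, might look like this:

import java.util.*;

public class SelfReferenceCheck {
    // Throws if the nested structure of maps/iterables/arrays contains itself.
    static void ensureNoSelfReferences(Object value) {
        check(convert(value), value, Collections.newSetFromMap(new IdentityHashMap<>()));
    }

    private static Iterable<?> convert(Object value) {
        if (value instanceof Map) {
            return ((Map<?, ?>) value).values();
        } else if (value instanceof Iterable) {
            return (Iterable<?>) value;
        } else if (value instanceof Object[]) {
            return Arrays.asList((Object[]) value);
        }
        return null;
    }

    private static void check(Iterable<?> it, Object original, Set<Object> ancestors) {
        if (it == null) {
            return;
        }
        if (ancestors.add(original) == false) {
            throw new IllegalArgumentException("Iterable object is self-referencing itself");
        }
        for (Object o : it) {
            check(convert(o), o, ancestors);
        }
        ancestors.remove(original);
    }

    public static void main(String[] args) {
        Map<String, Object> map = new HashMap<>();
        map.put("self", map);            // introduces a cycle
        ensureNoSelfReferences(map);     // throws IllegalArgumentException
    }
}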
@@ -21,7 +21,6 @@ package org.elasticsearch.common.xcontent;

import com.fasterxml.jackson.dataformat.cbor.CBORConstants;
import com.fasterxml.jackson.dataformat.smile.SmileConstants;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.xcontent.cbor.CborXContent;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.common.xcontent.smile.SmileXContent;
@@ -154,7 +153,8 @@ public class XContentFactory {
return XContentType.JSON;
}
// Should we throw a failure here? Smile idea is to use it in bytes....
if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 && content.charAt(2) == SmileConstants.HEADER_BYTE_3) {
if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 &&
content.charAt(2) == SmileConstants.HEADER_BYTE_3) {
return XContentType.SMILE;
}
if (length > 2 && first == '-' && content.charAt(1) == '-' && content.charAt(2) == '-') {
@@ -186,7 +186,7 @@ public class XContentFactory {
public static XContent xContent(CharSequence content) {
XContentType type = xContentType(content);
if (type == null) {
throw new ElasticsearchParseException("Failed to derive xcontent");
throw new XContentParseException("Failed to derive xcontent");
}
return xContent(type);
}
@@ -213,7 +213,7 @@ public class XContentFactory {
public static XContent xContent(byte[] data, int offset, int length) {
XContentType type = xContentType(data, offset, length);
if (type == null) {
throw new ElasticsearchParseException("Failed to derive xcontent");
throw new XContentParseException("Failed to derive xcontent");
}
return xContent(type);
}
@@ -278,7 +278,8 @@ public class XContentFactory {
if (first == '{') {
return XContentType.JSON;
}
if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 && bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) {
if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 &&
bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) {
return XContentType.SMILE;
}
if (length > 2 && first == '-' && bytes[offset + 1] == '-' && bytes[offset + 2] == '-') {
@@ -103,6 +103,57 @@ public interface XContentGenerator extends Closeable, Flushable {

void copyCurrentStructure(XContentParser parser) throws IOException;

default void copyCurrentEvent(XContentParser parser) throws IOException {
switch (parser.currentToken()) {
case START_OBJECT:
writeStartObject();
break;
case END_OBJECT:
writeEndObject();
break;
case START_ARRAY:
writeStartArray();
break;
case END_ARRAY:
writeEndArray();
break;
case FIELD_NAME:
writeFieldName(parser.currentName());
break;
case VALUE_STRING:
if (parser.hasTextCharacters()) {
writeString(parser.textCharacters(), parser.textOffset(), parser.textLength());
} else {
writeString(parser.text());
}
break;
case VALUE_NUMBER:
switch (parser.numberType()) {
case INT:
writeNumber(parser.intValue());
break;
case LONG:
writeNumber(parser.longValue());
break;
case FLOAT:
writeNumber(parser.floatValue());
break;
case DOUBLE:
writeNumber(parser.doubleValue());
break;
}
break;
case VALUE_BOOLEAN:
writeBoolean(parser.booleanValue());
break;
case VALUE_NULL:
writeNull();
break;
case VALUE_EMBEDDED_OBJECT:
writeBinary(parser.binaryValue());
}
}

/**
* Returns {@code true} if this XContentGenerator has been closed. A closed generator can not do any more output.
*/
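Aside (illustration, not part of this commit): the default copyCurrentEvent above replays parser events into a generator, the same idea jackson-core exposes directly. A small standalone example using plain Jackson (already a dependency of this module), with a made-up class name:

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import java.io.StringWriter;

public class CopyStructureExample {
    public static void main(String[] args) throws Exception {
        JsonFactory factory = new JsonFactory();
        StringWriter out = new StringWriter();
        try (JsonParser parser = factory.createParser("{\"foo\":[1,2,{\"bar\":true}]}");
             JsonGenerator generator = factory.createGenerator(out)) {
            parser.nextToken();                     // position on START_OBJECT
            generator.copyCurrentStructure(parser); // replays the event stream into the generator
        }
        System.out.println(out);                    // {"foo":[1,2,{"bar":true}]}
    }
}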
@@ -23,12 +23,12 @@ import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.dataformat.cbor.CBORFactory;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentGenerator;
import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

@@ -70,7 +70,7 @@ public class CborXContent implements XContent {

@Override
public byte streamSeparator() {
throw new ElasticsearchParseException("cbor does not support stream parsing...");
throw new XContentParseException("cbor does not support stream parsing...");
}

@Override
@@ -28,16 +28,15 @@ import com.fasterxml.jackson.core.json.JsonWriteContext;
import com.fasterxml.jackson.core.util.DefaultIndenter;
import com.fasterxml.jackson.core.util.DefaultPrettyPrinter;
import com.fasterxml.jackson.core.util.JsonGeneratorDelegate;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentGenerator;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.support.filtering.FilterPathBasedFilter;
import org.elasticsearch.core.internal.io.IOUtils;

import java.io.BufferedInputStream;
import java.io.IOException;
@@ -325,7 +324,7 @@ public class JsonXContentGenerator implements XContentGenerator {
} else {
writeStartRaw(name);
flush();
Streams.copy(content, os);
copyStream(content, os);
writeEndRaw();
}
}
@@ -393,7 +392,40 @@ public class JsonXContentGenerator implements XContentGenerator {
if (parser instanceof JsonXContentParser) {
generator.copyCurrentStructure(((JsonXContentParser) parser).parser);
} else {
XContentHelper.copyCurrentStructure(this, parser);
copyCurrentStructure(this, parser);
}
}

/**
* Low level implementation detail of {@link XContentGenerator#copyCurrentStructure(XContentParser)}.
*/
private static void copyCurrentStructure(XContentGenerator destination, XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();

// Let's handle field-name separately first
if (token == XContentParser.Token.FIELD_NAME) {
destination.writeFieldName(parser.currentName());
token = parser.nextToken();
// fall-through to copy the associated value
}

switch (token) {
case START_ARRAY:
destination.writeStartArray();
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
copyCurrentStructure(destination, parser);
}
destination.writeEndArray();
break;
case START_OBJECT:
destination.writeStartObject();
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
copyCurrentStructure(destination, parser);
}
destination.writeEndObject();
break;
default: // others are simple:
destination.copyCurrentEvent(parser);
}
}

@@ -423,4 +455,37 @@ public class JsonXContentGenerator implements XContentGenerator {
public boolean isClosed() {
return generator.isClosed();
}

/**
* Copy the contents of the given InputStream to the given OutputStream.
* Closes both streams when done.
*
* @param in the stream to copy from
* @param out the stream to copy to
* @return the number of bytes copied
* @throws IOException in case of I/O errors
*/
private static long copyStream(InputStream in, OutputStream out) throws IOException {
Objects.requireNonNull(in, "No InputStream specified");
Objects.requireNonNull(out, "No OutputStream specified");
final byte[] buffer = new byte[8192];
boolean success = false;
try {
long byteCount = 0;
int bytesRead;
while ((bytesRead = in.read(buffer)) != -1) {
out.write(buffer, 0, bytesRead);
byteCount += bytesRead;
}
out.flush();
success = true;
return byteCount;
} finally {
if (success) {
IOUtils.close(in, out);
} else {
IOUtils.closeWhileHandlingException(in, out);
}
}
}
}
@@ -53,7 +53,8 @@ public class SmileXContent implements XContent {

static {
smileFactory = new SmileFactory();
smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false); // for now, this is an overhead, might make sense for web sockets
// for now, this is an overhead, might make sense for web sockets
smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false);
smileFactory.configure(SmileFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now...
// Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.smile.SmileGenerator#close() method
smileFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);
@@ -19,14 +19,15 @@

package org.elasticsearch.common.xcontent.support;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Numbers;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.CharBuffer;
import java.util.ArrayList;
import java.util.HashMap;
@@ -178,6 +179,34 @@ public abstract class AbstractXContentParser implements XContentParser {

protected abstract int doIntValue() throws IOException;

/** Return the long that {@code stringValue} stores or throws an exception if the
* stored value cannot be converted to a long that stores the exact same
* value and {@code coerce} is false. */
private static long toLong(String stringValue, boolean coerce) {
try {
return Long.parseLong(stringValue);
} catch (NumberFormatException e) {
// we will try again with BigDecimal
}

final BigInteger bigIntegerValue;
try {
BigDecimal bigDecimalValue = new BigDecimal(stringValue);
bigIntegerValue = coerce ? bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact();
} catch (ArithmeticException e) {
throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part");
} catch (NumberFormatException e) {
throw new IllegalArgumentException("For input string: \"" + stringValue + "\"");
}

if (bigIntegerValue.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0 ||
bigIntegerValue.compareTo(BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long");
}

return bigIntegerValue.longValue();
}

@Override
public long longValue() throws IOException {
return longValue(DEFAULT_NUMBER_COERCE_POLICY);
@@ -188,7 +217,7 @@ public abstract class AbstractXContentParser implements XContentParser {
Token token = currentToken();
if (token == Token.VALUE_STRING) {
checkCoerceString(coerce, Long.class);
return Numbers.toLong(text(), coerce);
return toLong(text(), coerce);
}
long result = doLongValue();
ensureNumberConversion(coerce, result, Long.class);
@@ -369,7 +398,7 @@ public abstract class AbstractXContentParser implements XContentParser {
if (token == XContentParser.Token.START_ARRAY) {
token = parser.nextToken();
} else {
throw new ElasticsearchParseException("Failed to parse list: expecting "
throw new XContentParseException(parser.getTokenLocation(), "Failed to parse list: expecting "
+ XContentParser.Token.START_ARRAY + " but got " + token);
}
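Aside (illustration, not part of this commit): the new toLong helper above parses with Long.parseLong first and falls back to BigDecimal so that values like "42.0" can be coerced. A simplified, self-contained sketch of that behaviour (the bit-length range check here is an assumption standing in for the explicit bounds comparison above):

import java.math.BigDecimal;
import java.math.BigInteger;

public class ToLongExample {
    // Simplified string-to-long conversion with optional coercion.
    static long toLong(String value, boolean coerce) {
        try {
            return Long.parseLong(value);
        } catch (NumberFormatException e) {
            // fall through and try BigDecimal
        }
        BigDecimal decimal = new BigDecimal(value);
        BigInteger integer = coerce ? decimal.toBigInteger() : decimal.toBigIntegerExact();
        if (integer.bitLength() > 63) {
            throw new IllegalArgumentException("Value [" + value + "] is out of range for a long");
        }
        return integer.longValue();
    }

    public static void main(String[] args) {
        System.out.println(toLong("42", false));   // 42
        System.out.println(toLong("42.0", true));  // 42 (coerced)
        System.out.println(toLong("42.5", true));  // 42 (fraction dropped when coercing)
        // toLong("42.5", false) would throw ArithmeticException from toBigIntegerExact()
    }
}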
@@ -20,7 +20,7 @@

package org.elasticsearch.common.xcontent.support.filtering;

import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.Glob;

import java.util.ArrayList;
import java.util.List;
@@ -49,7 +49,7 @@ public class FilterPath {
}

public FilterPath matchProperty(String name) {
if ((next != null) && (simpleWildcard || doubleWildcard || Regex.simpleMatch(segment, name))) {
if ((next != null) && (simpleWildcard || doubleWildcard || Glob.globMatch(segment, name))) {
return next;
}
return null;
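Aside (illustration, not part of this commit, and not the actual org.elasticsearch.common.Glob implementation): FilterPath now relies on simple '*'-wildcard matching instead of Regex. A minimal standalone matcher in the same spirit:

public class GlobExample {
    // Minimal '*' wildcard matcher (illustration only).
    static boolean globMatch(String pattern, String str) {
        int star = pattern.indexOf('*');
        if (star == -1) {
            return pattern.equals(str);
        }
        String prefix = pattern.substring(0, star);
        if (str.startsWith(prefix) == false) {
            return false;
        }
        String rest = pattern.substring(star + 1);
        if (rest.isEmpty()) {
            return true;
        }
        for (int i = star; i <= str.length(); i++) {
            if (globMatch(rest, str.substring(i))) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(globMatch("foo*bar", "foo.baz.bar")); // true
        System.out.println(globMatch("foo*", "foobar"));         // true
        System.out.println(globMatch("*.id", "user.name"));      // false
    }
}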
@@ -20,7 +20,6 @@
package org.elasticsearch.common.xcontent.support.filtering;

import com.fasterxml.jackson.core.filter.TokenFilter;
import org.elasticsearch.common.util.CollectionUtils;

import java.util.ArrayList;
import java.util.List;
@@ -47,7 +46,7 @@ public class FilterPathBasedFilter extends TokenFilter {
private final boolean inclusive;

public FilterPathBasedFilter(FilterPath[] filters, boolean inclusive) {
if (CollectionUtils.isEmpty(filters)) {
if (filters == null || filters.length == 0) {
throw new IllegalArgumentException("filters cannot be null or empty");
}
this.inclusive = inclusive;
libs/x-content/src/test/eclipse-build.gradle (new file, 7 lines)
@@ -0,0 +1,7 @@

// this is just shell gradle file for eclipse to have separate projects for secure-sm src and tests
apply from: '../../build.gradle'

dependencies {
    testCompile project(':libs:x-content')
}
@@ -123,7 +123,7 @@ public class XContentParserTests extends ESTestCase {
readList(source);
fail("should have thrown a parse exception");
} catch (Exception e) {
assertThat(e, instanceOf(ElasticsearchParseException.class));
assertThat(e, instanceOf(XContentParseException.class));
assertThat(e.getMessage(), containsString("Failed to parse list"));
}
}
@@ -25,8 +25,9 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
import org.elasticsearch.index.analysis.MultiTermAwareComponent;

public class TrimTokenFilterFactory extends AbstractTokenFilterFactory {
public class TrimTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent {

private static final String UPDATE_OFFSETS_KEY = "update_offsets";

@@ -41,4 +42,9 @@ public class TrimTokenFilterFactory extends AbstractTokenFilterFactory {
public TokenStream create(TokenStream tokenStream) {
return new TrimFilter(tokenStream);
}

@Override
public Object getMultiTermComponent() {
return this;
}
}
@@ -1 +0,0 @@
51fbb33cdb17bb36a0e86485685bba18eb1c2ccf
@@ -0,0 +1 @@
38ff5a1f4bcbfb6e1ffacd3263175c2a1ba23e9f
@@ -27,9 +27,9 @@ import java.util.Objects;
public final class Location {
private final String sourceName;
private final int offset;

/**
* Create a new Location
* @param sourceName script's name
* @param offset character offset of script element
*/
@@ -37,7 +37,7 @@ public final class Location {
this.sourceName = Objects.requireNonNull(sourceName);
this.offset = offset;
}

/**
* Return the script's name
*/
@@ -68,43 +68,31 @@

// This maximum length is theoretically 65535 bytes, but as it's CESU-8 encoded we don't know how large it is in bytes, so be safe
private static final int MAX_NAME_LENGTH = 256;

/** Computes the file name (mostly important for stacktraces) */
public static String computeSourceName(String scriptName, String source) {
public static String computeSourceName(String scriptName) {
StringBuilder fileName = new StringBuilder();
if (scriptName.equals(PainlessScriptEngine.INLINE_NAME)) {
// its an anonymous script, include at least a portion of the source to help identify which one it is
// but don't create stacktraces with filenames that contain newlines or huge names.

// truncate to the first newline
int limit = source.indexOf('\n');
if (limit >= 0) {
int limit2 = source.indexOf('\r');
if (limit2 >= 0) {
limit = Math.min(limit, limit2);
}
} else {
limit = source.length();
// truncate to the first newline
int limit = scriptName.indexOf('\n');
if (limit >= 0) {
int limit2 = scriptName.indexOf('\r');
if (limit2 >= 0) {
limit = Math.min(limit, limit2);
}

// truncate to our limit
limit = Math.min(limit, MAX_NAME_LENGTH);
fileName.append(source, 0, limit);

// if we truncated, make it obvious
if (limit != source.length()) {
fileName.append(" ...");
}
fileName.append(" @ <inline script>");
} else {
// its a named script, just use the name
// but don't trust this has a reasonable length!
if (scriptName.length() > MAX_NAME_LENGTH) {
fileName.append(scriptName, 0, MAX_NAME_LENGTH);
fileName.append(" ...");
} else {
fileName.append(scriptName);
}
limit = scriptName.length();
}

// truncate to our limit
limit = Math.min(limit, MAX_NAME_LENGTH);
fileName.append(scriptName, 0, limit);

// if we truncated, make it obvious
if (limit != scriptName.length()) {
fileName.append(" ...");
}
return fileName.toString();
}
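Aside (illustration, not part of this commit): after this change the source name is derived from the script name alone, truncated at the first newline and at MAX_NAME_LENGTH. A simplified standalone sketch of that truncation logic:

public class SourceNameExample {
    private static final int MAX_NAME_LENGTH = 256;

    // Simplified sketch: cut at the first newline/carriage return, cap the
    // length, and mark any truncation so stack traces stay readable.
    static String computeSourceName(String scriptName) {
        StringBuilder fileName = new StringBuilder();
        int limit = scriptName.indexOf('\n');
        if (limit >= 0) {
            int limit2 = scriptName.indexOf('\r');
            if (limit2 >= 0) {
                limit = Math.min(limit, limit2);
            }
        } else {
            limit = scriptName.length();
        }
        limit = Math.min(limit, MAX_NAME_LENGTH);
        fileName.append(scriptName, 0, limit);
        if (limit != scriptName.length()) {
            fileName.append(" ...");
        }
        return fileName.toString();
    }

    public static void main(String[] args) {
        System.out.println(computeSourceName("my_script"));              // my_script
        System.out.println(computeSourceName("doc['f'].value\nreturn")); // doc['f'].value ...
    }
}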
@@ -91,14 +91,7 @@ public interface PainlessScript {
scriptStack.add(element.toString());
}
}
// build a name for the script:
final String name;
if (PainlessScriptEngine.INLINE_NAME.equals(getName())) {
name = getSource();
} else {
name = getName();
}
ScriptException scriptException = new ScriptException("runtime error", t, scriptStack, name, PainlessScriptEngine.NAME);
ScriptException scriptException = new ScriptException("runtime error", t, scriptStack, getName(), PainlessScriptEngine.NAME);
for (Map.Entry<String, List<String>> entry : extraMetadata.entrySet()) {
scriptException.addMetadata(entry.getKey(), entry.getValue());
}
@@ -119,11 +119,6 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr
return NAME;
}

/**
* When a script is anonymous (inline), we give it this name.
*/
static final String INLINE_NAME = "<inline>";

@Override
public <T> T compile(String scriptName, String scriptSource, ScriptContext<T> context, Map<String, String> params) {
Compiler compiler = contextsToCompilers.get(context);
@@ -425,7 +420,7 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr
return AccessController.doPrivileged(new PrivilegedAction<Object>() {
@Override
public Object run() {
String name = scriptName == null ? INLINE_NAME : scriptName;
String name = scriptName == null ? source : scriptName;
Constructor<?> constructor = compiler.compile(loader, new MainMethodReserved(), name, source, compilerSettings);

try {
@@ -488,7 +483,7 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr
AccessController.doPrivileged(new PrivilegedAction<Void>() {
@Override
public Void run() {
String name = scriptName == null ? INLINE_NAME : scriptName;
String name = scriptName == null ? source : scriptName;
compiler.compile(loader, reserved, name, source, compilerSettings);

return null;
@@ -198,7 +198,7 @@ public final class Walker extends PainlessParserBaseVisitor<ANode> {
this.reserved.push(reserved);
this.debugStream = debugStream;
this.settings = settings;
this.sourceName = Location.computeSourceName(sourceName, sourceText);
this.sourceName = Location.computeSourceName(sourceName);
this.sourceText = sourceText;
this.globals = new Globals(new BitSet(sourceText.length()));
this.definition = definition;
@@ -249,7 +249,7 @@ public final class SSource extends AStatement {
}
visitor.visit(WriterConstants.CLASS_VERSION, classAccess, className, null,
Type.getType(scriptClassInfo.getBaseClass()).getInternalName(), classInterfaces);
visitor.visitSource(Location.computeSourceName(name, source), null);
visitor.visitSource(Location.computeSourceName(name), null);

// Write the a method to bootstrap def calls
MethodWriter bootstrapDef = new MethodWriter(Opcodes.ACC_STATIC | Opcodes.ACC_VARARGS, DEF_BOOTSTRAP_METHOD, visitor,
@@ -97,7 +97,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQue

@Override
protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
similarity = randomFrom("classic", "BM25");
similarity = randomFrom("boolean", "BM25");
XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties")
.startObject("join_field")
.field("type", "join")
@@ -336,9 +336,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQue
hasChildQuery(CHILD_DOC, new TermQueryBuilder("custom_string", "value"), ScoreMode.None);
HasChildQueryBuilder.LateParsingQuery query = (HasChildQueryBuilder.LateParsingQuery) hasChildQueryBuilder.toQuery(shardContext);
Similarity expected = SimilarityService.BUILT_IN.get(similarity)
.create(similarity, Settings.EMPTY,
Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), null)
.get();
.apply(Settings.EMPTY, Version.CURRENT, null);
assertThat(((PerFieldSimilarityWrapper) query.getSimilarity()).get("custom_string"), instanceOf(expected.getClass()));
}
@@ -87,7 +87,7 @@ public class LegacyHasChildQueryBuilderTests extends AbstractQueryTestCase<HasCh

@Override
protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
similarity = randomFrom("classic", "BM25");
similarity = randomFrom("boolean", "BM25");
// TODO: use a single type when inner hits have been changed to work with join field,
// this test randomly generates queries with inner hits
mapperService.merge(PARENT_TYPE, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(PARENT_TYPE,
@@ -323,9 +323,7 @@ public class LegacyHasChildQueryBuilderTests extends AbstractQueryTestCase<HasCh
hasChildQuery(CHILD_TYPE, new TermQueryBuilder("custom_string", "value"), ScoreMode.None);
HasChildQueryBuilder.LateParsingQuery query = (HasChildQueryBuilder.LateParsingQuery) hasChildQueryBuilder.toQuery(shardContext);
Similarity expected = SimilarityService.BUILT_IN.get(similarity)
.create(similarity, Settings.EMPTY,
Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), null)
.get();
.apply(Settings.EMPTY, Version.CURRENT, null);
assertThat(((PerFieldSimilarityWrapper) query.getSimilarity()).get("custom_string"), instanceOf(expected.getClass()));
}
@@ -349,7 +349,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY,
LoggingDeprecationHandler.INSTANCE, document)) {
parser.nextToken();
XContentHelper.copyCurrentStructure(builder.generator(), parser);
builder.generator().copyCurrentStructure(parser);
}
}
builder.endArray();
@@ -143,7 +143,7 @@ final class QueryAnalyzer {
}

private static BiFunction<Query, Version, Result> matchNoDocsQuery() {
return (query, version) -> new Result(true, Collections.emptySet(), 1);
return (query, version) -> new Result(true, Collections.emptySet(), 0);
}

private static BiFunction<Query, Version, Result> matchAllDocsQuery() {
@@ -179,28 +179,28 @@ final class QueryAnalyzer {
for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
terms.add(new QueryExtraction(new Term(iterator.field(), term)));
}
return new Result(true, terms, 1);
return new Result(true, terms, Math.min(1, terms.size()));
};
}

private static BiFunction<Query, Version, Result> synonymQuery() {
return (query, version) -> {
Set<QueryExtraction> terms = ((SynonymQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet());
return new Result(true, terms, 1);
return new Result(true, terms, Math.min(1, terms.size()));
};
}

private static BiFunction<Query, Version, Result> commonTermsQuery() {
return (query, version) -> {
Set<QueryExtraction> terms = ((CommonTermsQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet());
return new Result(false, terms, 1);
return new Result(false, terms, Math.min(1, terms.size()));
};
}

private static BiFunction<Query, Version, Result> blendedTermQuery() {
return (query, version) -> {
Set<QueryExtraction> terms = ((BlendedTermQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet());
return new Result(true, terms, 1);
return new Result(true, terms, Math.min(1, terms.size()));
};
}

@@ -208,7 +208,7 @@ final class QueryAnalyzer {
return (query, version) -> {
Term[] terms = ((PhraseQuery) query).getTerms();
if (terms.length == 0) {
return new Result(true, Collections.emptySet(), 1);
return new Result(true, Collections.emptySet(), 0);
}

if (version.onOrAfter(Version.V_6_1_0)) {
@@ -232,7 +232,7 @@ final class QueryAnalyzer {
return (query, version) -> {
Term[][] terms = ((MultiPhraseQuery) query).getTermArrays();
if (terms.length == 0) {
return new Result(true, Collections.emptySet(), 1);
return new Result(true, Collections.emptySet(), 0);
}

if (version.onOrAfter(Version.V_6_1_0)) {
@@ -297,7 +297,7 @@ final class QueryAnalyzer {
for (SpanQuery clause : spanOrQuery.getClauses()) {
terms.addAll(analyze(clause, version).extractions);
}
return new Result(false, terms, 1);
return new Result(false, terms, Math.min(1, terms.size()));
};
}

@@ -334,6 +334,9 @@ final class QueryAnalyzer {
numOptionalClauses++;
}
}
if (minimumShouldMatch > numOptionalClauses) {
return new Result(false, Collections.emptySet(), 0);
}
if (numRequiredClauses > 0) {
if (version.onOrAfter(Version.V_6_1_0)) {
UnsupportedQueryException uqe = null;
@@ -345,7 +348,12 @@ final class QueryAnalyzer {
// since they are completely optional.

try {
results.add(analyze(clause.getQuery(), version));
Result subResult = analyze(clause.getQuery(), version);
if (subResult.matchAllDocs == false && subResult.extractions.isEmpty()) {
// doesn't match anything
return subResult;
}
results.add(subResult);
} catch (UnsupportedQueryException e) {
uqe = e;
}
@@ -400,7 +408,11 @@ final class QueryAnalyzer {
}
msm += resultMsm;

verified &= result.verified;
if (result.verified == false
// If some inner extractions are optional, the result can't be verified
|| result.minimumShouldMatch < result.extractions.size()) {
verified = false;
}
matchAllDocs &= result.matchAllDocs;
extractions.addAll(result.extractions);
}
@@ -492,7 +504,7 @@ final class QueryAnalyzer {
// Need to check whether upper is not smaller than lower, otherwise NumericUtils.subtract(...) fails IAE
// If upper is really smaller than lower then we deal with like MatchNoDocsQuery. (verified and no extractions)
if (new BytesRef(lowerPoint).compareTo(new BytesRef(upperPoint)) > 0) {
return new Result(true, Collections.emptySet(), 1);
return new Result(true, Collections.emptySet(), 0);
}

byte[] interval = new byte[16];
@@ -537,7 +549,15 @@ final class QueryAnalyzer {
for (int i = 0; i < disjunctions.size(); i++) {
Query disjunct = disjunctions.get(i);
Result subResult = analyze(disjunct, version);
verified &= subResult.verified;
if (subResult.verified == false
// one of the sub queries requires more than one term to match, we can't
// verify it with a single top-level min_should_match
|| subResult.minimumShouldMatch > 1
// One of the inner clauses has multiple extractions, we won't be able to
// verify it with a single top-level min_should_match
|| (subResult.extractions.size() > 1 && requiredShouldClauses > 1)) {
verified = false;
}
if (subResult.matchAllDocs) {
numMatchAllClauses++;
}
@@ -683,6 +703,10 @@ final class QueryAnalyzer {
final boolean matchAllDocs;

Result(boolean verified, Set<QueryExtraction> extractions, int minimumShouldMatch) {
if (minimumShouldMatch > extractions.size()) {
throw new IllegalArgumentException("minimumShouldMatch can't be greater than the number of extractions: "
+ minimumShouldMatch + " > " + extractions.size());
}
this.extractions = extractions;
this.verified = verified;
this.minimumShouldMatch = minimumShouldMatch;
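Aside (illustration, not part of this commit): the new constructor guard ties minimumShouldMatch to the number of extracted terms, which is why empty results above now report 0 rather than 1. A tiny standalone sketch of that invariant with hypothetical names:

import java.util.Collections;
import java.util.Set;

public class ResultInvariantExample {
    static final class Result {
        final Set<String> extractions;
        final int minimumShouldMatch;

        Result(Set<String> extractions, int minimumShouldMatch) {
            // A query can never require more matching terms than it actually extracted.
            if (minimumShouldMatch > extractions.size()) {
                throw new IllegalArgumentException("minimumShouldMatch can't be greater than the number of extractions: "
                    + minimumShouldMatch + " > " + extractions.size());
            }
            this.extractions = extractions;
            this.minimumShouldMatch = minimumShouldMatch;
        }
    }

    public static void main(String[] args) {
        new Result(Collections.singleton("term"), 1); // ok
        new Result(Collections.emptySet(), 0);        // ok: a query with no terms matches nothing
        new Result(Collections.emptySet(), 1);        // throws IllegalArgumentException
    }
}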
@@ -210,12 +210,13 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
new BytesRef(randomFrom(stringContent.get(field1)))));
queryFunctions.add(() -> new TermInSetQuery(field2, new BytesRef(randomFrom(stringContent.get(field1))),
new BytesRef(randomFrom(stringContent.get(field1)))));
int numRandomBoolQueries = randomIntBetween(16, 32);
// many iterations with boolean queries, which are the most complex queries to deal with when nested
int numRandomBoolQueries = 1000;
for (int i = 0; i < numRandomBoolQueries; i++) {
queryFunctions.add(() -> createRandomBooleanQuery(1, stringFields, stringContent, intFieldType, intValues));
}
queryFunctions.add(() -> {
int numClauses = randomIntBetween(1, 16);
int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4));
List<Query> clauses = new ArrayList<>();
for (int i = 0; i < numClauses; i++) {
String field = randomFrom(stringFields);
@@ -266,7 +267,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
private BooleanQuery createRandomBooleanQuery(int depth, List<String> fields, Map<String, List<String>> content,
MappedFieldType intFieldType, List<Integer> intValues) {
BooleanQuery.Builder builder = new BooleanQuery.Builder();
int numClauses = randomIntBetween(1, 16);
int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); // use low numbers of clauses more often
int numShouldClauses = 0;
boolean onlyShouldClauses = rarely();
for (int i = 0; i < numClauses; i++) {
@@ -313,7 +314,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
numShouldClauses++;
}
}
builder.setMinimumNumberShouldMatch(numShouldClauses);
builder.setMinimumNumberShouldMatch(randomIntBetween(0, numShouldClauses));
return builder.build();
}
@@ -44,6 +44,7 @@ import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.join.QueryBitSetProducer;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.search.spans.SpanFirstQuery;
@@ -227,23 +228,87 @@ public class QueryAnalyzerTests extends ESTestCase {
public void testExtractQueryMetadata_booleanQuery_msm() {
BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.setMinimumNumberShouldMatch(2);
TermQuery termQuery1 = new TermQuery(new Term("_field", "_term1"));
Term term1 = new Term("_field", "_term1");
TermQuery termQuery1 = new TermQuery(term1);
builder.add(termQuery1, BooleanClause.Occur.SHOULD);
TermQuery termQuery2 = new TermQuery(new Term("_field", "_term2"));
Term term2 = new Term("_field", "_term2");
TermQuery termQuery2 = new TermQuery(term2);
builder.add(termQuery2, BooleanClause.Occur.SHOULD);
TermQuery termQuery3 = new TermQuery(new Term("_field", "_term3"));
Term term3 = new Term("_field", "_term3");
TermQuery termQuery3 = new TermQuery(term3);
builder.add(termQuery3, BooleanClause.Occur.SHOULD);

BooleanQuery booleanQuery = builder.build();
Result result = analyze(booleanQuery, Version.CURRENT);
assertThat(result.verified, is(true));
assertThat(result.minimumShouldMatch, equalTo(2));
List<QueryExtraction> extractions = new ArrayList<>(result.extractions);
extractions.sort(Comparator.comparing(extraction -> extraction.term));
assertThat(extractions.size(), equalTo(3));
assertThat(extractions.get(0).term, equalTo(new Term("_field", "_term1")));
assertThat(extractions.get(1).term, equalTo(new Term("_field", "_term2")));
assertThat(extractions.get(2).term, equalTo(new Term("_field", "_term3")));
assertTermsEqual(result.extractions, term1, term2, term3);

builder = new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(termQuery1, Occur.SHOULD)
.add(termQuery2, Occur.SHOULD)
.build(), Occur.SHOULD)
.add(termQuery3, Occur.SHOULD)
.setMinimumNumberShouldMatch(2);
booleanQuery = builder.build();
result = analyze(booleanQuery, Version.CURRENT);
assertThat(result.verified, is(false));
assertThat(result.minimumShouldMatch, equalTo(2));
assertTermsEqual(result.extractions, term1, term2, term3);

Term term4 = new Term("_field", "_term4");
TermQuery termQuery4 = new TermQuery(term4);
builder = new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(termQuery1, Occur.MUST)
.add(termQuery2, Occur.FILTER)
.build(), Occur.SHOULD)
.add(new BooleanQuery.Builder()
.add(termQuery3, Occur.MUST)
.add(termQuery4, Occur.FILTER)
.build(), Occur.SHOULD);
booleanQuery = builder.build();
result = analyze(booleanQuery, Version.CURRENT);
assertThat(result.verified, is(false));
assertThat(result.minimumShouldMatch, equalTo(2));
assertTermsEqual(result.extractions, term1, term2, term3, term4);

Term term5 = new Term("_field", "_term5");
TermQuery termQuery5 = new TermQuery(term5);
builder.add(termQuery5, Occur.SHOULD);
booleanQuery = builder.build();
result = analyze(booleanQuery, Version.CURRENT);
assertThat(result.verified, is(false));
assertThat(result.minimumShouldMatch, equalTo(1));
assertTermsEqual(result.extractions, term1, term2, term3, term4, term5);

builder.setMinimumNumberShouldMatch(2);
booleanQuery = builder.build();
result = analyze(booleanQuery, Version.CURRENT);
assertThat(result.verified, is(false));
assertThat(result.minimumShouldMatch, equalTo(3));
assertTermsEqual(result.extractions, term1, term2, term3, term4, term5);

builder.setMinimumNumberShouldMatch(3);
booleanQuery = builder.build();
result = analyze(booleanQuery, Version.CURRENT);
assertThat(result.verified, is(false));
assertThat(result.minimumShouldMatch, equalTo(5));
assertTermsEqual(result.extractions, term1, term2, term3, term4, term5);

builder = new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(termQuery1, Occur.SHOULD)
.add(termQuery2, Occur.SHOULD)
.build(), Occur.SHOULD)
.add(new BooleanQuery.Builder().setMinimumNumberShouldMatch(1).build(), Occur.SHOULD)
.setMinimumNumberShouldMatch(2);
booleanQuery = builder.build();
result = analyze(booleanQuery, Version.CURRENT);
// ideally it would return no extractions, but the fact
// that it doesn't consider them verified is probably good enough
assertFalse(result.verified);
}

public void testExtractQueryMetadata_booleanQuery_msm_pre6dot1() {
@@ -353,12 +418,15 @@ public class QueryAnalyzerTests extends ESTestCase {
assertThat(result.minimumShouldMatch, equalTo(1));

builder = new BooleanQuery.Builder();
builder.setMinimumNumberShouldMatch(randomIntBetween(2, 32));
int msm = randomIntBetween(2, 3);
builder.setMinimumNumberShouldMatch(msm);
TermQuery termQuery3 = new TermQuery(new Term("_field", "_term3"));
builder.add(termQuery1, BooleanClause.Occur.SHOULD);
builder.add(termQuery2, BooleanClause.Occur.SHOULD);
builder.add(termQuery3, BooleanClause.Occur.SHOULD);
result = analyze(builder.build(), Version.CURRENT);
assertThat("Minimum match has not impact on whether the result is verified", result.verified, is(true));
assertThat("msm is at least two so result.minimumShouldMatch should 2 too", result.minimumShouldMatch, equalTo(2));
assertThat("msm is at least two so result.minimumShouldMatch should 2 too", result.minimumShouldMatch, equalTo(msm));

builder = new BooleanQuery.Builder();
builder.add(termQuery1, randomBoolean() ? BooleanClause.Occur.MUST : BooleanClause.Occur.FILTER);
@@ -379,6 +447,53 @@ public class QueryAnalyzerTests extends ESTestCase {
result = analyze(builder.build(), Version.CURRENT);
assertThat("Prohibited clause, so candidate matches are not verified", result.verified, is(false));
assertThat(result.minimumShouldMatch, equalTo(1));

builder = new BooleanQuery.Builder();
builder.add(termQuery1, randomBoolean() ? BooleanClause.Occur.MUST : BooleanClause.Occur.FILTER);
builder.add(termQuery2, BooleanClause.Occur.MUST_NOT);
result = analyze(builder.build(), Version.CURRENT);
assertThat("Prohibited clause, so candidate matches are not verified", result.verified, is(false));
assertThat(result.minimumShouldMatch, equalTo(1));

builder = new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(termQuery1, Occur.FILTER)
.add(termQuery2, Occur.FILTER)
.build(), Occur.SHOULD)
.add(termQuery3, Occur.SHOULD);
result = analyze(builder.build(), Version.CURRENT);
assertThat("Inner clause that is not a pure disjunction, so candidate matches are not verified", result.verified, is(false));
assertThat(result.minimumShouldMatch, equalTo(1));

builder = new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(termQuery1, Occur.SHOULD)
.add(termQuery2, Occur.SHOULD)
.build(), Occur.SHOULD)
.add(termQuery3, Occur.SHOULD);
result = analyze(builder.build(), Version.CURRENT);
assertThat("Inner clause that is a pure disjunction, so candidate matches are verified", result.verified, is(true));
assertThat(result.minimumShouldMatch, equalTo(1));

builder = new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(termQuery1, Occur.SHOULD)
.add(termQuery2, Occur.SHOULD)
.build(), Occur.MUST)
.add(termQuery3, Occur.FILTER);
result = analyze(builder.build(), Version.CURRENT);
assertThat("Disjunctions of conjunctions can't be verified", result.verified, is(false));
assertThat(result.minimumShouldMatch, equalTo(2));

builder = new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(termQuery1, Occur.MUST)
.add(termQuery2, Occur.FILTER)
.build(), Occur.SHOULD)
.add(termQuery3, Occur.SHOULD);
result = analyze(builder.build(), Version.CURRENT);
assertThat("Conjunctions of disjunctions can't be verified", result.verified, is(false));
assertThat(result.minimumShouldMatch, equalTo(1));
}

public void testBooleanQueryWithMustAndShouldClauses() {
@@ -564,16 +679,15 @@ public class QueryAnalyzerTests extends ESTestCase {
Result result = analyze(new MatchNoDocsQuery("sometimes there is no reason at all"), Version.CURRENT);
assertThat(result.verified, is(true));
assertEquals(0, result.extractions.size());
assertThat(result.minimumShouldMatch, equalTo(1));
assertThat(result.minimumShouldMatch, equalTo(0));

BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(new TermQuery(new Term("field", "value")), BooleanClause.Occur.MUST);
bq.add(new MatchNoDocsQuery("sometimes there is no reason at all"), BooleanClause.Occur.MUST);
result = analyze(bq.build(), Version.CURRENT);
assertThat(result.verified, is(true));
assertEquals(1, result.extractions.size());
assertThat(result.minimumShouldMatch, equalTo(2));
assertTermsEqual(result.extractions, new Term("field", "value"));
assertEquals(0, result.extractions.size());
assertThat(result.minimumShouldMatch, equalTo(0));

bq = new BooleanQuery.Builder();
bq.add(new TermQuery(new Term("field", "value")), BooleanClause.Occur.SHOULD);
@@ -785,7 +899,7 @@ public class QueryAnalyzerTests extends ESTestCase {
SynonymQuery query = new SynonymQuery();
Result result = analyze(query, Version.CURRENT);
assertThat(result.verified, is(true));
assertThat(result.minimumShouldMatch, equalTo(1));
assertThat(result.minimumShouldMatch, equalTo(0));
assertThat(result.extractions.isEmpty(), is(true));

query = new SynonymQuery(new Term("_field", "_value1"), new Term("_field", "_value2"));
@@ -997,7 +1111,7 @@ public class QueryAnalyzerTests extends ESTestCase {
Query query = IntPoint.newRangeQuery("_field", 20, 10);
Result result = analyze(query, Version.CURRENT);
assertTrue(result.verified);
assertThat(result.minimumShouldMatch, equalTo(1));
assertThat(result.minimumShouldMatch, equalTo(0));
assertThat(result.extractions.size(), equalTo(0));
}

@@ -1179,7 +1293,7 @@ public class QueryAnalyzerTests extends ESTestCase {
BooleanClause.Occur.SHOULD
);
result = analyze(builder.build(), Version.CURRENT);
assertThat(result.verified, is(true));
assertThat(result.verified, is(false));
assertThat(result.matchAllDocs, is(false));
assertThat(result.minimumShouldMatch, equalTo(2));
assertTermsEqual(result.extractions, new Term("field", "value1"), new Term("field", "value2"),
@@ -140,9 +140,12 @@ public class DiscountedCumulativeGain implements EvaluationMetric {

if (normalize) {
Collections.sort(allRatings, Comparator.nullsLast(Collections.reverseOrder()));
double idcg = computeDCG(
allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size())));
dcg = dcg / idcg;
double idcg = computeDCG(allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size())));
if (idcg > 0) {
dcg = dcg / idcg;
} else {
dcg = 0;
}
}
EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, dcg);
evalQueryQuality.addHitsAndRatings(ratedHits);
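Aside (illustration, not part of this commit): guarding the ideal-DCG division keeps the normalized metric well defined when there are no positive ratings. Roughly:

public class NdcgGuardExample {
    static double normalize(double dcg, double idcg) {
        // Without the guard, idcg == 0 would turn the metric into NaN (0/0) or Infinity.
        return idcg > 0 ? dcg / idcg : 0;
    }

    public static void main(String[] args) {
        System.out.println(normalize(3.2, 4.5)); // ~0.711
        System.out.println(normalize(0.0, 0.0)); // 0.0 instead of NaN
    }
}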
@@ -228,6 +228,10 @@ public class MeanReciprocalRank implements EvaluationMetric {
return NAME;
}

/**
* the ranking of the first relevant document, or -1 if no relevant document was
* found
*/
int getFirstRelevantRank() {
return firstRelevantRank;
}
@@ -67,9 +67,9 @@ import static org.elasticsearch.common.xcontent.XContentHelper.createParser;
* averaged precision at n.
*/
public class TransportRankEvalAction extends HandledTransportAction<RankEvalRequest, RankEvalResponse> {
private Client client;
private ScriptService scriptService;
private NamedXContentRegistry namedXContentRegistry;
private final Client client;
private final ScriptService scriptService;
private final NamedXContentRegistry namedXContentRegistry;

@Inject
public TransportRankEvalAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
Some files were not shown because too many files have changed in this diff.