Merge branch 'master' into index-lifecycle

Colin Goodheart-Smithe 2018-09-07 10:59:10 +01:00
commit 017ffe5d12
GPG Key ID: F975E7BDD739B3C7
672 changed files with 6442 additions and 4939 deletions

View File (BuildPlugin.groovy)

@@ -539,9 +539,9 @@ class BuildPlugin implements Plugin<Project> {
from generatePOMTask.destination
into "${project.buildDir}/distributions"
rename {
generatePOMTask.ext.pomFileName == null ?
"${project.archivesBaseName}-${project.version}.pom" :
generatePOMTask.ext.pomFileName
generatePOMTask.ext.pomFileName == null ?
"${project.archivesBaseName}-${project.version}.pom" :
generatePOMTask.ext.pomFileName
}
}
}

View File (checkstyle_suppressions.xml)

@@ -360,13 +360,7 @@
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsAggregatorFactory.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]support[/\\]IncludeExclude.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]cardinality[/\\]CardinalityAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]cardinality[/\\]HyperLogLogPlusPlus.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]geobounds[/\\]GeoBoundsAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]geocentroid[/\\]InternalGeoCentroid.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]TDigestPercentileRanksAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]TDigestPercentilesAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]scripted[/\\]ScriptedMetricAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]HyperLogLogPlusPlus.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]AggregationPath.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]dfs[/\\]AggregatedDfs.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]dfs[/\\]DfsSearchResult.java" checks="LineLength" />
@@ -641,8 +635,6 @@
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsDocCountErrorIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsShardMinDocCountIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregatorTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]AbstractGeoTestCase.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]TopHitsIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]ExtendedStatsBucketIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]moving[/\\]avg[/\\]MovAvgIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]serialdiff[/\\]SerialDiffIT.java" checks="LineLength" />

View File (version.properties)

@@ -1,5 +1,5 @@
elasticsearch = 7.0.0-alpha1
lucene = 7.5.0-snapshot-13b9e28f9d
lucene = 8.0.0-snapshot-4d78db26be
# optional dependencies
spatial4j = 0.7

View File (GraphClient.java)

@@ -43,7 +43,7 @@ public class GraphClient {
*/
public final GraphExploreResponse explore(GraphExploreRequest graphExploreRequest,
RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(graphExploreRequest, RequestConverters::xPackGraphExplore,
return restHighLevelClient.performRequestAndParseEntity(graphExploreRequest, GraphRequestConverters::explore,
options, GraphExploreResponse::fromXContext, emptySet());
}
@@ -56,7 +56,7 @@ public class GraphClient {
public final void exploreAsync(GraphExploreRequest graphExploreRequest,
RequestOptions options,
ActionListener<GraphExploreResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(graphExploreRequest, RequestConverters::xPackGraphExplore,
restHighLevelClient.performRequestAsyncAndParseEntity(graphExploreRequest, GraphRequestConverters::explore,
options, GraphExploreResponse::fromXContext, listener, emptySet());
}
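For orientation, a minimal usage sketch of the explore call whose converter now lives in GraphRequestConverters. It assumes a RestHighLevelClient named client that exposes the graph API as client.graph(); the index, field, and value names are placeholders.

GraphExploreRequest exploreRequest = new GraphExploreRequest();
exploreRequest.indices("my-index");                                   // placeholder index name
Hop hop = exploreRequest.createNextHop(new TermQueryBuilder("my-field", "my-value"));
hop.addVertexRequest("my-field");                                     // vertices to expand from this field
GraphExploreResponse exploreResponse = client.graph().explore(exploreRequest, RequestOptions.DEFAULT);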

View File (StandardTokenFilterFactory.java → GraphRequestConverters.java)

@@ -17,23 +17,19 @@
* under the License.
*/
package org.elasticsearch.index.analysis;
package org.elasticsearch.client;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.apache.http.client.methods.HttpGet;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import java.io.IOException;
public class StandardTokenFilterFactory extends AbstractTokenFilterFactory {
public class GraphRequestConverters {
public StandardTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
static Request explore(GraphExploreRequest exploreRequest) throws IOException {
String endpoint = RequestConverters.endpoint(exploreRequest.indices(), exploreRequest.types(), "_xpack/graph/_explore");
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
request.setEntity(RequestConverters.createEntity(exploreRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE));
return request;
}
@Override
public TokenStream create(TokenStream tokenStream) {
return new StandardFilter(tokenStream);
}
}
}

View File (LicenseClient.java)

@@ -65,7 +65,7 @@ public final class LicenseClient {
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public PutLicenseResponse putLicense(PutLicenseRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::putLicense, options,
return restHighLevelClient.performRequestAndParseEntity(request, LicenseRequestConverters::putLicense, options,
PutLicenseResponse::fromXContent, emptySet());
}
@@ -75,7 +75,7 @@ public final class LicenseClient {
* @param listener the listener to be notified upon request completion
*/
public void putLicenseAsync(PutLicenseRequest request, RequestOptions options, ActionListener<PutLicenseResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::putLicense, options,
restHighLevelClient.performRequestAsyncAndParseEntity(request, LicenseRequestConverters::putLicense, options,
PutLicenseResponse::fromXContent, listener, emptySet());
}
@@ -86,7 +86,7 @@ public final class LicenseClient {
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public GetLicenseResponse getLicense(GetLicenseRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequest(request, RequestConverters::getLicense, options,
return restHighLevelClient.performRequest(request, LicenseRequestConverters::getLicense, options,
response -> new GetLicenseResponse(convertResponseToJson(response)), emptySet());
}
@@ -96,7 +96,7 @@ public final class LicenseClient {
* @param listener the listener to be notified upon request completion
*/
public void getLicenseAsync(GetLicenseRequest request, RequestOptions options, ActionListener<GetLicenseResponse> listener) {
restHighLevelClient.performRequestAsync(request, RequestConverters::getLicense, options,
restHighLevelClient.performRequestAsync(request, LicenseRequestConverters::getLicense, options,
response -> new GetLicenseResponse(convertResponseToJson(response)), listener, emptySet());
}
@@ -107,7 +107,7 @@ public final class LicenseClient {
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public AcknowledgedResponse deleteLicense(DeleteLicenseRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::deleteLicense, options,
return restHighLevelClient.performRequestAndParseEntity(request, LicenseRequestConverters::deleteLicense, options,
AcknowledgedResponse::fromXContent, emptySet());
}
@@ -117,7 +117,7 @@ public final class LicenseClient {
* @param listener the listener to be notified upon request completion
*/
public void deleteLicenseAsync(DeleteLicenseRequest request, RequestOptions options, ActionListener<AcknowledgedResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::deleteLicense, options,
restHighLevelClient.performRequestAsyncAndParseEntity(request, LicenseRequestConverters::deleteLicense, options,
AcknowledgedResponse::fromXContent, listener, emptySet());
}
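A hedged usage sketch of these license calls, assuming the client is reached as client.license() on a RestHighLevelClient; the accessor and setter names are assumptions not shown in this hunk, and the license JSON is a placeholder.

PutLicenseRequest putRequest = new PutLicenseRequest();
putRequest.setLicenseDefinition("{\"license\": {}}");                 // placeholder license JSON (assumed setter)
putRequest.setAcknowledge(true);                                      // pairs with isAcknowledge() in the converter (assumed setter)
PutLicenseResponse putResponse = client.license().putLicense(putRequest, RequestOptions.DEFAULT);
GetLicenseResponse getResponse = client.license().getLicense(new GetLicenseRequest(), RequestOptions.DEFAULT);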

View File (LicenseRequestConverters.java)

@@ -0,0 +1,64 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest;
import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;
import org.elasticsearch.protocol.xpack.license.PutLicenseRequest;
public class LicenseRequestConverters {
static Request putLicense(PutLicenseRequest putLicenseRequest) {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("license")
.build();
Request request = new Request(HttpPut.METHOD_NAME, endpoint);
RequestConverters.Params parameters = new RequestConverters.Params(request);
parameters.withTimeout(putLicenseRequest.timeout());
parameters.withMasterTimeout(putLicenseRequest.masterNodeTimeout());
if (putLicenseRequest.isAcknowledge()) {
parameters.putParam("acknowledge", "true");
}
request.setJsonEntity(putLicenseRequest.getLicenseDefinition());
return request;
}
static Request getLicense(GetLicenseRequest getLicenseRequest) {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("license")
.build();
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
RequestConverters.Params parameters = new RequestConverters.Params(request);
parameters.withLocal(getLicenseRequest.local());
return request;
}
static Request deleteLicense(DeleteLicenseRequest deleteLicenseRequest) {
Request request = new Request(HttpDelete.METHOD_NAME, "/_xpack/license");
RequestConverters.Params parameters = new RequestConverters.Params(request);
parameters.withTimeout(deleteLicenseRequest.timeout());
parameters.withMasterTimeout(deleteLicenseRequest.masterNodeTimeout());
return request;
}
}

View File (MLRequestConverters.java)

@@ -35,6 +35,7 @@ import org.elasticsearch.client.ml.GetOverallBucketsRequest;
import org.elasticsearch.client.ml.GetRecordsRequest;
import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.common.Strings;
import java.io.IOException;
@@ -146,6 +147,19 @@ final class MLRequestConverters {
return request;
}
static Request updateJob(UpdateJobRequest updateJobRequest) throws IOException {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("ml")
.addPathPartAsIs("anomaly_detectors")
.addPathPart(updateJobRequest.getJobUpdate().getJobId())
.addPathPartAsIs("_update")
.build();
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
request.setEntity(createEntity(updateJobRequest.getJobUpdate(), REQUEST_BODY_CONTENT_TYPE));
return request;
}
static Request getBuckets(GetBucketsRequest getBucketsRequest) throws IOException {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")

View File (MachineLearningClient.java)

@@ -19,6 +19,7 @@
package org.elasticsearch.client;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.CloseJobRequest;
import org.elasticsearch.client.ml.CloseJobResponse;
import org.elasticsearch.client.ml.DeleteJobRequest;
@@ -319,6 +320,7 @@ public final class MachineLearningClient {
*
* @param request The {@link FlushJobRequest} object enclosing the `jobId` and additional request options
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @throws IOException when there is a serialization issue sending the request or receiving the response
*/
public FlushJobResponse flushJob(FlushJobRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request,
@@ -356,6 +358,38 @@ public final class MachineLearningClient {
Collections.emptySet());
}
/**
* Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job}
*
* @param request the {@link UpdateJobRequest} object enclosing the desired updates
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return a PutJobResponse object containing the updated job object
* @throws IOException when there is a serialization issue sending the request or receiving the response
*/
public PutJobResponse updateJob(UpdateJobRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request,
MLRequestConverters::updateJob,
options,
PutJobResponse::fromXContent,
Collections.emptySet());
}
/**
* Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job} asynchronously
*
* @param request the {@link UpdateJobRequest} object enclosing the desired updates
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified upon request completion
*/
public void updateJobAsync(UpdateJobRequest request, RequestOptions options, ActionListener<PutJobResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request,
MLRequestConverters::updateJob,
options,
PutJobResponse::fromXContent,
listener,
Collections.emptySet());
}
/**
* Gets the buckets for a Machine Learning Job.
* <p>
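A minimal sketch of the new updateJob call, mirroring the integration test added in MachineLearningIT further down; client is assumed to be a RestHighLevelClient and the job id is a placeholder.

MachineLearningClient mlClient = client.machineLearning();
JobUpdate update = new JobUpdate.Builder("my-job-id")                 // id of an existing job (placeholder)
    .setDescription("Updated description")
    .build();
PutJobResponse response = mlClient.updateJob(new UpdateJobRequest(update), RequestOptions.DEFAULT);
// response.getResponse() carries the updated Job, including the new description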

View File (RequestConverters.java)

@@ -30,8 +30,6 @@ import org.apache.http.entity.ContentType;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
@@ -114,17 +112,13 @@ import org.elasticsearch.index.reindex.ReindexRequest;
import org.elasticsearch.index.reindex.UpdateByQueryRequest;
import org.elasticsearch.protocol.xpack.XPackInfoRequest;
import org.elasticsearch.protocol.xpack.XPackUsageRequest;
import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest;
import org.elasticsearch.protocol.xpack.indexlifecycle.ExplainLifecycleRequest;
import org.elasticsearch.protocol.xpack.indexlifecycle.SetIndexLifecyclePolicyRequest;
import org.elasticsearch.protocol.xpack.indexlifecycle.StartILMRequest;
import org.elasticsearch.protocol.xpack.indexlifecycle.StopILMRequest;
import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;
import org.elasticsearch.protocol.xpack.license.PutLicenseRequest;
import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest;
import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.script.mustache.MultiSearchTemplateRequest;
import org.elasticsearch.script.mustache.SearchTemplateRequest;
@@ -148,17 +142,6 @@ final class RequestConverters {
// Contains only status utility methods
}
static Request cancelTasks(CancelTasksRequest cancelTasksRequest) {
Request request = new Request(HttpPost.METHOD_NAME, "/_tasks/_cancel");
Params params = new Params(request);
params.withTimeout(cancelTasksRequest.getTimeout())
.withTaskId(cancelTasksRequest.getTaskId())
.withNodes(cancelTasksRequest.getNodes())
.withParentTaskId(cancelTasksRequest.getParentTaskId())
.withActions(cancelTasksRequest.getActions());
return request;
}
static Request delete(DeleteRequest deleteRequest) {
String endpoint = endpoint(deleteRequest.index(), deleteRequest.type(), deleteRequest.id());
Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
@@ -770,22 +753,6 @@ final class RequestConverters {
return request;
}
static Request listTasks(ListTasksRequest listTaskRequest) {
if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) {
throw new IllegalArgumentException("TaskId cannot be used for list tasks request");
}
Request request = new Request(HttpGet.METHOD_NAME, "/_tasks");
Params params = new Params(request);
params.withTimeout(listTaskRequest.getTimeout())
.withDetailed(listTaskRequest.getDetailed())
.withWaitForCompletion(listTaskRequest.getWaitForCompletion())
.withParentTaskId(listTaskRequest.getParentTaskId())
.withNodes(listTaskRequest.getNodes())
.withActions(listTaskRequest.getActions())
.putParam("group_by", "none");
return request;
}
static Request reindex(ReindexRequest reindexRequest) throws IOException {
String endpoint = new EndpointBuilder().addPathPart("_reindex").build();
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
@@ -1159,13 +1126,6 @@ final class RequestConverters {
return request;
}
static Request xPackGraphExplore(GraphExploreRequest exploreRequest) throws IOException {
String endpoint = endpoint(exploreRequest.indices(), exploreRequest.types(), "_xpack/graph/_explore");
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
request.setEntity(createEntity(exploreRequest, REQUEST_BODY_CONTENT_TYPE));
return request;
}
static Request xPackWatcherPutWatch(PutWatchRequest putWatchRequest) {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
@@ -1293,41 +1253,6 @@ final class RequestConverters {
return request;
}
static Request putLicense(PutLicenseRequest putLicenseRequest) {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("license")
.build();
Request request = new Request(HttpPut.METHOD_NAME, endpoint);
Params parameters = new Params(request);
parameters.withTimeout(putLicenseRequest.timeout());
parameters.withMasterTimeout(putLicenseRequest.masterNodeTimeout());
if (putLicenseRequest.isAcknowledge()) {
parameters.putParam("acknowledge", "true");
}
request.setJsonEntity(putLicenseRequest.getLicenseDefinition());
return request;
}
static Request getLicense(GetLicenseRequest getLicenseRequest) {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("license")
.build();
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
Params parameters = new Params(request);
parameters.withLocal(getLicenseRequest.local());
return request;
}
static Request deleteLicense(DeleteLicenseRequest deleteLicenseRequest) {
Request request = new Request(HttpDelete.METHOD_NAME, "/_xpack/license");
Params parameters = new Params(request);
parameters.withTimeout(deleteLicenseRequest.timeout());
parameters.withMasterTimeout(deleteLicenseRequest.masterNodeTimeout());
return request;
}
static Request getMigrationAssistance(IndexUpgradeInfoRequest indexUpgradeInfoRequest) {
EndpointBuilder endpointBuilder = new EndpointBuilder()
.addPathPartAsIs("_xpack/migration/assistance")

View File

@@ -120,38 +120,38 @@ import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms;
import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongTerms;
import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.avg.ParsedAvg;
import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.cardinality.ParsedCardinality;
import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.geobounds.ParsedGeoBounds;
import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.geocentroid.ParsedGeoCentroid;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.max.ParsedMax;
import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.min.ParsedMin;
import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentileRanks;
import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentiles;
import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.ParsedHDRPercentileRanks;
import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.ParsedHDRPercentiles;
import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentileRanks;
import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentiles;
import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.ParsedTDigestPercentileRanks;
import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.ParsedTDigestPercentiles;
import org.elasticsearch.search.aggregations.metrics.scripted.ParsedScriptedMetric;
import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.stats.ParsedStats;
import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.stats.extended.ParsedExtendedStats;
import org.elasticsearch.search.aggregations.metrics.sum.ParsedSum;
import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.tophits.ParsedTopHits;
import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.valuecount.ParsedValueCount;
import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ParsedAvg;
import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ParsedCardinality;
import org.elasticsearch.search.aggregations.metrics.GeoBoundsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ParsedGeoBounds;
import org.elasticsearch.search.aggregations.metrics.GeoCentroidAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ParsedGeoCentroid;
import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ParsedMax;
import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ParsedMin;
import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentileRanks;
import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles;
import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentileRanks;
import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentiles;
import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks;
import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles;
import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentileRanks;
import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentiles;
import org.elasticsearch.search.aggregations.metrics.ParsedScriptedMetric;
import org.elasticsearch.search.aggregations.metrics.ScriptedMetricAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ParsedStats;
import org.elasticsearch.search.aggregations.metrics.StatsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ParsedExtendedStats;
import org.elasticsearch.search.aggregations.metrics.ParsedSum;
import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ParsedTopHits;
import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ParsedValueCount;
import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue;

View File (TasksClient.java)

@@ -51,7 +51,7 @@ public final class TasksClient {
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public ListTasksResponse list(ListTasksRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::listTasks, options,
return restHighLevelClient.performRequestAndParseEntity(request, TasksRequestConverters::listTasks, options,
ListTasksResponse::fromXContent, emptySet());
}
@@ -64,7 +64,7 @@ public final class TasksClient {
* @param listener the listener to be notified upon request completion
*/
public void listAsync(ListTasksRequest request, RequestOptions options, ActionListener<ListTasksResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, options,
restHighLevelClient.performRequestAsyncAndParseEntity(request, TasksRequestConverters::listTasks, options,
ListTasksResponse::fromXContent, listener, emptySet());
}
@@ -82,7 +82,7 @@ public final class TasksClient {
public CancelTasksResponse cancel(CancelTasksRequest cancelTasksRequest, RequestOptions options ) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(
cancelTasksRequest,
RequestConverters::cancelTasks,
TasksRequestConverters::cancelTasks,
options,
CancelTasksResponse::fromXContent,
emptySet()
@@ -101,7 +101,7 @@ public final class TasksClient {
public void cancelAsync(CancelTasksRequest cancelTasksRequest, RequestOptions options, ActionListener<CancelTasksResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(
cancelTasksRequest,
RequestConverters::cancelTasks,
TasksRequestConverters::cancelTasks,
options,
CancelTasksResponse::fromXContent,
listener,
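A brief usage sketch of the task APIs whose converters moved into TasksRequestConverters; it assumes a RestHighLevelClient named client exposing client.tasks(), and the node and task ids are placeholders.

ListTasksRequest listRequest = new ListTasksRequest();
listRequest.setDetailed(true);                                        // ask for detailed task info
ListTasksResponse listResponse = client.tasks().list(listRequest, RequestOptions.DEFAULT);
CancelTasksRequest cancelRequest = new CancelTasksRequest();
cancelRequest.setTaskId(new TaskId("node-id", 42L));                  // placeholder task to cancel
CancelTasksResponse cancelResponse = client.tasks().cancel(cancelRequest, RequestOptions.DEFAULT);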

View File (TasksRequestConverters.java)

@@ -0,0 +1,55 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
public class TasksRequestConverters {
static Request cancelTasks(CancelTasksRequest cancelTasksRequest) {
Request request = new Request(HttpPost.METHOD_NAME, "/_tasks/_cancel");
RequestConverters.Params params = new RequestConverters.Params(request);
params.withTimeout(cancelTasksRequest.getTimeout())
.withTaskId(cancelTasksRequest.getTaskId())
.withNodes(cancelTasksRequest.getNodes())
.withParentTaskId(cancelTasksRequest.getParentTaskId())
.withActions(cancelTasksRequest.getActions());
return request;
}
static Request listTasks(ListTasksRequest listTaskRequest) {
if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) {
throw new IllegalArgumentException("TaskId cannot be used for list tasks request");
}
Request request = new Request(HttpGet.METHOD_NAME, "/_tasks");
RequestConverters.Params params = new RequestConverters.Params(request);
params.withTimeout(listTaskRequest.getTimeout())
.withDetailed(listTaskRequest.getDetailed())
.withWaitForCompletion(listTaskRequest.getWaitForCompletion())
.withParentTaskId(listTaskRequest.getParentTaskId())
.withNodes(listTaskRequest.getNodes())
.withActions(listTaskRequest.getActions())
.putParam("group_by", "none");
return request;
}
}

View File (UpdateJobRequest.java)

@@ -0,0 +1,80 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Objects;
/**
* Updates a {@link org.elasticsearch.client.ml.job.config.Job} with the passed {@link JobUpdate}
* settings
*/
public class UpdateJobRequest extends ActionRequest implements ToXContentObject {
private final JobUpdate update;
public UpdateJobRequest(JobUpdate update) {
this.update = update;
}
public JobUpdate getJobUpdate() {
return update;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return update.toXContent(builder, params);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
UpdateJobRequest that = (UpdateJobRequest) o;
return Objects.equals(update, that.update);
}
@Override
public int hashCode() {
return Objects.hash(update);
}
@Override
public final String toString() {
return Strings.toString(this);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
}

View File (JobUpdate.java)

@@ -0,0 +1,454 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml.job.config;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* POJO for updating an existing Machine Learning {@link Job}
*/
public class JobUpdate implements ToXContentObject {
public static final ParseField DETECTORS = new ParseField("detectors");
public static final ConstructingObjectParser<Builder, Void> PARSER = new ConstructingObjectParser<>(
"job_update", true, args -> new Builder((String) args[0]));
static {
PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), Job.ID);
PARSER.declareStringArray(Builder::setGroups, Job.GROUPS);
PARSER.declareStringOrNull(Builder::setDescription, Job.DESCRIPTION);
PARSER.declareObjectArray(Builder::setDetectorUpdates, DetectorUpdate.PARSER, DETECTORS);
PARSER.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.PARSER, Job.MODEL_PLOT_CONFIG);
PARSER.declareObject(Builder::setAnalysisLimits, AnalysisLimits.PARSER, Job.ANALYSIS_LIMITS);
PARSER.declareString((builder, val) -> builder.setBackgroundPersistInterval(
TimeValue.parseTimeValue(val, Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName())), Job.BACKGROUND_PERSIST_INTERVAL);
PARSER.declareLong(Builder::setRenormalizationWindowDays, Job.RENORMALIZATION_WINDOW_DAYS);
PARSER.declareLong(Builder::setResultsRetentionDays, Job.RESULTS_RETENTION_DAYS);
PARSER.declareLong(Builder::setModelSnapshotRetentionDays, Job.MODEL_SNAPSHOT_RETENTION_DAYS);
PARSER.declareStringArray(Builder::setCategorizationFilters, AnalysisConfig.CATEGORIZATION_FILTERS);
PARSER.declareField(Builder::setCustomSettings, (p, c) -> p.map(), Job.CUSTOM_SETTINGS, ObjectParser.ValueType.OBJECT);
}
private final String jobId;
private final List<String> groups;
private final String description;
private final List<DetectorUpdate> detectorUpdates;
private final ModelPlotConfig modelPlotConfig;
private final AnalysisLimits analysisLimits;
private final Long renormalizationWindowDays;
private final TimeValue backgroundPersistInterval;
private final Long modelSnapshotRetentionDays;
private final Long resultsRetentionDays;
private final List<String> categorizationFilters;
private final Map<String, Object> customSettings;
private JobUpdate(String jobId, @Nullable List<String> groups, @Nullable String description,
@Nullable List<DetectorUpdate> detectorUpdates, @Nullable ModelPlotConfig modelPlotConfig,
@Nullable AnalysisLimits analysisLimits, @Nullable TimeValue backgroundPersistInterval,
@Nullable Long renormalizationWindowDays, @Nullable Long resultsRetentionDays,
@Nullable Long modelSnapshotRetentionDays, @Nullable List<String> categorisationFilters,
@Nullable Map<String, Object> customSettings) {
this.jobId = jobId;
this.groups = groups;
this.description = description;
this.detectorUpdates = detectorUpdates;
this.modelPlotConfig = modelPlotConfig;
this.analysisLimits = analysisLimits;
this.renormalizationWindowDays = renormalizationWindowDays;
this.backgroundPersistInterval = backgroundPersistInterval;
this.modelSnapshotRetentionDays = modelSnapshotRetentionDays;
this.resultsRetentionDays = resultsRetentionDays;
this.categorizationFilters = categorisationFilters;
this.customSettings = customSettings;
}
public String getJobId() {
return jobId;
}
public List<String> getGroups() {
return groups;
}
public String getDescription() {
return description;
}
public List<DetectorUpdate> getDetectorUpdates() {
return detectorUpdates;
}
public ModelPlotConfig getModelPlotConfig() {
return modelPlotConfig;
}
public AnalysisLimits getAnalysisLimits() {
return analysisLimits;
}
public Long getRenormalizationWindowDays() {
return renormalizationWindowDays;
}
public TimeValue getBackgroundPersistInterval() {
return backgroundPersistInterval;
}
public Long getModelSnapshotRetentionDays() {
return modelSnapshotRetentionDays;
}
public Long getResultsRetentionDays() {
return resultsRetentionDays;
}
public List<String> getCategorizationFilters() {
return categorizationFilters;
}
public Map<String, Object> getCustomSettings() {
return customSettings;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
if (groups != null) {
builder.field(Job.GROUPS.getPreferredName(), groups);
}
if (description != null) {
builder.field(Job.DESCRIPTION.getPreferredName(), description);
}
if (detectorUpdates != null) {
builder.field(DETECTORS.getPreferredName(), detectorUpdates);
}
if (modelPlotConfig != null) {
builder.field(Job.MODEL_PLOT_CONFIG.getPreferredName(), modelPlotConfig);
}
if (analysisLimits != null) {
builder.field(Job.ANALYSIS_LIMITS.getPreferredName(), analysisLimits);
}
if (renormalizationWindowDays != null) {
builder.field(Job.RENORMALIZATION_WINDOW_DAYS.getPreferredName(), renormalizationWindowDays);
}
if (backgroundPersistInterval != null) {
builder.field(Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName(), backgroundPersistInterval);
}
if (modelSnapshotRetentionDays != null) {
builder.field(Job.MODEL_SNAPSHOT_RETENTION_DAYS.getPreferredName(), modelSnapshotRetentionDays);
}
if (resultsRetentionDays != null) {
builder.field(Job.RESULTS_RETENTION_DAYS.getPreferredName(), resultsRetentionDays);
}
if (categorizationFilters != null) {
builder.field(AnalysisConfig.CATEGORIZATION_FILTERS.getPreferredName(), categorizationFilters);
}
if (customSettings != null) {
builder.field(Job.CUSTOM_SETTINGS.getPreferredName(), customSettings);
}
builder.endObject();
return builder;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
JobUpdate that = (JobUpdate) other;
return Objects.equals(this.jobId, that.jobId)
&& Objects.equals(this.groups, that.groups)
&& Objects.equals(this.description, that.description)
&& Objects.equals(this.detectorUpdates, that.detectorUpdates)
&& Objects.equals(this.modelPlotConfig, that.modelPlotConfig)
&& Objects.equals(this.analysisLimits, that.analysisLimits)
&& Objects.equals(this.renormalizationWindowDays, that.renormalizationWindowDays)
&& Objects.equals(this.backgroundPersistInterval, that.backgroundPersistInterval)
&& Objects.equals(this.modelSnapshotRetentionDays, that.modelSnapshotRetentionDays)
&& Objects.equals(this.resultsRetentionDays, that.resultsRetentionDays)
&& Objects.equals(this.categorizationFilters, that.categorizationFilters)
&& Objects.equals(this.customSettings, that.customSettings);
}
@Override
public int hashCode() {
return Objects.hash(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, renormalizationWindowDays,
backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, categorizationFilters, customSettings);
}
public static class DetectorUpdate implements ToXContentObject {
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<DetectorUpdate, Void> PARSER =
new ConstructingObjectParser<>("detector_update", true, a -> new DetectorUpdate((int) a[0], (String) a[1],
(List<DetectionRule>) a[2]));
static {
PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), Detector.DETECTOR_INDEX);
PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), Job.DESCRIPTION);
PARSER.declareObjectArray(ConstructingObjectParser.optionalConstructorArg(), (parser, parseFieldMatcher) ->
DetectionRule.PARSER.apply(parser, parseFieldMatcher).build(), Detector.CUSTOM_RULES_FIELD);
}
private final int detectorIndex;
private final String description;
private final List<DetectionRule> rules;
/**
* A detector update to apply to the Machine Learning Job
*
* @param detectorIndex The identifier of the detector to update.
* @param description The new description for the detector.
* @param rules The new list of rules for the detector.
*/
public DetectorUpdate(int detectorIndex, String description, List<DetectionRule> rules) {
this.detectorIndex = detectorIndex;
this.description = description;
this.rules = rules;
}
public int getDetectorIndex() {
return detectorIndex;
}
public String getDescription() {
return description;
}
public List<DetectionRule> getRules() {
return rules;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Detector.DETECTOR_INDEX.getPreferredName(), detectorIndex);
if (description != null) {
builder.field(Job.DESCRIPTION.getPreferredName(), description);
}
if (rules != null) {
builder.field(Detector.CUSTOM_RULES_FIELD.getPreferredName(), rules);
}
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(detectorIndex, description, rules);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
DetectorUpdate that = (DetectorUpdate) other;
return this.detectorIndex == that.detectorIndex && Objects.equals(this.description, that.description)
&& Objects.equals(this.rules, that.rules);
}
}
public static class Builder {
private final String jobId;
private List<String> groups;
private String description;
private List<DetectorUpdate> detectorUpdates;
private ModelPlotConfig modelPlotConfig;
private AnalysisLimits analysisLimits;
private Long renormalizationWindowDays;
private TimeValue backgroundPersistInterval;
private Long modelSnapshotRetentionDays;
private Long resultsRetentionDays;
private List<String> categorizationFilters;
private Map<String, Object> customSettings;
/**
* New {@link JobUpdate.Builder} object for the existing job
*
* @param jobId non-null `jobId` for referencing an existing {@link Job}
*/
public Builder(String jobId) {
this.jobId = jobId;
}
/**
* Set the job groups
*
* Updates the {@link Job#groups} setting
*
* @param groups A list of group names
*/
public Builder setGroups(List<String> groups) {
this.groups = groups;
return this;
}
/**
* Set the job description
*
* Updates the {@link Job#description} setting
*
* @param description the desired Machine Learning job description
*/
public Builder setDescription(String description) {
this.description = description;
return this;
}
/**
* The detector updates to apply to the job
*
* Updates the {@link AnalysisConfig#detectors} setting
*
* @param detectorUpdates list of {@link JobUpdate.DetectorUpdate} objects
*/
public Builder setDetectorUpdates(List<DetectorUpdate> detectorUpdates) {
this.detectorUpdates = detectorUpdates;
return this;
}
/**
* Enables/disables the model plot config setting through {@link ModelPlotConfig#enabled}
*
* Updates the {@link Job#modelPlotConfig} setting
*
* @param modelPlotConfig {@link ModelPlotConfig} object with updated fields
*/
public Builder setModelPlotConfig(ModelPlotConfig modelPlotConfig) {
this.modelPlotConfig = modelPlotConfig;
return this;
}
/**
* Sets new {@link AnalysisLimits} for the {@link Job}
*
* Updates the {@link Job#analysisLimits} setting
*
* @param analysisLimits Updates to {@link AnalysisLimits}
*/
public Builder setAnalysisLimits(AnalysisLimits analysisLimits) {
this.analysisLimits = analysisLimits;
return this;
}
/**
* Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen
*
* Updates the {@link Job#renormalizationWindowDays} setting
*
* @param renormalizationWindowDays number of renormalization window days
*/
public Builder setRenormalizationWindowDays(Long renormalizationWindowDays) {
this.renormalizationWindowDays = renormalizationWindowDays;
return this;
}
/**
* Advanced configuration option. The time between each periodic persistence of the model
*
* Updates the {@link Job#backgroundPersistInterval} setting
*
* @param backgroundPersistInterval the time between background persistence
*/
public Builder setBackgroundPersistInterval(TimeValue backgroundPersistInterval) {
this.backgroundPersistInterval = backgroundPersistInterval;
return this;
}
/**
* The time in days that model snapshots are retained for the job.
*
* Updates the {@link Job#modelSnapshotRetentionDays} setting
*
* @param modelSnapshotRetentionDays number of days to keep a model snapshot
*/
public Builder setModelSnapshotRetentionDays(Long modelSnapshotRetentionDays) {
this.modelSnapshotRetentionDays = modelSnapshotRetentionDays;
return this;
}
/**
* Advanced configuration option. The number of days for which job results are retained
*
* Updates the {@link Job#resultsRetentionDays} setting
*
* @param resultsRetentionDays number of days to keep results.
*/
public Builder setResultsRetentionDays(Long resultsRetentionDays) {
this.resultsRetentionDays = resultsRetentionDays;
return this;
}
/**
* Sets the categorization filters on the {@link Job}
*
* Updates the {@link AnalysisConfig#categorizationFilters} setting.
* Requires {@link AnalysisConfig#categorizationFieldName} to have been set on the existing Job.
*
* @param categorizationFilters list of categorization filters for the Job's {@link AnalysisConfig}
*/
public Builder setCategorizationFilters(List<String> categorizationFilters) {
this.categorizationFilters = categorizationFilters;
return this;
}
/**
* Contains custom meta data about the job.
*
* Updates the {@link Job#customSettings} setting
*
* @param customSettings custom settings map for the job
*/
public Builder setCustomSettings(Map<String, Object> customSettings) {
this.customSettings = customSettings;
return this;
}
public JobUpdate build() {
return new JobUpdate(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, backgroundPersistInterval,
renormalizationWindowDays, resultsRetentionDays, modelSnapshotRetentionDays, categorizationFilters, customSettings);
}
}
}
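To illustrate how the new builder composes an update, a short sketch; the job id, group names, and retention values are placeholders, and java.util.Arrays is assumed to be imported.

JobUpdate update = new JobUpdate.Builder("my-job-id")                 // existing job to update (placeholder)
    .setDescription("Analyzes nightly latency data")
    .setGroups(Arrays.asList("production", "latency"))
    .setModelSnapshotRetentionDays(7L)
    .setResultsRetentionDays(30L)
    .build();
// Only the fields that were set are serialized by toXContent(), as shown above.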

View File (GrapRequestConvertersTests.java)

@@ -0,0 +1,67 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.client.methods.HttpGet;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.protocol.xpack.graph.Hop;
import org.elasticsearch.test.ESTestCase;
import org.junit.Assert;
import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.Matchers.is;
public class GrapRequestConvertersTests extends ESTestCase{
public void testGraphExplore() throws Exception {
Map<String, String> expectedParams = new HashMap<>();
GraphExploreRequest graphExploreRequest = new GraphExploreRequest();
graphExploreRequest.sampleDiversityField("diversity");
graphExploreRequest.indices("index1", "index2");
graphExploreRequest.types("type1", "type2");
int timeout = ESTestCase.randomIntBetween(10000, 20000);
graphExploreRequest.timeout(TimeValue.timeValueMillis(timeout));
graphExploreRequest.useSignificance(ESTestCase.randomBoolean());
int numHops = ESTestCase.randomIntBetween(1, 5);
for (int i = 0; i < numHops; i++) {
int hopNumber = i + 1;
QueryBuilder guidingQuery = null;
if (ESTestCase.randomBoolean()) {
guidingQuery = new TermQueryBuilder("field" + hopNumber, "value" + hopNumber);
}
Hop hop = graphExploreRequest.createNextHop(guidingQuery);
hop.addVertexRequest("field" + hopNumber);
hop.getVertexRequest(0).addInclude("value" + hopNumber, hopNumber);
}
Request request = GraphRequestConverters.explore(graphExploreRequest);
Assert.assertEquals(HttpGet.METHOD_NAME, request.getMethod());
Assert.assertEquals("/index1,index2/type1,type2/_xpack/graph/_explore", request.getEndpoint());
Assert.assertEquals(expectedParams, request.getParameters());
Assert.assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters()));
RequestConvertersTests.assertToXContentBody(graphExploreRequest, request.getEntity());
}
}

View File (MLRequestConvertersTests.java)

@@ -34,9 +34,12 @@ import org.elasticsearch.client.ml.GetOverallBucketsRequest;
import org.elasticsearch.client.ml.GetRecordsRequest;
import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.job.config.AnalysisConfig;
import org.elasticsearch.client.ml.job.config.Detector;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.client.ml.job.config.JobUpdateTests;
import org.elasticsearch.client.ml.job.util.PageParams;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -166,6 +169,20 @@ public class MLRequestConvertersTests extends ESTestCase {
requestEntityToString(request));
}
public void testUpdateJob() throws Exception {
String jobId = randomAlphaOfLength(10);
JobUpdate updates = JobUpdateTests.createRandom(jobId);
UpdateJobRequest updateJobRequest = new UpdateJobRequest(updates);
Request request = MLRequestConverters.updateJob(updateJobRequest);
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_update", request.getEndpoint());
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
JobUpdate.Builder parsedRequest = JobUpdate.PARSER.apply(parser, null);
assertThat(parsedRequest.build(), equalTo(updates));
}
}
public void testGetBuckets() throws IOException {
String jobId = randomAlphaOfLength(10);
GetBucketsRequest getBucketsRequest = new GetBucketsRequest(jobId);

View File (MachineLearningIT.java)

@@ -20,6 +20,8 @@ package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.client.ml.GetJobStatsRequest;
import org.elasticsearch.client.ml.GetJobStatsResponse;
@@ -218,6 +220,23 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
assertThat(exception.status().getStatus(), equalTo(404));
}
public void testUpdateJob() throws Exception {
String jobId = randomValidJobId();
Job job = buildJob(jobId);
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
UpdateJobRequest request = new UpdateJobRequest(new JobUpdate.Builder(jobId).setDescription("Updated description").build());
PutJobResponse response = execute(request, machineLearningClient::updateJob, machineLearningClient::updateJobAsync);
assertEquals("Updated description", response.getResponse().getDescription());
GetJobRequest getRequest = new GetJobRequest(jobId);
GetJobResponse getResponse = machineLearningClient.getJob(getRequest, RequestOptions.DEFAULT);
assertEquals("Updated description", getResponse.jobs().get(0).getDescription());
}
public static String randomValidJobId() {
CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray());
return generator.ofCodePointsLength(random(), 10, 10);

View File (RequestConvertersTests.java)

@@ -29,8 +29,6 @@ import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
@@ -118,7 +116,6 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.RandomCreateIndexGenerator;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.index.rankeval.PrecisionAtK;
@@ -137,8 +134,6 @@ import org.elasticsearch.protocol.xpack.indexlifecycle.StartILMRequest;
import org.elasticsearch.protocol.xpack.indexlifecycle.StopILMRequest;
import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest;
import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.protocol.xpack.graph.Hop;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.rest.action.search.RestSearchAction;
@@ -156,7 +151,6 @@ import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.rescore.QueryRescorerBuilder;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.RandomObjects;
@ -196,7 +190,6 @@ import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
public class RequestConvertersTests extends ESTestCase {
@ -2012,83 +2005,6 @@ public class RequestConvertersTests extends ESTestCase {
assertEquals(expectedParams, request.getParameters());
}
public void testCancelTasks() {
CancelTasksRequest request = new CancelTasksRequest();
Map<String, String> expectedParams = new HashMap<>();
TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong());
TaskId parentTaskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong());
request.setTaskId(taskId);
request.setParentTaskId(parentTaskId);
expectedParams.put("task_id", taskId.toString());
expectedParams.put("parent_task_id", parentTaskId.toString());
Request httpRequest = RequestConverters.cancelTasks(request);
assertThat(httpRequest, notNullValue());
assertThat(httpRequest.getMethod(), equalTo(HttpPost.METHOD_NAME));
assertThat(httpRequest.getEntity(), nullValue());
assertThat(httpRequest.getEndpoint(), equalTo("/_tasks/_cancel"));
assertThat(httpRequest.getParameters(), equalTo(expectedParams));
}
public void testListTasks() {
{
ListTasksRequest request = new ListTasksRequest();
Map<String, String> expectedParams = new HashMap<>();
if (randomBoolean()) {
request.setDetailed(randomBoolean());
if (request.getDetailed()) {
expectedParams.put("detailed", "true");
}
}
if (randomBoolean()) {
request.setWaitForCompletion(randomBoolean());
if (request.getWaitForCompletion()) {
expectedParams.put("wait_for_completion", "true");
}
}
if (randomBoolean()) {
String timeout = randomTimeValue();
request.setTimeout(timeout);
expectedParams.put("timeout", timeout);
}
if (randomBoolean()) {
if (randomBoolean()) {
TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong());
request.setParentTaskId(taskId);
expectedParams.put("parent_task_id", taskId.toString());
} else {
request.setParentTask(TaskId.EMPTY_TASK_ID);
}
}
if (randomBoolean()) {
String[] nodes = generateRandomStringArray(10, 8, false);
request.setNodes(nodes);
if (nodes.length > 0) {
expectedParams.put("nodes", String.join(",", nodes));
}
}
if (randomBoolean()) {
String[] actions = generateRandomStringArray(10, 8, false);
request.setActions(actions);
if (actions.length > 0) {
expectedParams.put("actions", String.join(",", actions));
}
}
expectedParams.put("group_by", "none");
Request httpRequest = RequestConverters.listTasks(request);
assertThat(httpRequest, notNullValue());
assertThat(httpRequest.getMethod(), equalTo(HttpGet.METHOD_NAME));
assertThat(httpRequest.getEntity(), nullValue());
assertThat(httpRequest.getEndpoint(), equalTo("/_tasks"));
assertThat(httpRequest.getParameters(), equalTo(expectedParams));
}
{
ListTasksRequest request = new ListTasksRequest();
request.setTaskId(new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()));
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.listTasks(request));
assertEquals("TaskId cannot be used for list tasks request", exception.getMessage());
}
}
public void testGetRepositories() {
Map<String, String> expectedParams = new HashMap<>();
StringBuilder endpoint = new StringBuilder("/_snapshot");
@ -2666,35 +2582,6 @@ public class RequestConvertersTests extends ESTestCase {
assertThat(bos.toString("UTF-8"), is(body));
}
public void testGraphExplore() throws Exception {
Map<String, String> expectedParams = new HashMap<>();
GraphExploreRequest graphExploreRequest = new GraphExploreRequest();
graphExploreRequest.sampleDiversityField("diversity");
graphExploreRequest.indices("index1", "index2");
graphExploreRequest.types("type1", "type2");
int timeout = randomIntBetween(10000, 20000);
graphExploreRequest.timeout(TimeValue.timeValueMillis(timeout));
graphExploreRequest.useSignificance(randomBoolean());
int numHops = randomIntBetween(1, 5);
for (int i = 0; i < numHops; i++) {
int hopNumber = i + 1;
QueryBuilder guidingQuery = null;
if (randomBoolean()) {
guidingQuery = new TermQueryBuilder("field" + hopNumber, "value" + hopNumber);
}
Hop hop = graphExploreRequest.createNextHop(guidingQuery);
hop.addVertexRequest("field" + hopNumber);
hop.getVertexRequest(0).addInclude("value" + hopNumber, hopNumber);
}
Request request = RequestConverters.xPackGraphExplore(graphExploreRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
assertEquals("/index1,index2/type1,type2/_xpack/graph/_explore", request.getEndpoint());
assertEquals(expectedParams, request.getParameters());
assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters()));
assertToXContentBody(graphExploreRequest, request.getEntity());
}
public void testPutLifecyclePolicy() throws Exception {
String name = randomAlphaOfLengthBetween(2, 20);
LifecyclePolicy policy = createRandomPolicy(name);

View File

@ -1034,7 +1034,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertTrue(explainResponse.isExists());
assertTrue(explainResponse.isMatch());
assertTrue(explainResponse.hasExplanation());
assertThat(explainResponse.getExplanation().getValue(), greaterThan(0.0f));
assertThat(explainResponse.getExplanation().getValue().floatValue(), greaterThan(0.0f));
assertNull(explainResponse.getGetResult());
}
{

View File

@ -0,0 +1,115 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.test.ESTestCase;
import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
public class TasksRequestConvertersTests extends ESTestCase {
public void testCancelTasks() {
CancelTasksRequest request = new CancelTasksRequest();
Map<String, String> expectedParams = new HashMap<>();
TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong());
TaskId parentTaskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong());
request.setTaskId(taskId);
request.setParentTaskId(parentTaskId);
expectedParams.put("task_id", taskId.toString());
expectedParams.put("parent_task_id", parentTaskId.toString());
Request httpRequest = TasksRequestConverters.cancelTasks(request);
assertThat(httpRequest, notNullValue());
assertThat(httpRequest.getMethod(), equalTo(HttpPost.METHOD_NAME));
assertThat(httpRequest.getEntity(), nullValue());
assertThat(httpRequest.getEndpoint(), equalTo("/_tasks/_cancel"));
assertThat(httpRequest.getParameters(), equalTo(expectedParams));
}
public void testListTasks() {
{
ListTasksRequest request = new ListTasksRequest();
Map<String, String> expectedParams = new HashMap<>();
if (randomBoolean()) {
request.setDetailed(randomBoolean());
if (request.getDetailed()) {
expectedParams.put("detailed", "true");
}
}
if (randomBoolean()) {
request.setWaitForCompletion(randomBoolean());
if (request.getWaitForCompletion()) {
expectedParams.put("wait_for_completion", "true");
}
}
if (randomBoolean()) {
String timeout = randomTimeValue();
request.setTimeout(timeout);
expectedParams.put("timeout", timeout);
}
if (randomBoolean()) {
if (randomBoolean()) {
TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong());
request.setParentTaskId(taskId);
expectedParams.put("parent_task_id", taskId.toString());
} else {
request.setParentTask(TaskId.EMPTY_TASK_ID);
}
}
if (randomBoolean()) {
String[] nodes = generateRandomStringArray(10, 8, false);
request.setNodes(nodes);
if (nodes.length > 0) {
expectedParams.put("nodes", String.join(",", nodes));
}
}
if (randomBoolean()) {
String[] actions = generateRandomStringArray(10, 8, false);
request.setActions(actions);
if (actions.length > 0) {
expectedParams.put("actions", String.join(",", actions));
}
}
expectedParams.put("group_by", "none");
Request httpRequest = TasksRequestConverters.listTasks(request);
assertThat(httpRequest, notNullValue());
assertThat(httpRequest.getMethod(), equalTo(HttpGet.METHOD_NAME));
assertThat(httpRequest.getEntity(), nullValue());
assertThat(httpRequest.getEndpoint(), equalTo("/_tasks"));
assertThat(httpRequest.getParameters(), equalTo(expectedParams));
}
{
ListTasksRequest request = new ListTasksRequest();
request.setTaskId(new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()));
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, ()
-> TasksRequestConverters.listTasks(request));
assertEquals("TaskId cannot be used for list tasks request", exception.getMessage());
}
}
}

View File

@ -51,10 +51,17 @@ import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.OpenJobResponse;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.PutJobResponse;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.job.config.AnalysisConfig;
import org.elasticsearch.client.ml.job.config.AnalysisLimits;
import org.elasticsearch.client.ml.job.config.DataDescription;
import org.elasticsearch.client.ml.job.config.DetectionRule;
import org.elasticsearch.client.ml.job.config.Detector;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.client.ml.job.config.ModelPlotConfig;
import org.elasticsearch.client.ml.job.config.Operator;
import org.elasticsearch.client.ml.job.config.RuleCondition;
import org.elasticsearch.client.ml.job.results.AnomalyRecord;
import org.elasticsearch.client.ml.job.results.Bucket;
import org.elasticsearch.client.ml.job.results.Influencer;
@ -66,9 +73,12 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.junit.After;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
@ -372,6 +382,93 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
}
}
public void testUpdateJob() throws Exception {
RestHighLevelClient client = highLevelClient();
String jobId = "test-update-job";
Job tempJob = MachineLearningIT.buildJob(jobId);
Job job = new Job.Builder(tempJob)
.setAnalysisConfig(new AnalysisConfig.Builder(tempJob.getAnalysisConfig())
.setCategorizationFieldName("categorization-field")
.setDetector(0,
new Detector.Builder().setFieldName("total")
.setFunction("sum")
.setPartitionFieldName("mlcategory")
.setDetectorDescription(randomAlphaOfLength(10))
.build()))
.build();
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
{
List<DetectionRule> detectionRules = Arrays.asList(
new DetectionRule.Builder(Arrays.asList(RuleCondition.createTime(Operator.GT, 100L))).build());
Map<String, Object> customSettings = new HashMap<>();
customSettings.put("custom-setting-1", "custom-value");
//tag::x-pack-ml-update-job-detector-options
JobUpdate.DetectorUpdate detectorUpdate = new JobUpdate.DetectorUpdate(0, //<1>
"detector description", //<2>
detectionRules); //<3>
//end::x-pack-ml-update-job-detector-options
//tag::x-pack-ml-update-job-options
JobUpdate update = new JobUpdate.Builder(jobId) //<1>
.setDescription("My description") //<2>
.setAnalysisLimits(new AnalysisLimits(1000L, null)) //<3>
.setBackgroundPersistInterval(TimeValue.timeValueHours(3)) //<4>
.setCategorizationFilters(Arrays.asList("categorization-filter")) //<5>
.setDetectorUpdates(Arrays.asList(detectorUpdate)) //<6>
.setGroups(Arrays.asList("job-group-1")) //<7>
.setResultsRetentionDays(10L) //<8>
.setModelPlotConfig(new ModelPlotConfig(true, null)) //<9>
.setModelSnapshotRetentionDays(7L) //<10>
.setCustomSettings(customSettings) //<11>
.setRenormalizationWindowDays(3L) //<12>
.build();
//end::x-pack-ml-update-job-options
//tag::x-pack-ml-update-job-request
UpdateJobRequest updateJobRequest = new UpdateJobRequest(update); //<1>
//end::x-pack-ml-update-job-request
//tag::x-pack-ml-update-job-execute
PutJobResponse updateJobResponse = client.machineLearning().updateJob(updateJobRequest, RequestOptions.DEFAULT);
//end::x-pack-ml-update-job-execute
//tag::x-pack-ml-update-job-response
Job updatedJob = updateJobResponse.getResponse(); //<1>
//end::x-pack-ml-update-job-response
assertEquals(update.getDescription(), updatedJob.getDescription());
}
{
//tag::x-pack-ml-update-job-listener
ActionListener<PutJobResponse> listener = new ActionListener<PutJobResponse>() {
@Override
public void onResponse(PutJobResponse updateJobResponse) {
//<1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
//end::x-pack-ml-update-job-listener
UpdateJobRequest updateJobRequest = new UpdateJobRequest(new JobUpdate.Builder(jobId).build());
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::x-pack-ml-update-job-execute-async
client.machineLearning().updateJobAsync(updateJobRequest, RequestOptions.DEFAULT, listener); //<1>
// end::x-pack-ml-update-job-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testGetBuckets() throws IOException, InterruptedException {
RestHighLevelClient client = highLevelClient();

View File

@ -86,7 +86,7 @@ import org.elasticsearch.search.aggregations.bucket.range.Range;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.avg.Avg;
import org.elasticsearch.search.aggregations.metrics.Avg;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;

View File

@ -0,0 +1,44 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.job.config.JobTests;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.client.ml.job.config.JobUpdateTests;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
public class UpdateJobRequestTests extends AbstractXContentTestCase<UpdateJobRequest> {
@Override
protected UpdateJobRequest createTestInstance() {
return new UpdateJobRequest(JobUpdateTests.createRandom(JobTests.randomValidJobId()));
}
@Override
protected UpdateJobRequest doParseInstance(XContentParser parser) {
return new UpdateJobRequest(JobUpdate.PARSER.apply(parser, null).build());
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
}

View File

@ -30,7 +30,7 @@ import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField;
import org.elasticsearch.test.AbstractXContentTestCase;

View File

@ -0,0 +1,120 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml.job.config;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.Predicate;
public class JobUpdateTests extends AbstractXContentTestCase<JobUpdate> {
@Override
protected JobUpdate createTestInstance() {
return createRandom(randomAlphaOfLength(4));
}
/**
* Creates a completely random update when the job is null
* or a random update that is valid for the given job
*/
public static JobUpdate createRandom(String jobId) {
JobUpdate.Builder update = new JobUpdate.Builder(jobId);
if (randomBoolean()) {
int groupsNum = randomIntBetween(0, 10);
List<String> groups = new ArrayList<>(groupsNum);
for (int i = 0; i < groupsNum; i++) {
groups.add(JobTests.randomValidJobId());
}
update.setGroups(groups);
}
if (randomBoolean()) {
update.setDescription(randomAlphaOfLength(20));
}
if (randomBoolean()) {
update.setDetectorUpdates(createRandomDetectorUpdates());
}
if (randomBoolean()) {
update.setModelPlotConfig(new ModelPlotConfig(randomBoolean(), randomAlphaOfLength(10)));
}
if (randomBoolean()) {
update.setAnalysisLimits(AnalysisLimitsTests.createRandomized());
}
if (randomBoolean()) {
update.setRenormalizationWindowDays(randomNonNegativeLong());
}
if (randomBoolean()) {
update.setBackgroundPersistInterval(TimeValue.timeValueHours(randomIntBetween(1, 24)));
}
if (randomBoolean()) {
update.setModelSnapshotRetentionDays(randomNonNegativeLong());
}
if (randomBoolean()) {
update.setResultsRetentionDays(randomNonNegativeLong());
}
if (randomBoolean()) {
update.setCategorizationFilters(Arrays.asList(generateRandomStringArray(10, 10, false)));
}
if (randomBoolean()) {
update.setCustomSettings(Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10)));
}
return update.build();
}
private static List<JobUpdate.DetectorUpdate> createRandomDetectorUpdates() {
int size = randomInt(10);
List<JobUpdate.DetectorUpdate> detectorUpdates = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
String detectorDescription = null;
if (randomBoolean()) {
detectorDescription = randomAlphaOfLength(12);
}
List<DetectionRule> detectionRules = null;
if (randomBoolean()) {
detectionRules = new ArrayList<>();
detectionRules.add(new DetectionRule.Builder(
Collections.singletonList(new RuleCondition(RuleCondition.AppliesTo.ACTUAL, Operator.GT, 5))).build());
}
detectorUpdates.add(new JobUpdate.DetectorUpdate(i, detectorDescription, detectionRules));
}
return detectorUpdates;
}
@Override
protected JobUpdate doParseInstance(XContentParser parser) {
return JobUpdate.PARSER.apply(parser, null).build();
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
return field -> !field.isEmpty();
}
}

View File

@ -21,7 +21,7 @@ package org.elasticsearch.plugins;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.apache.lucene.search.spell.LevensteinDistance;
import org.apache.lucene.search.spell.LevenshteinDistance;
import org.apache.lucene.util.CollectionUtil;
import org.bouncycastle.bcpg.ArmoredInputStream;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
@ -355,7 +355,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
/** Returns all the official plugin names that look similar to pluginId. **/
private List<String> checkMisspelledPlugin(String pluginId) {
LevensteinDistance ld = new LevensteinDistance();
LevenshteinDistance ld = new LevenshteinDistance();
List<Tuple<Float, String>> scoredKeys = new ArrayList<>();
for (String officialPlugin : OFFICIAL_PLUGINS) {
float distance = ld.getDistance(pluginId, officialPlugin);

View File

@ -1,7 +1,7 @@
:version: 7.0.0-alpha1
:major-version: 7.x
:lucene_version: 7.5.0
:lucene_version_path: 7_5_0
:lucene_version: 8.0.0
:lucene_version_path: 8_0_0
:branch: master
:jdk: 1.8.0_131
:jdk_major: 8

View File

@ -0,0 +1,93 @@
[[java-rest-high-x-pack-ml-update-job]]
=== Update Job API
The Update Job API provides the ability to update a {ml} job.
It accepts an `UpdateJobRequest` object and responds
with a `PutJobResponse` object.
[[java-rest-high-x-pack-ml-update-job-request]]
==== Update Job Request
An `UpdateJobRequest` object is constructed from a `JobUpdate` object.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-request]
--------------------------------------------------
<1> Constructing a new request referencing a `JobUpdate` object
==== Optional Arguments
The `JobUpdate` object has many optional arguments for updating an existing {ml}
job. A non-null `jobId` referencing an existing job must be supplied when it is constructed.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-options]
--------------------------------------------------
<1> Mandatory, non-null `jobId` referencing an existing {ml} job
<2> Updated description
<3> Updated analysis limits
<4> Updated background persistence interval
<5> Updated analysis config's categorization filters
<6> Updated detectors through the `JobUpdate.DetectorUpdate` object
<7> Updated group membership
<8> Updated result retention
<9> Updated model plot configuration
<10> Updated model snapshot retention setting
<11> Updated custom settings
<12> Updated renormalization window
These options can also include detector-specific updates through the optional `JobUpdate.DetectorUpdate` object.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-detector-options]
--------------------------------------------------
<1> The index of the detector to update (`0` refers to the job's first detector)
<2> The optional description of the detector
<3> The `DetectionRule` rules that apply to this detector
[[java-rest-high-x-pack-ml-update-job-execution]]
==== Execution
The request can be executed through the `MachineLearningClient` contained
in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-execute]
--------------------------------------------------
[[java-rest-high-x-pack-ml-update-job-execution-async]]
==== Asynchronous Execution
The request can also be executed asynchronously:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-execute-async]
--------------------------------------------------
<1> The `UpdateJobRequest` to execute and the `ActionListener` to use when
the execution completes
The method does not block and returns immediately. The passed `ActionListener` is used
to notify the caller of completion. A typical `ActionListener` for `PutJobResponse` may
look like
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-listener]
--------------------------------------------------
<1> `onResponse` is called back when the action is completed successfully
<2> `onFailure` is called back when some unexpected error occurs
[[java-rest-high-x-pack-ml-update-job-response]]
==== Update Job Response
A `PutJobResponse` contains the updated `Job` object
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-update-job-response]
--------------------------------------------------
<1> `getResponse()` returns the updated `Job` object
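Putting the pieces together, a minimal synchronous update might look like the following sketch; the job id and description are placeholders, and `client` is an existing `RestHighLevelClient`:
["source","java"]
--------------------------------------------------
JobUpdate update = new JobUpdate.Builder("my-job")            // id of an existing job
        .setDescription("Updated description")                // only set the fields that should change
        .build();
UpdateJobRequest updateJobRequest = new UpdateJobRequest(update);
PutJobResponse updateJobResponse =
        client.machineLearning().updateJob(updateJobRequest, RequestOptions.DEFAULT);
Job updatedJob = updateJobResponse.getResponse();             // the job as persisted after the update
--------------------------------------------------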

View File

@ -216,6 +216,7 @@ The Java High Level REST Client supports the following Machine Learning APIs:
* <<java-rest-high-x-pack-ml-open-job>>
* <<java-rest-high-x-pack-ml-close-job>>
* <<java-rest-high-x-pack-ml-flush-job>>
* <<java-rest-high-x-pack-ml-update-job>>
* <<java-rest-high-x-pack-ml-get-job-stats>>
* <<java-rest-high-x-pack-ml-get-buckets>>
* <<java-rest-high-x-pack-ml-get-overall-buckets>>
@ -227,6 +228,7 @@ include::ml/get-job.asciidoc[]
include::ml/delete-job.asciidoc[]
include::ml/open-job.asciidoc[]
include::ml/close-job.asciidoc[]
include::ml/update-job.asciidoc[]
include::ml/flush-job.asciidoc[]
include::ml/get-job-stats.asciidoc[]
include::ml/get-buckets.asciidoc[]

View File

@ -38,7 +38,6 @@ PUT phonetic_sample
"my_analyzer": {
"tokenizer": "standard",
"filter": [
"standard",
"lowercase",
"my_metaphone"
]

View File

@ -320,7 +320,7 @@ Top hits response snippet with a nested hit, which resides in the first slot of
"by_nested": {
"hits": {
"total": 1,
"max_score": 0.2876821,
"max_score": 0.3616575,
"hits": [
{
"_index": "sales",
@ -330,7 +330,7 @@ Top hits response snippet with a nested hit, which resides in the first slot of
"field": "comments", <1>
"offset": 0 <2>
},
"_score": 0.2876821,
"_score": 0.3616575,
"_source": {
"comment": "This car could have better brakes", <3>
"username": "baddriver007"

View File

@ -273,7 +273,6 @@ Tokenizer::
* <<analysis-standard-tokenizer,Standard Tokenizer>>
Token Filters::
* <<analysis-standard-tokenfilter,Standard Token Filter>>
* <<analysis-lowercase-tokenfilter,Lower Case Token Filter>>
* <<analysis-stop-tokenfilter,Stop Token Filter>> (disabled by default)
@ -292,7 +291,6 @@ PUT /standard_example
"rebuilt_standard": {
"tokenizer": "standard",
"filter": [
"standard",
"lowercase" <1>
]
}

View File

@ -9,8 +9,6 @@ or add tokens (eg synonyms).
Elasticsearch has a number of built in token filters which can be
used to build <<analysis-custom-analyzer,custom analyzers>>.
include::tokenfilters/standard-tokenfilter.asciidoc[]
include::tokenfilters/asciifolding-tokenfilter.asciidoc[]
include::tokenfilters/flatten-graph-tokenfilter.asciidoc[]

View File

@ -15,7 +15,7 @@ PUT /asciifold_example
"analyzer" : {
"default" : {
"tokenizer" : "standard",
"filter" : ["standard", "asciifolding"]
"filter" : ["asciifolding"]
}
}
}
@ -37,7 +37,7 @@ PUT /asciifold_example
"analyzer" : {
"default" : {
"tokenizer" : "standard",
"filter" : ["standard", "my_ascii_folding"]
"filter" : ["my_ascii_folding"]
}
},
"filter" : {

View File

@ -16,7 +16,7 @@ PUT /elision_example
"analyzer" : {
"default" : {
"tokenizer" : "standard",
"filter" : ["standard", "elision"]
"filter" : ["elision"]
}
},
"filter" : {

View File

@ -26,7 +26,7 @@ PUT /keep_types_example
"analyzer" : {
"my_analyzer" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "extract_numbers"]
"filter" : ["lowercase", "extract_numbers"]
}
},
"filter" : {
@ -87,7 +87,7 @@ PUT /keep_types_exclude_example
"analyzer" : {
"my_analyzer" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "remove_numbers"]
"filter" : ["lowercase", "remove_numbers"]
}
},
"filter" : {

View File

@ -27,11 +27,11 @@ PUT /keep_words_example
"analyzer" : {
"example_1" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "words_till_three"]
"filter" : ["lowercase", "words_till_three"]
},
"example_2" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "words_in_file"]
"filter" : ["lowercase", "words_in_file"]
}
},
"filter" : {

View File

@ -19,7 +19,7 @@ PUT /my_index
"analyzer" : {
"my_analyzer" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "my_snow"]
"filter" : ["lowercase", "my_snow"]
}
},
"filter" : {

View File

@ -1,15 +0,0 @@
[[analysis-standard-tokenfilter]]
=== Standard Token Filter
A token filter of type `standard` that normalizes tokens extracted with
the
<<analysis-standard-tokenizer,Standard
Tokenizer>>.
[TIP]
==================================================
The `standard` token filter currently does nothing. It remains as a placeholder
in case some filtering function needs to be added in a future version.
==================================================

View File

@ -13,7 +13,7 @@ PUT /my_index
"analyzer" : {
"my_analyzer" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "my_stemmer"]
"filter" : ["lowercase", "my_stemmer"]
}
},
"filter" : {

View File

@ -143,13 +143,13 @@ GET index/_search
},
"hits": {
"total": 1,
"max_score": 0.80259144,
"max_score": 0.8025915,
"hits": [
{
"_index": "index",
"_type": "_doc",
"_id": "1",
"_score": 0.80259144,
"_score": 0.8025915,
"_source": {
"body": "Ski resort"
}
@ -200,13 +200,13 @@ GET index/_search
},
"hits": {
"total": 1,
"max_score": 0.80259144,
"max_score": 0.8025915,
"hits": [
{
"_index": "index",
"_type": "_doc",
"_id": "1",
"_score": 0.80259144,
"_score": 0.8025915,
"_source": {
"body": "Ski resort"
}

View File

@ -295,27 +295,27 @@ Which yields:
"details": []
},
{
"value": 2.0,
"value": 2,
"description": "field.docCount",
"details": []
},
{
"value": 4.0,
"value": 4,
"description": "field.sumDocFreq",
"details": []
},
{
"value": 5.0,
"value": 5,
"description": "field.sumTotalTermFreq",
"details": []
},
{
"value": 1.0,
"value": 1,
"description": "term.docFreq",
"details": []
},
{
"value": 2.0,
"value": 2,
"description": "term.totalTermFreq",
"details": []
},
@ -325,7 +325,7 @@ Which yields:
"details": []
},
{
"value": 3.0,
"value": 3,
"description": "doc.length",
"details": []
}
@ -469,27 +469,27 @@ GET /index/_search?explain=true
"details": []
},
{
"value": 2.0,
"value": 2,
"description": "field.docCount",
"details": []
},
{
"value": 4.0,
"value": 4,
"description": "field.sumDocFreq",
"details": []
},
{
"value": 5.0,
"value": 5,
"description": "field.sumTotalTermFreq",
"details": []
},
{
"value": 1.0,
"value": 1,
"description": "term.docFreq",
"details": []
},
{
"value": 2.0,
"value": 2,
"description": "term.totalTermFreq",
"details": []
},
@ -499,7 +499,7 @@ GET /index/_search?explain=true
"details": []
},
{
"value": 3.0,
"value": 3,
"description": "doc.length",
"details": []
}

View File

@ -446,7 +446,6 @@ PUT my_queries1
"type": "custom",
"tokenizer": "standard",
"filter": [
"standard",
"lowercase",
"wildcard_edge_ngram"
]
@ -597,7 +596,6 @@ PUT my_queries2
"type": "custom",
"tokenizer": "standard",
"filter": [
"standard",
"lowercase",
"reverse",
"wildcard_edge_ngram"
@ -607,7 +605,6 @@ PUT my_queries2
"type": "custom",
"tokenizer": "standard",
"filter": [
"standard",
"lowercase",
"reverse"
]

View File

@ -22,3 +22,7 @@ The `delimited_payload_filter` was deprecated and renamed to `delimited_payload`
Using it in indices created before 7.0 will issue deprecation warnings. Using the old
name in new indices created in 7.0 will throw an error. Use the new name `delimited_payload`
instead.
==== `standard` filter has been removed
The `standard` token filter has been removed because it did not change anything in the token stream.
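Any analyzer definition that listed `standard` in its `filter` array can simply drop that entry; the `standard` *tokenizer* is unaffected. As a sketch only (index and analyzer names are placeholders, `client` is an existing `RestHighLevelClient`), the equivalent settings can be expressed like this:
["source","java"]
--------------------------------------------------
// Sketch: custom analyzer without the removed "standard" token filter.
CreateIndexRequest createIndexRequest = new CreateIndexRequest("my_index");
createIndexRequest.settings(Settings.builder()
        .put("analysis.analyzer.my_analyzer.tokenizer", "standard")      // the standard tokenizer still exists
        .putList("analysis.analyzer.my_analyzer.filter", "lowercase"));  // no "standard" entry needed any more
client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
--------------------------------------------------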

View File

@ -12,3 +12,9 @@
The `prepareExecute` method which created a request builder has been
removed from the client api. Instead, construct a builder for the
appropriate request directly.
=== Some Aggregation classes have moved packages
* All classes present in `org.elasticsearch.search.aggregations.metrics.*` packages
were moved to a single `org.elasticsearch.search.aggregations.metrics` package.
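For example, code that used the old per-metric sub-packages only needs its imports updated; the sketch below uses `Avg` and `MaxAggregationBuilder`, whose new locations appear in the changes above:
["source","java"]
--------------------------------------------------
// Before (old per-metric sub-packages):
// import org.elasticsearch.search.aggregations.metrics.avg.Avg;
// import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;

// After: all metric aggregation classes live directly in the metrics package.
import org.elasticsearch.search.aggregations.metrics.Avg;
import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
--------------------------------------------------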

View File

@ -0,0 +1,78 @@
[role="xpack"]
[testenv="platinum"]
[[ml-delete-forecast]]
=== Delete Forecast API
++++
<titleabbrev>Delete Forecast</titleabbrev>
++++
Deletes forecasts from a {ml} job.
==== Request
`DELETE _xpack/ml/anomaly_detectors/<job_id>/_forecast` +
`DELETE _xpack/ml/anomaly_detectors/<job_id>/_forecast/<forecast_id>` +
`DELETE _xpack/ml/anomaly_detectors/<job_id>/_forecast/_all`
==== Description
By default, forecasts are retained for 14 days. You can specify a different
retention period with the `expires_in` parameter in the <<ml-forecast,forecast jobs API>>. The delete forecast API enables you to delete one or more forecasts before they expire.
NOTE: When you delete a job, its associated forecasts are deleted.
For more information, see {stack-ov}/ml-overview.html#ml-forecasting[Forecasting the Future].
==== Path Parameters
`job_id` (required)::
(string) Identifier for the job.
`forecast_id`::
(string) A comma-separated list of forecast identifiers.
If you do not specify this optional parameter or if you specify `_all`, the
API deletes all forecasts from the job.
==== Request Parameters
`allow_no_forecasts`::
(boolean) Specifies whether an error occurs when there are no forecasts. In
particular, if this parameter is set to `false` and there are no forecasts
associated with the job, attempts to delete all forecasts return an error.
The default value is `true`.
`timeout`::
(time units) Specifies the period of time to wait for the completion of the
delete operation. When this period of time elapses, the API fails and returns
an error. The default value is `30s`. For more information about time units,
see <<time-units>>.
==== Authorization
You must have `manage_ml` or `manage` cluster privileges to use this API.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
==== Examples
The following example deletes all forecasts from the `total-requests` job:
[source,js]
--------------------------------------------------
DELETE _xpack/ml/anomaly_detectors/total-requests/_forecast/_all
--------------------------------------------------
// CONSOLE
// TEST[skip:setup:server_metrics_openjob]
If the request does not encounter errors, you receive the following result:
[source,js]
----
{
"acknowledged": true
}
----
// NOTCONSOLE
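For completeness, the optional parameters can be combined with specific forecast identifiers. The sketch below uses the low-level Java REST client; the job name, the forecast ids, and `lowLevelClient` are placeholders:
["source","java"]
--------------------------------------------------
// Sketch only: delete two named forecasts and fail if they are missing.
Request request = new Request("DELETE",
        "/_xpack/ml/anomaly_detectors/total-requests/_forecast/forecast-one,forecast-two");
request.addParameter("allow_no_forecasts", "false"); // error out when no matching forecasts exist
request.addParameter("timeout", "1m");                // wait up to one minute for the deletion
Response response = lowLevelClient.performRequest(request);
--------------------------------------------------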

View File

@ -48,7 +48,7 @@ machine learning APIs and in advanced job configuration options in Kibana.
* <<ml-flush-job,Flush job>>
* <<ml-post-data,Post data to job>>
* <<ml-update-job,Update job>>
* <<ml-forecast,Forecast job behavior>>
* <<ml-forecast,Forecast job behavior>>, <<ml-delete-forecast,Delete forecasts>>
[float]
[[ml-api-snapshot-endpoint]]
@ -85,6 +85,7 @@ include::delete-calendar.asciidoc[]
include::delete-datafeed.asciidoc[]
include::delete-calendar-event.asciidoc[]
include::delete-filter.asciidoc[]
include::delete-forecast.asciidoc[]
include::delete-job.asciidoc[]
include::delete-calendar-job.asciidoc[]
include::delete-snapshot.asciidoc[]

View File

@ -555,3 +555,8 @@ See <<commands>>.
See <<api-definitions>>.
[role="exclude",id="analysis-standard-tokenfilter"]
=== Standard filter removed
The standard token filter has been removed.

View File

@ -30,62 +30,67 @@ This will yield the following result:
[source,js]
--------------------------------------------------
{
"_index": "twitter",
"_type": "_doc",
"_id": "0",
"matched": true,
"explanation": {
"value": 1.6943599,
"description": "weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:",
"details": [
"_index":"twitter",
"_type":"_doc",
"_id":"0",
"matched":true,
"explanation":{
"value":1.6943597,
"description":"weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:",
"details":[
{
"value": 1.6943599,
"description": "score(doc=0,freq=1.0 = termFreq=1.0\n), product of:",
"details": [
"value":1.6943597,
"description":"score(freq=1.0), product of:",
"details":[
{
"value": 1.3862944,
"description": "idf, computed as log(1 + (docCount - docFreq + 0.5) / (docFreq + 0.5)) from:",
"details": [
{
"value": 1.0,
"description": "docFreq",
"details": []
},
{
"value": 5.0,
"description": "docCount",
"details": []
}
]
"value":2.2,
"description":"scaling factor, k1 + 1",
"details":[]
},
{
"value": 1.2222223,
"description": "tfNorm, computed as (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength)) from:",
"details": [
{
"value":1.3862944,
"description":"idf, computed as log(1 + (N - n + 0.5) / (n + 0.5)) from:",
"details":[
{
"value": 1.0,
"description": "termFreq=1.0",
"details": []
"value":1,
"description":"n, number of documents containing term",
"details":[]
},
{
"value": 1.2,
"description": "parameter k1",
"details": []
"value":5,
"description":"N, total number of documents with field",
"details":[]
}
]
},
{
"value":0.5555555,
"description":"tf, computed as freq / (freq + k1 * (1 - b + b * dl / avgdl)) from:",
"details":[
{
"value":1.0,
"description":"freq, occurrences of term within document",
"details":[]
},
{
"value": 0.75,
"description": "parameter b",
"details": []
"value":1.2,
"description":"k1, term saturation parameter",
"details":[]
},
{
"value": 5.4,
"description": "avgFieldLength",
"details": []
"value":0.75,
"description":"b, length normalization parameter",
"details":[]
},
{
"value": 3.0,
"description": "fieldLength",
"details": []
"value":3.0,
"description":"dl, length of field",
"details":[]
},
{
"value":5.4,
"description":"avgdl, average length of field",
"details":[]
}
]
}

View File

@ -72,7 +72,11 @@ This will yield the following result:
"next_doc": 53876,
"next_doc_count": 5,
"advance": 0,
"advance_count": 0
"advance_count": 0,
"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
},
"children": [
{
@ -91,7 +95,11 @@ This will yield the following result:
"next_doc": 10111,
"next_doc_count": 5,
"advance": 0,
"advance_count": 0
"advance_count": 0,
"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
}
},
{
@ -110,7 +118,11 @@ This will yield the following result:
"next_doc": 2852,
"next_doc_count": 5,
"advance": 0,
"advance_count": 0
"advance_count": 0,
"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
}
}
]
@ -288,7 +300,11 @@ The `breakdown` component lists detailed timing statistics about low-level Lucen
"next_doc": 53876,
"next_doc_count": 5,
"advance": 0,
"advance_count": 0
"advance_count": 0,
"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
}
--------------------------------------------------
// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.$_path",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:some message:number",\n"time_in_nanos": $body.$_path,/]
@ -548,7 +564,11 @@ And the response:
"score_count": 1,
"build_scorer": 377872,
"advance": 0,
"advance_count": 0
"advance_count": 0,
"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
}
},
{
@ -567,7 +587,11 @@ And the response:
"score_count": 1,
"build_scorer": 112551,
"advance": 0,
"advance_count": 0
"advance_count": 0,
"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
}
}
],

View File

@ -265,19 +265,19 @@ Response not included in text but tested for completeness sake.
...,
"hits": {
"total": 1,
"max_score": 1.0444683,
"max_score": 1.0444684,
"hits": [
{
"_index": "test",
"_type": "_doc",
"_id": "1",
"_score": 1.0444683,
"_score": 1.0444684,
"_source": ...,
"inner_hits": {
"comments": { <1>
"hits": {
"total": 1,
"max_score": 1.0444683,
"max_score": 1.0444684,
"hits": [
{
"_index": "test",
@ -287,7 +287,7 @@ Response not included in text but tested for completeness sake.
"field": "comments",
"offset": 1
},
"_score": 1.0444683,
"_score": 1.0444684,
"fields": {
"comments.text.keyword": [
"words words words"

View File

@ -33,12 +33,12 @@ PUT test
"trigram": {
"type": "custom",
"tokenizer": "standard",
"filter": ["standard", "shingle"]
"filter": ["shingle"]
},
"reverse": {
"type": "custom",
"tokenizer": "standard",
"filter": ["standard", "reverse"]
"filter": ["reverse"]
}
},
"filter": {

View File

@ -0,0 +1,168 @@
[role="xpack"]
[testenv="basic"]
[[sql-functions-aggs]]
=== Aggregate Functions
Functions for computing a _single_ result from a set of input values.
{es-sql} supports aggregate functions only alongside <<sql-syntax-group-by,grouping>> (implicit or explicit).
==== General Purpose
[[sql-functions-aggs-avg]]
===== `AVG`
*Input*: Numeric, *Output*: `double`
https://en.wikipedia.org/wiki/Arithmetic_mean[Average] (arithmetic mean) of input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggAvg]
----
[[sql-functions-aggs-count]]
===== `COUNT`
*Input*: Any, *Output*: `bigint`
Total number (count) of input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggCountStar]
----
[[sql-functions-aggs-count-distinct]]
===== `COUNT(DISTINCT)`
*Input*: Any, *Output*: `bigint`
Total number of _distinct_ values among the input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggCountDistinct]
----
[[sql-functions-aggs-max]]
===== `MAX`
*Input*: Numeric, *Output*: Same as input
Maximum value across input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggMax]
----
[[sql-functions-aggs-min]]
===== `MIN`
*Input*: Numeric, *Output*: Same as input
Minimum value across input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggMin]
----
[[sql-functions-aggs-sum]]
===== `SUM`
*Input*: Numeric, *Output*: `bigint` for integer input, `double` for floating point input
Sum of input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggSum]
----
==== Statistics
[[sql-functions-aggs-kurtosis]]
===== `KURTOSIS`
*Input*: Numeric, *Output*: `double`
https://en.wikipedia.org/wiki/Kurtosis[Quantify] the shape of the distribution of input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggKurtosis]
----
[[sql-functions-aggs-percentile]]
===== `PERCENTILE`
*Input*: Numeric, *Output*: `double`
The nth https://en.wikipedia.org/wiki/Percentile[percentile] of input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggPercentile]
----
[[sql-functions-aggs-percentile-rank]]
===== `PERCENTILE_RANK`
*Input*: Numeric, *Output*: `double`
The https://en.wikipedia.org/wiki/Percentile_rank[percentile rank] of input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggPercentileRank]
----
[[sql-functions-aggs-skewness]]
===== `SKEWNESS`
*Input*: Numeric, *Output*: `double`
https://en.wikipedia.org/wiki/Skewness[Quantify] the asymmetry of the distribution of input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggSkewness]
----
[[sql-functions-aggs-stddev-pop]]
===== `STDDEV_POP`
*Input*: Numeric, *Output*: `double`
https://en.wikipedia.org/wiki/Standard_deviations[Population standard deviation] of input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggStddevPop]
----
[[sql-functions-aggs-sum-squares]]
===== `SUM_OF_SQUARES`
*Input*: Numeric, *Output*: `double`
https://en.wikipedia.org/wiki/Total_sum_of_squares[Sum of squares] of input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggSumOfSquares]
----
[[sql-functions-aggs-var-pop]]
===== `VAR_POP`
*Input*: Numeric, *Output*: `double`
https://en.wikipedia.org/wiki/Variance[Population] variance of input values.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[aggVarPop]
----

View File

@ -0,0 +1,94 @@
[role="xpack"]
[testenv="basic"]
[[sql-functions-datetime]]
=== Date and Time Functions
* Extract the year from a date (`YEAR`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[year]
--------------------------------------------------
* Extract the month of the year from a date (`MONTH_OF_YEAR` or `MONTH`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[monthOfYear]
--------------------------------------------------
* Extract the week of the year from a date (`WEEK_OF_YEAR` or `WEEK`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[weekOfYear]
--------------------------------------------------
* Extract the day of the year from a date (`DAY_OF_YEAR` or `DOY`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[dayOfYear]
--------------------------------------------------
* Extract the day of the month from a date (`DAY_OF_MONTH`, `DOM`, or `DAY`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[dayOfMonth]
--------------------------------------------------
* Extract the day of the week from a date (`DAY_OF_WEEK` or `DOW`).
Monday is `1`, Tuesday is `2`, etc.
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[dayOfWeek]
--------------------------------------------------
* Extract the hour of the day from a date (`HOUR_OF_DAY` or `HOUR`).
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[hourOfDay]
--------------------------------------------------
* Extract the minute of the day from a date (`MINUTE_OF_DAY`).
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[minuteOfDay]
--------------------------------------------------
* Extract the minute of the hour from a date (`MINUTE_OF_HOUR`, `MINUTE`).
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[minuteOfHour]
--------------------------------------------------
* Extract the second of the minute from a date (`SECOND_OF_MINUTE`, `SECOND`).
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[secondOfMinute]
--------------------------------------------------
* Extract
As an alternative, `EXTRACT` can be used to extract fields from datetimes.
You can run any <<sql-functions-datetime,datetime function>>
with `EXTRACT(<datetime_function> FROM <expression>)`. So
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[extractDayOfYear]
--------------------------------------------------
is equivalent to
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[dayOfYear]
--------------------------------------------------

View File

@ -3,416 +3,20 @@
[[sql-functions]]
== Functions and Operators
{es-sql} provides a number of built-in operators and functions.
=== Comparison Operators
{es-sql} supports the following comparison operators:
* Equality (`=`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereFieldEquality]
--------------------------------------------------
* Inequality (`<>` or `!=` or `<=>`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereFieldNonEquality]
--------------------------------------------------
* Comparison (`<`, `<=`, `>`, `>=`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereFieldLessThan]
--------------------------------------------------
* `BETWEEN`
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereBetween]
--------------------------------------------------
* `IS NULL`/`IS NOT NULL`
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereIsNotNullAndIsNull]
--------------------------------------------------
=== Logical Operators
{es-sql} supports the following logical operators:
* `AND`
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereFieldAndComparison]
--------------------------------------------------
* `OR`
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereFieldOrComparison]
--------------------------------------------------
* `NOT`
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereFieldEqualityNot]
--------------------------------------------------
=== Math Operators
{es-sql} supports the following math operators:
* Add (`+`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/arithmetic.sql-spec[plus]
--------------------------------------------------
* Subtract (infix `-`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/arithmetic.sql-spec[minus]
--------------------------------------------------
* Negate (unary `-`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/arithmetic.sql-spec[unaryMinus]
--------------------------------------------------
* Multiply (`*`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/arithmetic.sql-spec[multiply]
--------------------------------------------------
* Divide (`/`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/arithmetic.sql-spec[divide]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Modulo_operation[Modulo] or Remainder (`%`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/arithmetic.sql-spec[mod]
--------------------------------------------------
=== Math Functions
All math and trigonometric functions require their input (where applicable)
to be numeric.
==== Generic
* `ABS`
https://en.wikipedia.org/wiki/Absolute_value[Absolute value], returns \[same type as input]
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[abs]
--------------------------------------------------
* `CBRT`
https://en.wikipedia.org/wiki/Cube_root[Cube root], returns `double`
// TODO make the example in the tests presentable
* `CEIL`
https://en.wikipedia.org/wiki/Floor_and_ceiling_functions[Ceiling], returns `double`
* `CEILING`
Same as `CEIL`
// TODO make the example in the tests presentable
* `E`
https://en.wikipedia.org/wiki/E_%28mathematical_constant%29[Euler's number], returns `2.7182818284590452354`
* https://en.wikipedia.org/wiki/Rounding#Round_half_up[Round] (`ROUND`)
// TODO make the example in the tests presentable
NOTE: This rounds "half up" meaning that `ROUND(-1.5)` results in `-1`.
* https://en.wikipedia.org/wiki/Floor_and_ceiling_functions[Floor] (`FLOOR`)
// TODO make the example in the tests presentable
* https://en.wikipedia.org/wiki/Natural_logarithm[Natural logarithm] (`LOG`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[log]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Logarithm[Logarithm] base 10 (`LOG10`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[log10]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Square_root[Square root] (`SQRT`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[sqrt]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Exponential_function[e^x^] (`EXP`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[exp]
--------------------------------------------------
* https://docs.oracle.com/javase/8/docs/api/java/lang/Math.html#expm1-double-[e^x^ - 1] (`EXPM1`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[expm1]
--------------------------------------------------
==== Trigonometric
* Convert from https://en.wikipedia.org/wiki/Radian[radians]
to https://en.wikipedia.org/wiki/Degree_(angle)[degrees] (`DEGREES`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[degrees]
--------------------------------------------------
* Convert from https://en.wikipedia.org/wiki/Degree_(angle)[degrees]
to https://en.wikipedia.org/wiki/Radian[radians] (`RADIANS`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[degrees]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Trigonometric_functions#sine[Sine] (`SIN`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[sin]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Trigonometric_functions#cosine[Cosine] (`COS`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[cos]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Trigonometric_functions#tangent[Tangent] (`TAN`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[tan]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[Arc sine] (`ASIN`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[asin]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[Arc cosine] (`ACOS`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[acos]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[Arc tangent] (`ATAN`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[atan]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Hyperbolic_function[Hyperbolic sine] (`SINH`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[sinh]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Hyperbolic_function[Hyperbolic cosine] (`COSH`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[cosh]
--------------------------------------------------
[[sql-functions-datetime]]
=== Date and Time Functions
* Extract the year from a date (`YEAR`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[year]
--------------------------------------------------
* Extract the month of the year from a date (`MONTH_OF_YEAR` or `MONTH`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[monthOfYear]
--------------------------------------------------
* Extract the week of the year from a date (`WEEK_OF_YEAR` or `WEEK`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[weekOfYear]
--------------------------------------------------
* Extract the day of the year from a date (`DAY_OF_YEAR` or `DOY`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[dayOfYear]
--------------------------------------------------
* Extract the day of the month from a date (`DAY_OF_MONTH`, `DOM`, or `DAY`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[dayOfMonth]
--------------------------------------------------
* Extract the day of the week from a date (`DAY_OF_WEEK` or `DOW`).
Monday is `1`, Tuesday is `2`, etc.
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[dayOfWeek]
--------------------------------------------------
* Extract the hour of the day from a date (`HOUR_OF_DAY` or `HOUR`).
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[hourOfDay]
--------------------------------------------------
* Extract the minute of the day from a date (`MINUTE_OF_DAY`).
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[minuteOfDay]
--------------------------------------------------
* Extract the minute of the hour from a date (`MINUTE_OF_HOUR`, `MINUTE`).
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[minuteOfHour]
--------------------------------------------------
* Extract the second of the minute from a date (`SECOND_OF_MINUTE`, `SECOND`).
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[secondOfMinute]
--------------------------------------------------
* Extract
As an alternative, you can use `EXTRACT` to extract fields from datetimes.
You can run any <<sql-functions-datetime,datetime function>>
with `EXTRACT(<datetime_function> FROM <expression>)`. So
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[extractDayOfYear]
--------------------------------------------------
is equivalent to
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/datetime.csv-spec[dayOfYear]
--------------------------------------------------
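For illustration only, here is a literal sketch of the same equivalence, assuming a hypothetical `emp` index with a `hire_date` field (not part of the bundled test specs):
[source, sql]
--------------------------------------------------
-- hypothetical emp index: both queries return the same value per row
SELECT EXTRACT(DAY_OF_YEAR FROM hire_date) AS doy FROM emp;
SELECT DAY_OF_YEAR(hire_date) AS doy FROM emp;
--------------------------------------------------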
[[sql-functions-aggregate]]
=== Aggregate Functions
==== Basic
* https://en.wikipedia.org/wiki/Arithmetic_mean[Average] (`AVG`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/agg.sql-spec[avg]
--------------------------------------------------
* Count the number of matching documents (`COUNT`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/agg.sql-spec[countStar]
--------------------------------------------------
* Count the number of distinct values in matching documents (`COUNT(DISTINCT ...)`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/agg.sql-spec[countDistinct]
--------------------------------------------------
* Find the maximum value in matching documents (`MAX`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/agg.sql-spec[max]
--------------------------------------------------
* Find the minimum value in matching documents (`MIN`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/agg.sql-spec[min]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Kahan_summation_algorithm[Sum]
all values of matching documents (`SUM`).
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/agg.csv-spec[sum]
--------------------------------------------------
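As a rough combined illustration of the aggregates above (not taken from the test specs), assuming a hypothetical `emp` index with a numeric `salary` field:
[source, sql]
--------------------------------------------------
-- hypothetical emp index: all basic aggregates over the matching documents
SELECT COUNT(*) AS cnt, MIN(salary) AS lowest, MAX(salary) AS highest,
       AVG(salary) AS average, SUM(salary) AS total
FROM emp;
--------------------------------------------------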
{es-sql} provides a comprehensive set of built-in operators and functions:
* <<sql-operators, Operators>>
* <<sql-functions-aggs, Aggregate>>
* <<sql-functions-datetime, Date-Time>>
* <<sql-functions-search, Full-Text Search>>
* <<sql-functions-math, Mathematical>>
* <<sql-functions-string, String>>
* <<sql-functions-type-conversion,Type Conversion>>
include::operators.asciidoc[]
include::aggs.asciidoc[]
include::date-time.asciidoc[]
include::search.asciidoc[]
include::math.asciidoc[]
include::string.asciidoc[]
include::type-conversion.asciidoc[]

View File

@ -0,0 +1,159 @@
[role="xpack"]
[testenv="basic"]
[[sql-functions-math]]
=== Math Functions
All math and trigonometric functions require their input (where applicable)
to be numeric.
==== Generic
* `ABS`
https://en.wikipedia.org/wiki/Absolute_value[Absolute value], returns \[same type as input]
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[abs]
--------------------------------------------------
* `CBRT`
https://en.wikipedia.org/wiki/Cube_root[Cube root], returns `double`
// TODO make the example in the tests presentable
* `CEIL`
https://en.wikipedia.org/wiki/Floor_and_ceiling_functions[Ceiling], returns `double`
* `CEILING`
Same as `CEIL`
// TODO make the example in the tests presentable
* `E`
https://en.wikipedia.org/wiki/E_%28mathematical_constant%29[Euler's number], returns `2.7182818284590452354`
* https://en.wikipedia.org/wiki/Rounding#Round_half_up[Round] (`ROUND`)
// TODO make the example in the tests presentable
NOTE: This rounds "half up", meaning that `ROUND(-1.5)` results in `-1`; a rough sketch follows at the end of this list.
* https://en.wikipedia.org/wiki/Floor_and_ceiling_functions[Floor] (`FLOOR`)
// TODO make the example in the tests presentable
* https://en.wikipedia.org/wiki/Natural_logarithm[Natural logarithm] (`LOG`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[log]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Logarithm[Logarithm] base 10 (`LOG10`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[log10]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Square_root[Square root] (`SQRT`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[sqrt]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Exponential_function[e^x^] (`EXP`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[exp]
--------------------------------------------------
* https://docs.oracle.com/javase/8/docs/api/java/lang/Math.html#expm1-double-[e^x^ - 1] (`EXPM1`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[expm1]
--------------------------------------------------
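Since the `CEIL`, `FLOOR` and `ROUND` entries above have no rendered examples yet (see the TODO notes), here is a rough sketch, assuming literal expressions in a bare `SELECT` are accepted:
[source, sql]
--------------------------------------------------
-- half-up rounding: ROUND(-1.5) gives -1 and ROUND(1.5) gives 2
SELECT ROUND(-1.5) AS r_neg, ROUND(1.5) AS r_pos, FLOOR(1.7) AS f, CEIL(1.2) AS c;
--------------------------------------------------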
==== Trigonometric
* Convert from https://en.wikipedia.org/wiki/Radian[radians]
to https://en.wikipedia.org/wiki/Degree_(angle)[degrees] (`DEGREES`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[degrees]
--------------------------------------------------
* Convert from https://en.wikipedia.org/wiki/Degree_(angle)[degrees]
to https://en.wikipedia.org/wiki/Radian[radians] (`RADIANS`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[degrees]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Trigonometric_functions#sine[Sine] (`SIN`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[sin]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Trigonometric_functions#cosine[Cosine] (`COS`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[cos]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Trigonometric_functions#tangent[Tangent] (`TAN`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[tan]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[Arc sine] (`ASIN`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[asin]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[Arc cosine] (`ACOS`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[acos]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[Arc tangent] (`ATAN`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[atan]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Hyperbolic_function[Hyperbolic sine] (`SINH`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[sinh]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Hyperbolic_function[Hyperbolic cosine] (`COSH`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[cosh]
--------------------------------------------------

View File

@ -0,0 +1,115 @@
[role="xpack"]
[testenv="basic"]
[[sql-operators]]
=== Comparison Operators
Boolean operators for comparing one or two expressions; a combined sketch follows the list below.
* Equality (`=`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereFieldEquality]
--------------------------------------------------
* Inequality (`<>` or `!=` or `<=>`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereFieldNonEquality]
--------------------------------------------------
* Comparison (`<`, `<=`, `>`, `>=`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereFieldLessThan]
--------------------------------------------------
* `BETWEEN`
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereBetween]
--------------------------------------------------
* `IS NULL`/`IS NOT NULL`
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereIsNotNullAndIsNull]
--------------------------------------------------
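A combined sketch of the comparison operators above, assuming a hypothetical `emp` index with `first_name`, `salary` and `languages` fields:
[source, sql]
--------------------------------------------------
-- hypothetical emp index: range check plus a NULL check in one WHERE clause
SELECT first_name, salary FROM emp
WHERE salary BETWEEN 30000 AND 60000 AND languages IS NOT NULL;
--------------------------------------------------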
[[sql-operators-logical]]
=== Logical Operators
Boolean operators for combining or negating boolean expressions.
* `AND`
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereFieldAndComparison]
--------------------------------------------------
* `OR`
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereFieldOrComparison]
--------------------------------------------------
* `NOT`
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/filter.sql-spec[whereFieldEqualityNot]
--------------------------------------------------
[[sql-operators-math]]
=== Math Operators
Perform mathematical operations on one or two values.
The result is a value of numeric type. A combined sketch follows the list below.
* Add (`+`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/arithmetic.sql-spec[plus]
--------------------------------------------------
* Subtract (infix `-`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/arithmetic.sql-spec[minus]
--------------------------------------------------
* Negate (unary `-`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/arithmetic.sql-spec[unaryMinus]
--------------------------------------------------
* Multiply (`*`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/arithmetic.sql-spec[multiply]
--------------------------------------------------
* Divide (`/`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/arithmetic.sql-spec[divide]
--------------------------------------------------
* https://en.wikipedia.org/wiki/Modulo_operation[Modulo] or remainder (`%`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/arithmetic.sql-spec[mod]
--------------------------------------------------
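As a combined sketch of the arithmetic operators above, assuming a hypothetical `emp` index with a numeric `salary` field:
[source, sql]
--------------------------------------------------
-- hypothetical emp index: infix arithmetic, modulo and unary negation
SELECT salary, salary * 12 AS yearly, salary % 1000 AS remainder, -salary AS negated
FROM emp;
--------------------------------------------------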

View File

@ -0,0 +1,35 @@
[role="xpack"]
[testenv="basic"]
[[sql-functions-search]]
=== Full-Text Search Functions
Search functions should be used when performing full-text search, namely
when the `MATCH` or `QUERY` predicates are being used.
Outside a so-called search context, these functions return default values
such as `0` or `NULL`.
[[sql-functions-search-score]]
==== `SCORE`
*Input*: None, *Output*: `double`
Returns the {defguide}/relevance-intro.html[relevance] of a given input to the executed query.
The higher the score, the more relevant the data.
NOTE: When doing multiple text queries in the `WHERE` clause, their scores will be
combined using the same rules as {es}'s
<<query-dsl-bool-query,bool query>>.
Typically `SCORE` is used for ordering the results of a query based on their relevance:
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[orderByScore]
----
However, it is perfectly fine to return the score without sorting by it:
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[scoreWithMatch]
----
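For illustration, a minimal sketch (not from the test specs) assuming a hypothetical `library` index with an `author` field:
[source, sql]
----
-- hypothetical library index: rank full-text matches by relevance
SELECT SCORE(), author FROM library WHERE MATCH(author, 'frank') ORDER BY SCORE() DESC;
----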

View File

@ -0,0 +1,240 @@
[role="xpack"]
[testenv="basic"]
[[sql-functions-string]]
=== String Functions
Functions for performing string manipulation.
[[sql-functions-string-ascii]]
==== `ASCII`
*Input*: `string`, *Output*: `integer`
Returns the ASCII code value of the leftmost character of the input string as an integer.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringAscii]
----
[[sql-functions-string-bit-length]]
==== `BIT_LENGTH`
*Input*: `string`, *Output*: `integer`
Returns the length in bits of the input.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringBitLength]
----
[[sql-functions-string-char]]
==== `CHAR`
*Input*: `numeric`, *Output*: `string`
Returns the character that has the ASCII code value specified by the numeric input. The value should be between 0 and 255; otherwise, the return value is data source dependent.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringChar]
----
[[sql-functions-string-char-length]]
==== `CHAR_LENGTH`
*Input*: `string`, *Output*: `integer`
Returns the length in characters of the input, if the string expression is of a character data type; otherwise, returns the length in bytes of the string expression (the smallest integer not less than the number of bits divided by 8).
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringCharLength]
----
[[sql-functions-string-concat]]
==== `CONCAT`
*Input*: `string1`, `string2`, *Output*: `string`
Returns a character string that is the result of concatenating string1 and string2. If one of the strings is `NULL`,
the other string is returned.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringConcat]
----
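A minimal sketch of the `NULL` behavior, assuming a hypothetical `emp` index where `last_name` may be missing:
[source, sql]
----
-- if last_name is NULL for a row, only first_name is returned for that row
SELECT CONCAT(first_name, last_name) AS full_name FROM emp;
----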
[[sql-functions-string-insert]]
==== `INSERT`
*Input*: `string1`, `start`, `length`, `string2`, *Output*: `string`
Returns a string where length characters have been deleted from string1, beginning at start, and where string2 has been inserted into string1, beginning at start.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringInsert]
----
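A rough sketch of the argument order, assuming literal arguments in a bare `SELECT` are accepted:
[source, sql]
----
-- delete 4 characters of 'Elastic' starting at position 4, then insert 'ba' there: 'Elaba'
SELECT INSERT('Elastic', 4, 4, 'ba') AS result;
----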
[[sql-functions-string-lcase]]
==== `LCASE`
*Input*: `string`, *Output*: `string`
Returns a string equal to that in string, with all uppercase characters converted to lowercase.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringLCase]
----
[[sql-functions-string-left]]
==== `LEFT`
*Input*: `string`, `count`, *Output*: `string`
Returns the leftmost count characters of the string.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringLeft]
----
[[sql-functions-string-length]]
==== `LENGTH`
*Input*: `string`, *Output*: `integer`
Returns the number of characters in string, excluding trailing blanks.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringLength]
----
[[sql-functions-string-locate]]
==== `LOCATE`
*Input*: `string1`, `string2`[, `start`], *Output*: `integer`
Returns the starting position of the first occurrence of string1 within string2. The search for the first occurrence of string1 begins with the first character position in string2 unless the optional argument, start, is specified. If start is specified, the search begins with the character position indicated by the value of start. The first character position in string2 is indicated by the value 1. If string1 is not found within string2, the value 0 is returned.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringLocateWoStart]
----
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringLocateWithStart]
----
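A rough sketch of the optional `start` argument, assuming literal arguments are accepted:
[source, sql]
----
-- without start, the first 'a' in 'banana' is at position 2;
-- with start = 3, the search begins at position 3, so the result is 4
SELECT LOCATE('a', 'banana') AS first_hit, LOCATE('a', 'banana', 3) AS later_hit;
----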
[[sql-functions-string-ltrim]]
==== `LTRIM`
*Input*: `string`, *Output*: `string`
Returns the characters of the input string, with leading blanks removed.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringLTrim]
----
[[sql-functions-string-position]]
==== `POSITION`
*Input*: `string1`, `string2`, *Output*: `integer`
Returns the position of string1 within string2. The result is an exact numeric.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringPosition]
----
[[sql-functions-string-repeat]]
==== `REPEAT`
*Input*: `string`, `count`, *Output*: `string`
Returns a character string composed of the input string repeated count times.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringRepeat]
----
[[sql-functions-string-replace]]
==== `REPLACE`
*Input*: `string1`, `string2`, `string3`, *Output*: `string`
Search string1 for occurrences of string2, and replace with string3.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringReplace]
----
[[sql-functions-string-right]]
==== `RIGHT`
*Input*: `string`, `count`, *Output*: `string`
Returns the rightmost count characters of string.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringRight]
----
[[sql-functions-string-rtrim]]
==== `RTRIM`
*Input*: `string`, *Output*: `string`
Returns the characters of string with trailing blanks removed.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringRTrim]
----
[[sql-functions-string-space]]
==== `SPACE`
*Input*: `integer`, *Output*: `string`
Returns a character string consisting of count spaces.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringSpace]
----
[[sql-functions-string-substring]]
==== `SUBSTRING`
*Input*: `string`, `start`, `length`, *Output*: `string`
Returns a character string that is derived from the string, beginning at the character position specified by `start` for `length` characters.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringSubString]
----
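A rough sketch, assuming literal arguments are accepted:
[source, sql]
----
-- start at character 3 of 'Elasticsearch' and take 6 characters: 'astics'
SELECT SUBSTRING('Elasticsearch', 3, 6) AS part;
----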
[[sql-functions-string-ucase]]
==== `UCASE`
*Input*: `string`, *Output*: `string`
Returns a string equal to that of the input, with all lowercase characters converted to uppercase.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[stringUCase]
----

View File

@ -0,0 +1,39 @@
[role="xpack"]
[testenv="basic"]
[[sql-functions-type-conversion]]
=== Type Conversion Functions
Functions for converting an expression of one data type to another.
[[sql-functions-type-conversion-cast]]
==== `CAST`
.Synopsis
[source, sql]
----
CAST ( expression<1> AS data_type<2> )
----
<1> Expression to cast
<2> Target data type to cast to
.Description
Casts the result of the given expression to the target type.
If the cast is not possible (for example, because the target type is too narrow or because
the value itself cannot be converted), the query fails.
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[conversionStringToIntCast]
----
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[conversionIntToStringCast]
----
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[conversionStringToDateCast]
----
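As a rough combined illustration of the tagged examples above, assuming literal arguments are accepted:
[source, sql]
----
-- string to integer and string to date, mirroring the conversions shown above
SELECT CAST('123' AS INT) AS as_int, CAST('2018-09-07' AS DATE) AS as_date;
----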

View File

@ -17,7 +17,7 @@ Most of {es} <<mapping-types, data types>> are available in {es-sql}, as indicat
| <<number, `byte`>> | `tinyint` | 3
| <<number, `short`>> | `smallint` | 5
| <<number, `integer`>> | `integer` | 10
| <<number, `long`>> | `long` | 19
| <<number, `long`>> | `bigint` | 19
| <<number, `double`>> | `double` | 15
| <<number, `float`>> | `real` | 7
| <<number, `half_float`>> | `float` | 16

View File

@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations.matrix.stats;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ScoreMode;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ObjectArray;
@ -61,8 +62,8 @@ final class MatrixStatsAggregator extends MetricsAggregator {
}
@Override
public boolean needsScores() {
return (valuesSources == null) ? false : valuesSources.needsScores();
public ScoreMode scoreMode() {
return (valuesSources != null && valuesSources.needsScores()) ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES;
}
@Override

View File

@ -19,6 +19,7 @@
package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@ -35,7 +36,7 @@ public class ChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider<Stand
ChineseAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
// old index: best effort
analyzer = new StandardAnalyzer();
analyzer = new StandardAnalyzer(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
analyzer.setVersion(version);
}

View File

@ -44,7 +44,6 @@ import org.apache.lucene.analysis.core.DecimalDigitFilter;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.core.LetterTokenizer;
import org.apache.lucene.analysis.core.LowerCaseTokenizer;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.UpperCaseFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.cz.CzechAnalyzer;
@ -167,8 +166,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
}
@Override
@SuppressWarnings("rawtypes") // TODO ScriptPlugin needs to change this to pass precommit?
public List<ScriptContext> getContexts() {
public List<ScriptContext<?>> getContexts() {
return Collections.singletonList(AnalysisPredicateScript.CONTEXT);
}
@ -326,7 +324,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
() -> new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true,
CharArraySet.EMPTY_SET)));
analyzers.add(new PreBuiltAnalyzerProviderFactory("snowball", CachingStrategy.LUCENE,
() -> new SnowballAnalyzer("English", StopAnalyzer.ENGLISH_STOP_WORDS_SET)));
() -> new SnowballAnalyzer("English", EnglishAnalyzer.ENGLISH_STOP_WORDS_SET)));
// Language analyzers:
analyzers.add(new PreBuiltAnalyzerProviderFactory("arabic", CachingStrategy.LUCENE, ArabicAnalyzer::new));
@ -337,7 +335,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
analyzers.add(new PreBuiltAnalyzerProviderFactory("bulgarian", CachingStrategy.LUCENE, BulgarianAnalyzer::new));
analyzers.add(new PreBuiltAnalyzerProviderFactory("catalan", CachingStrategy.LUCENE, CatalanAnalyzer::new));
// chinese analyzer: only for old indices, best effort
analyzers.add(new PreBuiltAnalyzerProviderFactory("chinese", CachingStrategy.ONE, StandardAnalyzer::new));
analyzers.add(new PreBuiltAnalyzerProviderFactory("chinese", CachingStrategy.ONE,
() -> new StandardAnalyzer(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET)));
analyzers.add(new PreBuiltAnalyzerProviderFactory("cjk", CachingStrategy.LUCENE, CJKAnalyzer::new));
analyzers.add(new PreBuiltAnalyzerProviderFactory("czech", CachingStrategy.LUCENE, CzechAnalyzer::new));
analyzers.add(new PreBuiltAnalyzerProviderFactory("danish", CachingStrategy.LUCENE, DanishAnalyzer::new));
@ -409,14 +408,14 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER)));
filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer())));
filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, input ->
new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE)));
new EdgeNGramTokenFilter(input, 1)));
filters.add(PreConfiguredTokenFilter.singletonWithVersion("edgeNGram", false, (reader, version) -> {
if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) {
DEPRECATION_LOGGER.deprecatedAndMaybeLog("edgeNGram_deprecation",
"The [edgeNGram] token filter name is deprecated and will be removed in a future version. "
+ "Please change the filter name to [edge_ngram] instead.");
}
return new EdgeNGramTokenFilter(reader, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE);
return new EdgeNGramTokenFilter(reader, 1);
}));
filters.add(PreConfiguredTokenFilter.singleton("elision", true,
input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES)));
@ -433,14 +432,14 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
new LimitTokenCountFilter(input,
LimitTokenCountFilterFactory.DEFAULT_MAX_TOKEN_COUNT,
LimitTokenCountFilterFactory.DEFAULT_CONSUME_ALL_TOKENS)));
filters.add(PreConfiguredTokenFilter.singleton("ngram", false, NGramTokenFilter::new));
filters.add(PreConfiguredTokenFilter.singleton("ngram", false, reader -> new NGramTokenFilter(reader, 1, 2, false)));
filters.add(PreConfiguredTokenFilter.singletonWithVersion("nGram", false, (reader, version) -> {
if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) {
DEPRECATION_LOGGER.deprecatedAndMaybeLog("nGram_deprecation",
"The [nGram] token filter name is deprecated and will be removed in a future version. "
+ "Please change the filter name to [ngram] instead.");
}
return new NGramTokenFilter(reader);
return new NGramTokenFilter(reader, 1, 2, false);
}));
filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new));
filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new));
@ -463,7 +462,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
filters.add(PreConfiguredTokenFilter.singleton("sorani_normalization", true, SoraniNormalizationFilter::new));
filters.add(PreConfiguredTokenFilter.singleton("stemmer", false, PorterStemFilter::new));
// The stop filter is in lucene-core but the English stop words set is in lucene-analyzers-common
filters.add(PreConfiguredTokenFilter.singleton("stop", false, input -> new StopFilter(input, StopAnalyzer.ENGLISH_STOP_WORDS_SET)));
filters.add(PreConfiguredTokenFilter.singleton("stop", false,
input -> new StopFilter(input, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET)));
filters.add(PreConfiguredTokenFilter.singleton("trim", true, TrimFilter::new));
filters.add(PreConfiguredTokenFilter.singleton("truncate", false, input -> new TruncateTokenFilter(input, 10)));
filters.add(PreConfiguredTokenFilter.singleton("type_as_payload", false, TypeAsPayloadTokenFilter::new));

View File

@ -21,7 +21,6 @@ package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@ -41,8 +40,8 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
this.minGram = settings.getAsInt("min_gram", 1);
this.maxGram = settings.getAsInt("max_gram", 2);
this.side = parseSide(settings.get("side", "front"));
}
@ -63,7 +62,8 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
result = new ReverseStringFilter(result);
}
result = new EdgeNGramTokenFilter(result, minGram, maxGram);
// TODO: Expose preserveOriginal
result = new EdgeNGramTokenFilter(result, minGram, maxGram, false);
// side=BACK is not supported anymore but applying ReverseStringFilter up-front and after the token filter has the same effect
if (side == SIDE_BACK) {

View File

@ -39,8 +39,8 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
NGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
int maxAllowedNgramDiff = indexSettings.getMaxNgramDiff();
this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
this.minGram = settings.getAsInt("min_gram", 1);
this.maxGram = settings.getAsInt("max_gram", 2);
int ngramDiff = maxGram - minGram;
if (ngramDiff > maxAllowedNgramDiff) {
if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) {
@ -57,6 +57,7 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
@Override
public TokenStream create(TokenStream tokenStream) {
return new NGramTokenFilter(tokenStream, minGram, maxGram);
// TODO: Expose preserveOriginal
return new NGramTokenFilter(tokenStream, minGram, maxGram, false);
}
}

View File

@ -27,11 +27,10 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
/** Filters {@link StandardTokenizer} with {@link StandardFilter}, {@link
/** Filters {@link StandardTokenizer} with {@link
* LowerCaseFilter}, {@link StopFilter} and {@link SnowballFilter}.
*
* Available stemmers are listed in org.tartarus.snowball.ext. The name of a
@ -57,8 +56,7 @@ public final class SnowballAnalyzer extends Analyzer {
stopSet = CharArraySet.unmodifiableSet(CharArraySet.copy(stopWords));
}
/** Constructs a {@link StandardTokenizer} filtered by a {@link
StandardFilter}, a {@link LowerCaseFilter}, a {@link StopFilter},
/** Constructs a {@link StandardTokenizer} filtered by a {@link LowerCaseFilter}, a {@link StopFilter},
and a {@link SnowballFilter} */
@Override
public TokenStreamComponents createComponents(String fieldName) {

View File

@ -19,8 +19,8 @@
package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.de.GermanAnalyzer;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.analysis.fr.FrenchAnalyzer;
import org.apache.lucene.analysis.nl.DutchAnalyzer;
import org.elasticsearch.common.settings.Settings;
@ -42,7 +42,7 @@ import static java.util.Collections.unmodifiableMap;
* Configuration of language is done with the "language" attribute or the analyzer.
* Also supports additional stopwords via "stopwords" attribute
* <p>
* The SnowballAnalyzer comes with a StandardFilter, LowerCaseFilter, StopFilter
* The SnowballAnalyzer comes with a LowerCaseFilter, StopFilter
* and the SnowballFilter.
*
*
@ -52,7 +52,7 @@ public class SnowballAnalyzerProvider extends AbstractIndexAnalyzerProvider<Snow
static {
Map<String, CharArraySet> defaultLanguageStopwords = new HashMap<>();
defaultLanguageStopwords.put("English", StopAnalyzer.ENGLISH_STOP_WORDS_SET);
defaultLanguageStopwords.put("English", EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
defaultLanguageStopwords.put("Dutch", DutchAnalyzer.getDefaultStopSet());
defaultLanguageStopwords.put("German", GermanAnalyzer.getDefaultStopSet());
defaultLanguageStopwords.put("German2", GermanAnalyzer.getDefaultStopSet());

View File

@ -25,8 +25,7 @@ import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
@ -36,7 +35,7 @@ public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
*/
@Deprecated
public StandardHtmlStripAnalyzer() {
super(StopAnalyzer.ENGLISH_STOP_WORDS_SET);
super(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
}
StandardHtmlStripAnalyzer(CharArraySet stopwords) {
@ -46,8 +45,7 @@ public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
@Override
protected TokenStreamComponents createComponents(final String fieldName) {
final Tokenizer src = new StandardTokenizer();
TokenStream tok = new StandardFilter(src);
tok = new LowerCaseFilter(tok);
TokenStream tok = new LowerCaseFilter(src);
if (!stopwords.isEmpty()) {
tok = new StopFilter(tok, stopwords);
}

View File

@ -20,7 +20,7 @@ package org.elasticsearch.analysis.common;
*/
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.test.ESTokenStreamTestCase;
@ -44,7 +44,7 @@ public class PatternAnalyzerTests extends ESTokenStreamTestCase {
// split on non-letter pattern, lowercase, english stopwords
PatternAnalyzer b = new PatternAnalyzer(Pattern.compile("\\W+"), true,
StopAnalyzer.ENGLISH_STOP_WORDS_SET);
EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
assertAnalyzesTo(b, "The quick brown Fox,the abcd1234 (56.78) dc.",
new String[] { "quick", "brown", "fox", "abcd1234", "56", "78", "dc" });
}
@ -61,7 +61,7 @@ public class PatternAnalyzerTests extends ESTokenStreamTestCase {
// Split on whitespace patterns, lowercase, english stopwords
PatternAnalyzer b = new PatternAnalyzer(Pattern.compile("\\s+"), true,
StopAnalyzer.ENGLISH_STOP_WORDS_SET);
EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
assertAnalyzesTo(b, "The quick brown Fox,the abcd1234 (56.78) dc.",
new String[] { "quick", "brown", "fox,the", "abcd1234", "(56.78)", "dc." });
}
@ -78,7 +78,7 @@ public class PatternAnalyzerTests extends ESTokenStreamTestCase {
// split on comma, lowercase, english stopwords
PatternAnalyzer b = new PatternAnalyzer(Pattern.compile(","), true,
StopAnalyzer.ENGLISH_STOP_WORDS_SET);
EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
assertAnalyzesTo(b, "Here,Are,some,Comma,separated,words,",
new String[] { "here", "some", "comma", "separated", "words" });
}
@ -109,7 +109,7 @@ public class PatternAnalyzerTests extends ESTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
Analyzer a = new PatternAnalyzer(Pattern.compile(","), true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
Analyzer a = new PatternAnalyzer(Pattern.compile(","), true, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER);
}

View File

@ -20,7 +20,7 @@ package org.elasticsearch.analysis.common;
*/
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.elasticsearch.test.ESTokenStreamTestCase;
public class SnowballAnalyzerTests extends ESTokenStreamTestCase {
@ -33,7 +33,7 @@ public class SnowballAnalyzerTests extends ESTokenStreamTestCase {
public void testStopwords() throws Exception {
Analyzer a = new SnowballAnalyzer("English",
StandardAnalyzer.STOP_WORDS_SET);
EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
assertAnalyzesTo(a, "the quick brown fox jumped",
new String[]{"quick", "brown", "fox", "jump"});
}

View File

@ -1 +0,0 @@
fded6bb485b8b01bb2a9280162fd14d4d3ce4510

View File

@ -0,0 +1 @@
5f469e925dde5dff81b9d56f465a8babb56cd26b

View File

@ -39,7 +39,7 @@ import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.metrics.stats.Stats;
import org.elasticsearch.search.aggregations.metrics.Stats;
import org.elasticsearch.search.aggregations.pipeline.SimpleValue;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;

View File

@ -26,6 +26,7 @@ import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.RAMDirectory;
@ -550,7 +551,7 @@ public class PainlessExecuteAction extends Action<PainlessExecuteAction.Response
Query luceneQuery = request.contextSetup.query.rewrite(context).toQuery(context);
IndexSearcher indexSearcher = new IndexSearcher(leafReaderContext.reader());
luceneQuery = indexSearcher.rewrite(luceneQuery);
Weight weight = indexSearcher.createWeight(luceneQuery, true, 1f);
Weight weight = indexSearcher.createWeight(luceneQuery, ScoreMode.COMPLETE, 1f);
Scorer scorer = weight.scorer(indexSearcher.getIndexReader().leaves().get(0));
// Consume the first (and only) match.
int docID = scorer.iterator().nextDoc();

View File

@ -105,8 +105,8 @@ public final class PainlessPlugin extends Plugin implements ScriptPlugin, Extens
}
}
@SuppressWarnings("rawtypes")
public List<ScriptContext> getContexts() {
@Override
public List<ScriptContext<?>> getContexts() {
return Collections.singletonList(PainlessExecuteAction.PainlessTestScript.CONTEXT);
}

View File

@ -49,6 +49,11 @@ public class ScoreTests extends ScriptTestCase {
public float score() throws IOException {
return 2.5f;
}
@Override
public float getMaxScore(int upTo) throws IOException {
return 2.5f;
}
},
true));
}
@ -60,6 +65,11 @@ public class ScoreTests extends ScriptTestCase {
public float score() throws IOException {
throw new AssertionError("score() should not be called");
}
@Override
public float getMaxScore(int upTo) throws IOException {
return Float.MAX_VALUE;
}
},
true));
}
@ -75,6 +85,11 @@ public class ScoreTests extends ScriptTestCase {
}
throw new AssertionError("score() should not be called twice");
}
@Override
public float getMaxScore(int upTo) throws IOException {
return 4.5f;
}
},
true));
}

View File

@ -25,6 +25,7 @@ import org.elasticsearch.painless.spi.Whitelist;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptedMetricAggContexts;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@ -74,6 +75,11 @@ public class ScriptedMetricAggContextsTests extends ScriptTestCase {
@Override
public DocIdSetIterator iterator() { return null; }
@Override
public float getMaxScore(int upTo) throws IOException {
return 0.5f;
}
};
ScriptedMetricAggContexts.MapScript.LeafFactory leafFactory = factory.newFactory(params, state, null);

View File

@ -89,7 +89,7 @@ public class SimilarityScriptTests extends ScriptTestCase {
.add(new TermQuery(new Term("match", "yes")), Occur.FILTER)
.build(), 3.2f);
TopDocs topDocs = searcher.search(query, 1);
assertEquals(1, topDocs.totalHits);
assertEquals(1, topDocs.totalHits.value);
assertEquals((float) (3.2 * 2 / 3), topDocs.scoreDocs[0].score, 0);
w.close();
dir.close();
@ -128,7 +128,7 @@ public class SimilarityScriptTests extends ScriptTestCase {
.add(new TermQuery(new Term("match", "yes")), Occur.FILTER)
.build(), 3.2f);
TopDocs topDocs = searcher.search(query, 1);
assertEquals(1, topDocs.totalHits);
assertEquals(1, topDocs.totalHits.value);
assertEquals((float) (3.2 * 2 / 3), topDocs.scoreDocs[0].score, 0);
w.close();
dir.close();

View File

@ -161,7 +161,7 @@
"script_score": {
"script": {
"lang": "painless",
"source": "-doc['num1'].value"
"source": "3 - doc['num1'].value"
}
}
}]

View File

@ -24,6 +24,7 @@ import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
@ -78,8 +79,8 @@ public class ParentToChildrenAggregator extends BucketsAggregator implements Sin
throws IOException {
super(name, factories, context, parent, pipelineAggregators, metaData);
// these two filters are cached in the parser
this.childFilter = context.searcher().createNormalizedWeight(childFilter, false);
this.parentFilter = context.searcher().createNormalizedWeight(parentFilter, false);
this.childFilter = context.searcher().createWeight(context.searcher().rewrite(childFilter), ScoreMode.COMPLETE_NO_SCORES, 1f);
this.parentFilter = context.searcher().createWeight(context.searcher().rewrite(parentFilter), ScoreMode.COMPLETE_NO_SCORES, 1f);
this.parentOrdToBuckets = context.bigArrays().newLongArray(maxOrd, false);
this.parentOrdToBuckets.fill(0, maxOrd, -1);
this.parentOrdToOtherBuckets = new LongObjectPagedHashMap<>(context.bigArrays());

View File

@ -23,16 +23,21 @@ import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopDocsCollector;
import org.apache.lucene.search.TopFieldCollector;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.search.MaxScoreCollector;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.index.query.InnerHitContextBuilder;
@ -92,14 +97,14 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder {
}
@Override
public TopDocs[] topDocs(SearchHit[] hits) throws IOException {
public TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException {
Weight innerHitQueryWeight = createInnerHitQueryWeight();
TopDocs[] result = new TopDocs[hits.length];
TopDocsAndMaxScore[] result = new TopDocsAndMaxScore[hits.length];
for (int i = 0; i < hits.length; i++) {
SearchHit hit = hits[i];
String joinName = getSortedDocValue(joinFieldMapper.name(), context, hit.docId());
if (joinName == null) {
result[i] = Lucene.EMPTY_TOP_DOCS;
result[i] = new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN);
continue;
}
@ -107,7 +112,7 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder {
ParentIdFieldMapper parentIdFieldMapper =
joinFieldMapper.getParentIdFieldMapper(typeName, fetchChildInnerHits == false);
if (parentIdFieldMapper == null) {
result[i] = Lucene.EMPTY_TOP_DOCS;
result[i] = new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN);
continue;
}
@ -125,29 +130,41 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder {
q = context.mapperService().fullName(IdFieldMapper.NAME).termQuery(parentId, qsc);
}
Weight weight = context.searcher().createNormalizedWeight(q, false);
Weight weight = context.searcher().createWeight(context.searcher().rewrite(q), ScoreMode.COMPLETE_NO_SCORES, 1f);
if (size() == 0) {
TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) {
intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx);
}
result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, Float.NaN);
result[i] = new TopDocsAndMaxScore(
new TopDocs(new TotalHits(totalHitCountCollector.getTotalHits(), TotalHits.Relation.EQUAL_TO),
Lucene.EMPTY_SCORE_DOCS), Float.NaN);
} else {
int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
TopDocsCollector<?> topDocsCollector;
MaxScoreCollector maxScoreCollector = null;
if (sort() != null) {
topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores(), true);
topDocsCollector = TopFieldCollector.create(sort().sort, topN, Integer.MAX_VALUE);
if (trackScores()) {
maxScoreCollector = new MaxScoreCollector();
}
} else {
topDocsCollector = TopScoreDocCollector.create(topN);
topDocsCollector = TopScoreDocCollector.create(topN, Integer.MAX_VALUE);
maxScoreCollector = new MaxScoreCollector();
}
try {
for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) {
intersect(weight, innerHitQueryWeight, topDocsCollector, ctx);
intersect(weight, innerHitQueryWeight, MultiCollector.wrap(topDocsCollector, maxScoreCollector), ctx);
}
} finally {
clearReleasables(Lifetime.COLLECTION);
}
result[i] = topDocsCollector.topDocs(from(), size());
TopDocs topDocs = topDocsCollector.topDocs(from(), size());
float maxScore = Float.NaN;
if (maxScoreCollector != null) {
maxScore = maxScoreCollector.getMaxScore();
}
result[i] = new TopDocsAndMaxScore(topDocs, maxScore);
}
}
return result;

View File

@ -30,8 +30,8 @@ import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.metrics.sum.Sum;
import org.elasticsearch.search.aggregations.metrics.tophits.TopHits;
import org.elasticsearch.search.aggregations.metrics.Sum;
import org.elasticsearch.search.aggregations.metrics.TopHits;
import org.elasticsearch.search.sort.SortOrder;
import org.junit.Before;

View File

@ -49,8 +49,8 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.join.mapper.MetaJoinFieldMapper;
import org.elasticsearch.join.mapper.ParentJoinFieldMapper;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.elasticsearch.search.aggregations.metrics.min.InternalMin;
import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.InternalMin;
import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder;
import java.io.IOException;
import java.util.Arrays;

View File

@ -26,11 +26,14 @@ import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.ScorerSupplier;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Bits;
import org.elasticsearch.common.CheckedFunction;
@ -53,14 +56,17 @@ final class PercolateQuery extends Query implements Accountable {
private final Query candidateMatchesQuery;
private final Query verifiedMatchesQuery;
private final IndexSearcher percolatorIndexSearcher;
private final Query nonNestedDocsFilter;
PercolateQuery(String name, QueryStore queryStore, List<BytesReference> documents,
Query candidateMatchesQuery, IndexSearcher percolatorIndexSearcher, Query verifiedMatchesQuery) {
Query candidateMatchesQuery, IndexSearcher percolatorIndexSearcher,
Query nonNestedDocsFilter, Query verifiedMatchesQuery) {
this.name = name;
this.documents = Objects.requireNonNull(documents);
this.candidateMatchesQuery = Objects.requireNonNull(candidateMatchesQuery);
this.queryStore = Objects.requireNonNull(queryStore);
this.percolatorIndexSearcher = Objects.requireNonNull(percolatorIndexSearcher);
this.nonNestedDocsFilter = nonNestedDocsFilter;
this.verifiedMatchesQuery = Objects.requireNonNull(verifiedMatchesQuery);
}
@ -68,16 +74,17 @@ final class PercolateQuery extends Query implements Accountable {
public Query rewrite(IndexReader reader) throws IOException {
Query rewritten = candidateMatchesQuery.rewrite(reader);
if (rewritten != candidateMatchesQuery) {
return new PercolateQuery(name, queryStore, documents, rewritten, percolatorIndexSearcher, verifiedMatchesQuery);
return new PercolateQuery(name, queryStore, documents, rewritten, percolatorIndexSearcher,
nonNestedDocsFilter, verifiedMatchesQuery);
} else {
return this;
}
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
final Weight verifiedMatchesWeight = verifiedMatchesQuery.createWeight(searcher, false, boost);
final Weight candidateMatchesWeight = candidateMatchesQuery.createWeight(searcher, false, boost);
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
final Weight verifiedMatchesWeight = verifiedMatchesQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
final Weight candidateMatchesWeight = candidateMatchesQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
return new Weight(this) {
@Override
public void extractTerms(Set<Term> set) {
@ -91,7 +98,7 @@ final class PercolateQuery extends Query implements Accountable {
int result = twoPhaseIterator.approximation().advance(docId);
if (result == docId) {
if (twoPhaseIterator.matches()) {
if (needsScores) {
if (scoreMode.needsScores()) {
CheckedFunction<Integer, Query, IOException> percolatorQueries = queryStore.getQueries(leafReaderContext);
Query query = percolatorQueries.apply(docId);
Explanation detail = percolatorIndexSearcher.explain(query, 0);
@ -112,9 +119,9 @@ final class PercolateQuery extends Query implements Accountable {
return null;
}
final CheckedFunction<Integer, Query, IOException> queries = queryStore.getQueries(leafReaderContext);
if (needsScores) {
return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) {
final CheckedFunction<Integer, Query, IOException> percolatorQueries = queryStore.getQueries(leafReaderContext);
if (scoreMode.needsScores()) {
return new BaseScorer(this, approximation) {
float score;
@ -122,8 +129,14 @@ final class PercolateQuery extends Query implements Accountable {
boolean matchDocId(int docId) throws IOException {
Query query = percolatorQueries.apply(docId);
if (query != null) {
if (nonNestedDocsFilter != null) {
query = new BooleanQuery.Builder()
.add(query, Occur.MUST)
.add(nonNestedDocsFilter, Occur.FILTER)
.build();
}
TopDocs topDocs = percolatorIndexSearcher.search(query, 1);
if (topDocs.totalHits > 0) {
if (topDocs.scoreDocs.length > 0) {
score = topDocs.scoreDocs[0].score;
return true;
} else {
@ -142,7 +155,7 @@ final class PercolateQuery extends Query implements Accountable {
} else {
ScorerSupplier verifiedDocsScorer = verifiedMatchesWeight.scorerSupplier(leafReaderContext);
Bits verifiedDocsBits = Lucene.asSequentialAccessBits(leafReaderContext.reader().maxDoc(), verifiedDocsScorer);
return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) {
return new BaseScorer(this, approximation) {
@Override
public float score() throws IOException {
@ -159,7 +172,16 @@ final class PercolateQuery extends Query implements Accountable {
return true;
}
Query query = percolatorQueries.apply(docId);
return query != null && Lucene.exists(percolatorIndexSearcher, query);
if (query == null) {
return false;
}
if (nonNestedDocsFilter != null) {
query = new BooleanQuery.Builder()
.add(query, Occur.MUST)
.add(nonNestedDocsFilter, Occur.FILTER)
.build();
}
return Lucene.exists(percolatorIndexSearcher, query);
}
};
}
@ -182,6 +204,10 @@ final class PercolateQuery extends Query implements Accountable {
return percolatorIndexSearcher;
}
boolean excludesNestedDocs() {
return nonNestedDocsFilter != null;
}
List<BytesReference> getDocuments() {
return documents;
}
@ -241,15 +267,10 @@ final class PercolateQuery extends Query implements Accountable {
abstract static class BaseScorer extends Scorer {
final Scorer approximation;
final CheckedFunction<Integer, Query, IOException> percolatorQueries;
final IndexSearcher percolatorIndexSearcher;
BaseScorer(Weight weight, Scorer approximation, CheckedFunction<Integer, Query, IOException> percolatorQueries,
IndexSearcher percolatorIndexSearcher) {
BaseScorer(Weight weight, Scorer approximation) {
super(weight);
this.approximation = approximation;
this.percolatorQueries = percolatorQueries;
this.percolatorIndexSearcher = percolatorIndexSearcher;
}
@Override
@ -279,6 +300,10 @@ final class PercolateQuery extends Query implements Accountable {
abstract boolean matchDocId(int docId) throws IOException;
@Override
public float getMaxScore(int upTo) throws IOException {
return Float.MAX_VALUE;
}
}
}
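For reference, a minimal standalone sketch of the nested-document exclusion pattern that matchDocId applies above: when a non-nested-docs filter is supplied, the per-document percolator query is wrapped so nested (child) Lucene documents can never come back as hits. The helper name restrictToRootDocs is illustrative only and not part of the Elasticsearch code.

    import org.apache.lucene.search.BooleanClause.Occur;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.Query;

    // Sketch: combine the percolator query with a root-document filter so that
    // only non-nested (root) documents can match.
    static Query restrictToRootDocs(Query percolatorQuery, Query nonNestedDocsFilter) {
        if (nonNestedDocsFilter == null) {
            return percolatorQuery; // nothing to exclude
        }
        return new BooleanQuery.Builder()
            .add(percolatorQuery, Occur.MUST)        // must match the percolator query
            .add(nonNestedDocsFilter, Occur.FILTER)  // and must be a root (non-nested) doc
            .build();
    }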

View File

@ -29,10 +29,9 @@ import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.join.BitSetProducer;
@ -56,7 +55,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContent;
@ -605,13 +603,19 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
}
};
final IndexSearcher docSearcher;
final boolean excludeNestedDocuments;
if (docs.size() > 1 || docs.get(0).docs().size() > 1) {
assert docs.size() != 1 || docMapper.hasNestedObjects();
docSearcher = createMultiDocumentSearcher(analyzer, docs);
excludeNestedDocuments = docMapper.hasNestedObjects() && docs.stream()
.map(ParsedDocument::docs)
.mapToInt(List::size)
.anyMatch(size -> size > 1);
} else {
MemoryIndex memoryIndex = MemoryIndex.fromDocument(docs.get(0).rootDoc(), analyzer, true, false);
docSearcher = memoryIndex.createSearcher();
docSearcher.setQueryCache(null);
excludeNestedDocuments = false;
}
PercolatorFieldMapper.FieldType pft = (PercolatorFieldMapper.FieldType) fieldType;
@ -621,7 +625,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
percolateShardContext,
pft.mapUnmappedFieldsAsText);
return pft.percolateQuery(name, queryStore, documents, docSearcher, context.indexVersionCreated());
return pft.percolateQuery(name, queryStore, documents, docSearcher, excludeNestedDocuments, context.indexVersionCreated());
}
public String getField() {
@ -653,17 +657,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
DirectoryReader directoryReader = DirectoryReader.open(indexWriter);
assert directoryReader.leaves().size() == 1 : "Expected single leaf, but got [" + directoryReader.leaves().size() + "]";
final IndexSearcher slowSearcher = new IndexSearcher(directoryReader) {
@Override
public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(query, BooleanClause.Occur.MUST);
bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT);
return super.createNormalizedWeight(bq.build(), needsScores);
}
};
final IndexSearcher slowSearcher = new IndexSearcher(directoryReader);
slowSearcher.setQueryCache(null);
return slowSearcher;
} catch (IOException e) {
@ -738,7 +732,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
final Weight weight = searcher.createNormalizedWeight(query, false);
final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
final Scorer s = weight.scorer(context);
if (s != null) {
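For reference, a minimal standalone sketch of the weight-creation pattern this change adopts throughout: the caller now rewrites the query explicitly and passes a ScoreMode instead of the removed createNormalizedWeight(query, needsScores). The helper name nonScoringWeight is illustrative only.

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.ScoreMode;
    import org.apache.lucene.search.Weight;

    // Sketch: build a non-scoring Weight with an explicit rewrite, as done above.
    static Weight nonScoringWeight(IndexSearcher searcher, Query query) throws IOException {
        return searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
    }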

View File

@ -50,6 +50,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.hash.MurmurHash3;
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -244,7 +245,7 @@ public class PercolatorFieldMapper extends FieldMapper {
}
Query percolateQuery(String name, PercolateQuery.QueryStore queryStore, List<BytesReference> documents,
IndexSearcher searcher, Version indexVersion) throws IOException {
IndexSearcher searcher, boolean excludeNestedDocuments, Version indexVersion) throws IOException {
IndexReader indexReader = searcher.getIndexReader();
Tuple<BooleanQuery, Boolean> t = createCandidateQuery(indexReader, indexVersion);
Query candidateQuery = t.v1();
@ -261,7 +262,11 @@ public class PercolatorFieldMapper extends FieldMapper {
} else {
verifiedMatchesQuery = new MatchNoDocsQuery("multiple or nested docs or CoveringQuery could not be used");
}
return new PercolateQuery(name, queryStore, documents, candidateQuery, searcher, verifiedMatchesQuery);
Query filter = null;
if (excludeNestedDocuments) {
filter = Queries.newNonNestedFilter(indexVersion);
}
return new PercolateQuery(name, queryStore, documents, candidateQuery, searcher, filter, verifiedMatchesQuery);
}
Tuple<BooleanQuery, Boolean> createCandidateQuery(IndexReader indexReader, Version indexVersion) throws IOException {

View File

@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
@ -74,7 +75,8 @@ final class PercolatorMatchedSlotSubFetchPhase implements FetchSubPhase {
// See https://issues.apache.org/jira/browse/LUCENE-8055
// for now we just use version 6.0 version to find nested parent
final Version version = Version.V_6_0_0; // for now we just use version 6.0 to find the nested parent; context.mapperService().getIndexSettings().getIndexVersionCreated();
Weight weight = percolatorIndexSearcher.createNormalizedWeight(Queries.newNonNestedFilter(version), false);
Weight weight = percolatorIndexSearcher.createWeight(percolatorIndexSearcher.rewrite(Queries.newNonNestedFilter(version)),
ScoreMode.COMPLETE_NO_SCORES, 1f);
Scorer s = weight.scorer(percolatorIndexSearcher.getIndexReader().leaves().get(0));
int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc();
BitSet rootDocs = BitSet.of(s.iterator(), memoryIndexMaxDoc);
@ -96,7 +98,7 @@ final class PercolatorMatchedSlotSubFetchPhase implements FetchSubPhase {
}
TopDocs topDocs = percolatorIndexSearcher.search(query, memoryIndexMaxDoc, new Sort(SortField.FIELD_DOC));
if (topDocs.totalHits == 0) {
if (topDocs.totalHits.value == 0) {
// This hit didn't match with a percolate query,
// likely to happen when percolating multiple documents
continue;

View File

@ -61,6 +61,7 @@ import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
@ -595,51 +596,52 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
Version v = Version.V_6_1_0;
MemoryIndex memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new IntPoint("int_field", 3)), new WhitespaceAnalyzer());
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
Query query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
Query query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")),
percolateSearcher, false, v);
TopDocs topDocs = shardSearcher.search(query, 1);
assertEquals(1L, topDocs.totalHits);
assertEquals(1L, topDocs.totalHits.value);
assertEquals(1, topDocs.scoreDocs.length);
assertEquals(0, topDocs.scoreDocs[0].doc);
memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new LongPoint("long_field", 7L)), new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 1);
assertEquals(1L, topDocs.totalHits);
assertEquals(1L, topDocs.totalHits.value);
assertEquals(1, topDocs.scoreDocs.length);
assertEquals(1, topDocs.scoreDocs[0].doc);
memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new HalfFloatPoint("half_float_field", 12)),
new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 1);
assertEquals(1L, topDocs.totalHits);
assertEquals(1L, topDocs.totalHits.value);
assertEquals(1, topDocs.scoreDocs.length);
assertEquals(2, topDocs.scoreDocs[0].doc);
memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new FloatPoint("float_field", 17)), new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 1);
assertEquals(1, topDocs.totalHits);
assertEquals(1, topDocs.totalHits.value);
assertEquals(1, topDocs.scoreDocs.length);
assertEquals(3, topDocs.scoreDocs[0].doc);
memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new DoublePoint("double_field", 21)), new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 1);
assertEquals(1, topDocs.totalHits);
assertEquals(1, topDocs.totalHits.value);
assertEquals(1, topDocs.scoreDocs.length);
assertEquals(4, topDocs.scoreDocs[0].doc);
memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new InetAddressPoint("ip_field",
forString("192.168.0.4"))), new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 1);
assertEquals(1, topDocs.totalHits);
assertEquals(1, topDocs.totalHits.value);
assertEquals(1, topDocs.scoreDocs.length);
assertEquals(5, topDocs.scoreDocs[0].doc);
}
@ -777,16 +779,16 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
memoryIndex.addField("field", "value1", new WhitespaceAnalyzer());
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore,
Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(3L, topDocs.totalHits);
Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(3L, topDocs.totalHits.value);
assertEquals(3, topDocs.scoreDocs.length);
assertEquals(0, topDocs.scoreDocs[0].doc);
assertEquals(1, topDocs.scoreDocs[1].doc);
assertEquals(4, topDocs.scoreDocs[2].doc);
topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10);
assertEquals(3L, topDocs.totalHits);
assertEquals(3L, topDocs.totalHits.value);
assertEquals(3, topDocs.scoreDocs.length);
assertEquals(0, topDocs.scoreDocs[0].doc);
assertEquals(1, topDocs.scoreDocs[1].doc);
@ -810,9 +812,9 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
memoryIndex.addField("field", "value", new WhitespaceAnalyzer());
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore,
Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(2L, topDocs.totalHits);
Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(2L, topDocs.totalHits.value);
assertEquals(2, topDocs.scoreDocs.length);
assertEquals(0, topDocs.scoreDocs[0].doc);
assertEquals(2, topDocs.scoreDocs[1].doc);
@ -860,17 +862,18 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
try (IndexReader ir = DirectoryReader.open(directory)){
IndexSearcher percolateSearcher = new IndexSearcher(ir);
PercolateQuery query = (PercolateQuery)
fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")),
percolateSearcher, false, v);
BooleanQuery candidateQuery = (BooleanQuery) query.getCandidateMatchesQuery();
assertThat(candidateQuery.clauses().get(0).getQuery(), instanceOf(CoveringQuery.class));
TopDocs topDocs = shardSearcher.search(query, 10);
assertEquals(2L, topDocs.totalHits);
assertEquals(2L, topDocs.totalHits.value);
assertEquals(2, topDocs.scoreDocs.length);
assertEquals(0, topDocs.scoreDocs[0].doc);
assertEquals(2, topDocs.scoreDocs[1].doc);
topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10);
assertEquals(2L, topDocs.totalHits);
assertEquals(2L, topDocs.totalHits.value);
assertEquals(2, topDocs.scoreDocs.length);
assertEquals(0, topDocs.scoreDocs[0].doc);
assertEquals(2, topDocs.scoreDocs[1].doc);
@ -890,18 +893,19 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
try (IndexReader ir = DirectoryReader.open(directory)){
IndexSearcher percolateSearcher = new IndexSearcher(ir);
PercolateQuery query = (PercolateQuery)
fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v);
fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")),
percolateSearcher, false, v);
BooleanQuery candidateQuery = (BooleanQuery) query.getCandidateMatchesQuery();
assertThat(candidateQuery.clauses().get(0).getQuery(), instanceOf(TermInSetQuery.class));
TopDocs topDocs = shardSearcher.search(query, 10);
assertEquals(2L, topDocs.totalHits);
assertEquals(2L, topDocs.totalHits.value);
assertEquals(2, topDocs.scoreDocs.length);
assertEquals(1, topDocs.scoreDocs[0].doc);
assertEquals(2, topDocs.scoreDocs[1].doc);
topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10);
assertEquals(2L, topDocs.totalHits);
assertEquals(2L, topDocs.totalHits.value);
assertEquals(2, topDocs.scoreDocs.length);
assertEquals(1, topDocs.scoreDocs[0].doc);
assertEquals(2, topDocs.scoreDocs[1].doc);
@ -951,9 +955,9 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value1 value2 value3", new WhitespaceAnalyzer());
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(2L, topDocs.totalHits);
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(2L, topDocs.totalHits.value);
assertEquals(0, topDocs.scoreDocs[0].doc);
assertEquals(1, topDocs.scoreDocs[1].doc);
}
@ -985,25 +989,25 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value1 value4 value5", new WhitespaceAnalyzer());
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(1L, topDocs.totalHits);
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(1L, topDocs.totalHits.value);
assertEquals(0, topDocs.scoreDocs[0].doc);
memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value1 value2", new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(1L, topDocs.totalHits);
query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(1L, topDocs.totalHits.value);
assertEquals(0, topDocs.scoreDocs[0].doc);
memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value3", new WhitespaceAnalyzer());
percolateSearcher = memoryIndex.createSearcher();
query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(1L, topDocs.totalHits);
query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(1L, topDocs.totalHits.value);
assertEquals(0, topDocs.scoreDocs[0].doc);
}
@ -1036,9 +1040,9 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
document.add(new IntPoint("int_field", 7));
MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
assertEquals(1L, topDocs.totalHits);
PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
assertEquals(1L, topDocs.totalHits.value);
assertEquals(0, topDocs.scoreDocs[0].doc);
}
@ -1046,7 +1050,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
boolean requireScore = randomBoolean();
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
Query percolateQuery = fieldType.percolateQuery("_name", queryStore,
Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT);
Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT);
Query query = requireScore ? percolateQuery : new ConstantScoreQuery(percolateQuery);
TopDocs topDocs = shardSearcher.search(query, 100);
@ -1055,7 +1059,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
TopDocs controlTopDocs = shardSearcher.search(controlQuery, 100);
try {
assertThat(topDocs.totalHits, equalTo(controlTopDocs.totalHits));
assertThat(topDocs.totalHits.value, equalTo(controlTopDocs.totalHits.value));
assertThat(topDocs.scoreDocs.length, equalTo(controlTopDocs.scoreDocs.length));
for (int j = 0; j < topDocs.scoreDocs.length; j++) {
assertThat(topDocs.scoreDocs[j].doc, equalTo(controlTopDocs.scoreDocs[j].doc));
@ -1130,7 +1134,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
IndexSearcher shardSearcher) throws IOException {
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
Query percolateQuery = fieldType.percolateQuery("_name", queryStore,
Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT);
Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT);
return shardSearcher.search(percolateQuery, 10);
}
@ -1174,7 +1178,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) {
final IndexSearcher percolatorIndexSearcher = memoryIndex.createSearcher();
return new Weight(this) {
@ -1210,8 +1214,8 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
try {
Query query = leaf.apply(doc);
TopDocs topDocs = percolatorIndexSearcher.search(query, 1);
if (topDocs.totalHits > 0) {
if (needsScores) {
if (topDocs.scoreDocs.length > 0) {
if (scoreMode.needsScores()) {
_score[0] = topDocs.scoreDocs[0].score;
}
return true;
@ -1239,6 +1243,11 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
public float score() throws IOException {
return _score[0];
}
@Override
public float getMaxScore(int upTo) throws IOException {
return _score[0];
}
};
}

View File

@ -19,12 +19,6 @@
package org.elasticsearch.percolator;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
@ -40,8 +34,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.Rewriteable;
@ -63,7 +55,6 @@ import java.util.Map;
import java.util.Set;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.sameInstance;
public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQueryBuilder> {
@ -72,8 +63,8 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQ
PercolateQueryBuilder.DOCUMENTS_FIELD.getPreferredName()
};
private static String queryField = "field";
private static String aliasField = "alias";
protected static String queryField = "field";
protected static String aliasField = "alias";
private static String docType;
private String indexedDocumentIndex;
@ -249,48 +240,6 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQ
() -> parseQuery("{\"percolate\" : { \"document\": {}, \"documents\": [{}, {}], \"field\":\"" + queryField + "\"}}"));
}
public void testCreateNestedDocumentSearcher() throws Exception {
int numNestedDocs = randomIntBetween(2, 8);
List<ParseContext.Document> docs = new ArrayList<>(numNestedDocs);
for (int i = 0; i < numNestedDocs; i++) {
docs.add(new ParseContext.Document());
}
Collection<ParsedDocument> parsedDocument = Collections.singleton(
new ParsedDocument(null, null, "_id", "_type", null, docs, null, null, null));
Analyzer analyzer = new WhitespaceAnalyzer();
IndexSearcher indexSearcher = PercolateQueryBuilder.createMultiDocumentSearcher(analyzer, parsedDocument);
assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(numNestedDocs));
// ensure that any query get modified so that the nested docs are never included as hits:
Query query = new MatchAllDocsQuery();
BooleanQuery result = (BooleanQuery) indexSearcher.createNormalizedWeight(query, true).getQuery();
assertThat(result.clauses().size(), equalTo(2));
assertThat(result.clauses().get(0).getQuery(), sameInstance(query));
assertThat(result.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST));
assertThat(result.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
}
public void testCreateMultiDocumentSearcher() throws Exception {
int numDocs = randomIntBetween(2, 8);
List<ParsedDocument> docs = new ArrayList<>();
for (int i = 0; i < numDocs; i++) {
docs.add(new ParsedDocument(null, null, "_id", "_type", null,
Collections.singletonList(new ParseContext.Document()), null, null, null));
}
Analyzer analyzer = new WhitespaceAnalyzer();
IndexSearcher indexSearcher = PercolateQueryBuilder.createMultiDocumentSearcher(analyzer, docs);
assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(numDocs));
// ensure that any query get modified so that the nested docs are never included as hits:
Query query = new MatchAllDocsQuery();
BooleanQuery result = (BooleanQuery) indexSearcher.createNormalizedWeight(query, true).getQuery();
assertThat(result.clauses().size(), equalTo(2));
assertThat(result.clauses().get(0).getQuery(), sameInstance(query));
assertThat(result.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST));
assertThat(result.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
}
private static BytesReference randomSource(Set<String> usedFields) {
try {
// If we create two source that have the same field, but these fields have different kind of values (str vs. lng) then
@ -352,4 +301,5 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQ
assertEquals(query.getCandidateMatchesQuery(), aliasQuery.getCandidateMatchesQuery());
assertEquals(query.getVerifiedMatchesQuery(), aliasQuery.getVerifiedMatchesQuery());
}
}

View File

@ -117,9 +117,9 @@ public class PercolateQueryTests extends ESTestCase {
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
// no scoring, wrapping it in a constant score query:
Query query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("a")),
new TermQuery(new Term("select", "a")), percolateSearcher, new MatchNoDocsQuery("")));
new TermQuery(new Term("select", "a")), percolateSearcher, null, new MatchNoDocsQuery("")));
TopDocs topDocs = shardSearcher.search(query, 10);
assertThat(topDocs.totalHits, equalTo(1L));
assertThat(topDocs.totalHits.value, equalTo(1L));
assertThat(topDocs.scoreDocs.length, equalTo(1));
assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
Explanation explanation = shardSearcher.explain(query, 0);
@ -127,9 +127,9 @@ public class PercolateQueryTests extends ESTestCase {
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score));
query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("b")),
new TermQuery(new Term("select", "b")), percolateSearcher, new MatchNoDocsQuery("")));
new TermQuery(new Term("select", "b")), percolateSearcher, null, new MatchNoDocsQuery("")));
topDocs = shardSearcher.search(query, 10);
assertThat(topDocs.totalHits, equalTo(3L));
assertThat(topDocs.totalHits.value, equalTo(3L));
assertThat(topDocs.scoreDocs.length, equalTo(3));
assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
explanation = shardSearcher.explain(query, 1);
@ -147,14 +147,14 @@ public class PercolateQueryTests extends ESTestCase {
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[2].score));
query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("c")),
new MatchAllDocsQuery(), percolateSearcher, new MatchAllDocsQuery()));
new MatchAllDocsQuery(), percolateSearcher, null, new MatchAllDocsQuery()));
topDocs = shardSearcher.search(query, 10);
assertThat(topDocs.totalHits, equalTo(4L));
assertThat(topDocs.totalHits.value, equalTo(4L));
query = new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")),
new TermQuery(new Term("select", "b")), percolateSearcher, new MatchNoDocsQuery(""));
new TermQuery(new Term("select", "b")), percolateSearcher, null, new MatchNoDocsQuery(""));
topDocs = shardSearcher.search(query, 10);
assertThat(topDocs.totalHits, equalTo(3L));
assertThat(topDocs.totalHits.value, equalTo(3L));
assertThat(topDocs.scoreDocs.length, equalTo(3));
assertThat(topDocs.scoreDocs[0].doc, equalTo(3));
explanation = shardSearcher.explain(query, 3);

View File

@ -0,0 +1,57 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.percolator;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import java.io.IOException;
public class PercolateWithNestedQueryBuilderTests extends PercolateQueryBuilderTests {
@Override
protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
super.initializeAdditionalMappings(mapperService);
mapperService.merge("_doc", new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(
"_doc", "some_nested_object", "type=nested"))), MapperService.MergeReason.MAPPING_UPDATE);
}
public void testDetectsNestedDocuments() throws IOException {
QueryShardContext shardContext = createShardContext();
PercolateQueryBuilder builder = new PercolateQueryBuilder(queryField,
new BytesArray("{ \"foo\": \"bar\" }"), XContentType.JSON);
QueryBuilder rewrittenBuilder = rewriteAndFetch(builder, shardContext);
PercolateQuery query = (PercolateQuery) rewrittenBuilder.toQuery(shardContext);
assertFalse(query.excludesNestedDocs());
builder = new PercolateQueryBuilder(queryField,
new BytesArray("{ \"foo\": \"bar\", \"some_nested_object\": [ { \"baz\": 42 } ] }"), XContentType.JSON);
rewrittenBuilder = rewriteAndFetch(builder, shardContext);
query = (PercolateQuery) rewrittenBuilder.toQuery(shardContext);
assertTrue(query.excludesNestedDocs());
}
}

View File

@ -46,7 +46,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase {
public void testHitsExecutionNeeded() {
PercolateQuery percolateQuery = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")),
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery());
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery());
PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(Settings.EMPTY,
emptyMap());
SearchContext searchContext = Mockito.mock(SearchContext.class);
@ -60,7 +60,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase {
public void testLocatePercolatorQuery() {
PercolateQuery percolateQuery = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")),
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery());
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery());
assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(new MatchAllDocsQuery()).size(), equalTo(0));
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER);
@ -94,7 +94,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase {
assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(disjunctionMaxQuery).get(0), sameInstance(percolateQuery));
PercolateQuery percolateQuery2 = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")),
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery());
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery());
bq = new BooleanQuery.Builder();
bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER);
assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()).size(), equalTo(0));

View File

@ -30,6 +30,7 @@ import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.search.SearchHit;
@ -58,7 +59,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value", new WhitespaceAnalyzer());
PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(),
new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery());
new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery());
PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits);
assertNotNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
@ -72,7 +73,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value1", new WhitespaceAnalyzer());
PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(),
new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery());
new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery());
PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits);
assertNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
@ -85,7 +86,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value", new WhitespaceAnalyzer());
PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(),
new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery());
new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery());
PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits);
assertNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
@ -100,7 +101,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
scoreDocs[i] = new ScoreDoc(i, 1f);
}
TopDocs topDocs = new TopDocs(scoreDocs.length, scoreDocs, 1f);
TopDocs topDocs = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs);
IntStream stream = PercolatorMatchedSlotSubFetchPhase.convertTopDocsToSlots(topDocs, null);
int[] result = stream.toArray();
@ -117,7 +118,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
scoreDocs[2] = new ScoreDoc(8, 1f);
scoreDocs[3] = new ScoreDoc(11, 1f);
scoreDocs[4] = new ScoreDoc(14, 1f);
TopDocs topDocs = new TopDocs(scoreDocs.length, scoreDocs, 1f);
TopDocs topDocs = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs);
FixedBitSet bitSet = new FixedBitSet(15);
bitSet.set(2);

View File

@ -1 +0,0 @@
a010e852be8d56efe1906e6da5292e4541239724

View File

@ -0,0 +1 @@
97a3758487272ba4d15720b0ca15b0f980310c89

View File

@ -12,7 +12,7 @@
analyzer:
my_analyzer:
tokenizer: standard
filter: ["standard", "lowercase", "my_collator"]
filter: ["lowercase", "my_collator"]
filter:
my_collator:
type: icu_collation

View File

@ -1 +0,0 @@
88e0ed90d433a9088528485cd4f59311735d92a4

View File

@ -0,0 +1 @@
12ed739794cd317754684308ddc5bdbdcc46cdde

Some files were not shown because too many files have changed in this diff.