Scripting: Replace advanced and native scripts with ScriptEngine docs (#24603)
This commit documents how to write a `ScriptEngine` in order to use expert internal APIs, such as using Lucene directly to find index term statistics. These documents prepare the way to remove both native scripts and IndexLookup. The example Java code is compiled and tested under a new Gradle subproject for example plugins. This change does not yet break up jvm-example into the new examples dir; that should be done separately.

relates #19359
relates #19966
parent 17d01550c2
commit c1f1f66509
@@ -19,18 +19,30 @@
package org.elasticsearch.script;

import org.apache.lucene.search.Scorer;
import org.elasticsearch.common.lucene.ScorerAware;

import java.util.Map;

/**
 * A per-segment {@link SearchScript}.
 *
 * This is effectively a functional interface, requiring at least implementing {@link #runAsDouble()}.
 */
public interface LeafSearchScript extends ScorerAware, ExecutableScript {

    void setDocument(int doc);
    /**
     * Set the document this script will process next.
     */
    default void setDocument(int doc) {}

    void setSource(Map<String, Object> source);
    @Override
    default void setScorer(Scorer scorer) {}

    /**
     * Set the source for the current document.
     */
    default void setSource(Map<String, Object> source) {}

    /**
     * Sets per-document aggregation {@code _value}.

@@ -44,8 +56,23 @@ public interface LeafSearchScript extends ScorerAware, ExecutableScript {
        setNextVar("_value", value);
    }

    long runAsLong();
    @Override
    default void setNextVar(String field, Object value) {}

    /**
     * Return the result as a long. This is used by aggregation scripts over long fields.
     */
    default long runAsLong() {
        throw new UnsupportedOperationException("runAsLong is not implemented");
    }

    @Override
    default Object run() {
        return runAsDouble();
    }

    /**
     * Return the result as a double. This is the main use case of search script, used for document scoring.
     */
    double runAsDouble();

}
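With these defaults only `runAsDouble()` remains abstract, so a per-segment script can be supplied as a lambda. A minimal, hypothetical sketch (the helper method and the constant score value are illustrative only, not part of the commit):

import org.elasticsearch.script.LeafSearchScript;

class LeafSearchScriptExamples {
    // Hypothetical helper: returns a per-segment script that scores every document
    // with a constant value. Only runAsDouble() needs to be implemented; setDocument,
    // setSource, setScorer, setNextVar and run() all fall back to the new defaults.
    static LeafSearchScript constantScore(double value) {
        return () -> value;
    }
}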
@@ -38,7 +38,9 @@ public interface ScriptEngine extends Closeable {
    /**
     * The extension for file scripts in this language.
     */
    String getExtension();
    default String getExtension() {
        return getType();
    }

    /**
     * Compiles a script.
@@ -51,7 +51,7 @@ certain tasks.
|built-in
|templates

|<<modules-scripting-native, `java`>>
|<<modules-scripting-engine, `java`>>
|n/a
|you write it!
|expert API

@@ -83,6 +83,4 @@ include::scripting/painless-debugging.asciidoc[]

include::scripting/expression.asciidoc[]

include::scripting/native.asciidoc[]

include::scripting/advanced-scripting.asciidoc[]
include::scripting/engine.asciidoc[]
@@ -1,189 +0,0 @@
[[modules-advanced-scripting]]
=== Advanced text scoring in scripts

experimental[The functionality described on this page is considered experimental and may be changed or removed in a future release]

Text features, such as term or document frequency for a specific term, can be
accessed in scripts with the `_index` variable. This can be useful if, for
example, you want to implement your own scoring model using a
script inside a <<query-dsl-function-score-query,function score query>>.
Statistics over the document collection are computed *per shard*, not per
index.

Note that the `_index` variable is not supported in the painless language, but it is defined when using the groovy language.

[float]
=== Nomenclature:

[horizontal]
`df`::

    document frequency. The number of documents a term appears in. Computed
    per field.

`tf`::

    term frequency. The number of times a term appears in a field in one
    specific document.

`ttf`::

    total term frequency. The number of times this term appears in all
    documents, that is, the sum of `tf` over all documents. Computed per
    field.

`df` and `ttf` are computed per shard and therefore these numbers can vary
depending on the shard the current document resides in.
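These statistics map directly onto plain Lucene `IndexReader` calls, which is how the `ScriptEngine` approach documented by this commit obtains them without `_index`. The following is only a rough, hypothetical sketch: the class and helper names are illustrative, and `reader` is assumed to be the shard-level Lucene reader available to a script engine implementation.

[source,java]
---------------------------------------------------------
import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;

class TermStatsSketch {
    // Hypothetical helper: reads the statistics described above directly from Lucene.
    static void printStats(IndexReader reader, String field, String term) throws IOException {
        Term t = new Term(field, term);
        long df  = reader.docFreq(t);        // `df`:  number of documents containing the term
        long ttf = reader.totalTermFreq(t);  // `ttf`: total occurrences across all documents
        int numDocs = reader.numDocs();      // shard statistic, like _index.numDocs() above
        System.out.println(field + ":" + term + " df=" + df + " ttf=" + ttf + " numDocs=" + numDocs);
    }
}
---------------------------------------------------------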
[float]
=== Shard statistics:

`_index.numDocs()`::

    Number of documents in the shard.

`_index.maxDoc()`::

    Maximal document number in the shard.

`_index.numDeletedDocs()`::

    Number of deleted documents in the shard.

[float]
=== Field statistics:

Field statistics can be accessed with a subscript operator like this:
`_index['FIELD']`.

`_index['FIELD'].docCount()`::

    Number of documents containing the field `FIELD`. Does not take deleted documents into account.

`_index['FIELD'].sumttf()`::

    Sum of `ttf` over all terms that appear in field `FIELD` in all documents.

`_index['FIELD'].sumdf()`::

    The sum of `df` over all terms that appear in field `FIELD` in all
    documents.

Field statistics are computed per shard and therefore these numbers can vary
depending on the shard the current document resides in.
The number of terms in a field cannot be accessed using the `_index` variable. See <<token-count>> for how to do that.

[float]
=== Term statistics:

Term statistics for a field can be accessed with a subscript operator like
this: `_index['FIELD']['TERM']`. This will never return null, even if the term or field does not exist.
If you do not need the term frequency, call `_index['FIELD'].get('TERM', 0)`
to avoid unnecessary initialization of the frequencies. The flag only has an
effect if you set the <<index-options,`index_options`>> to `docs`.

`_index['FIELD']['TERM'].df()`::

    `df` of term `TERM` in field `FIELD`. Will be returned, even if the term
    is not present in the current document.

`_index['FIELD']['TERM'].ttf()`::

    The sum of term frequencies of term `TERM` in field `FIELD` over all
    documents. Will be returned, even if the term is not present in the
    current document.

`_index['FIELD']['TERM'].tf()`::

    `tf` of term `TERM` in field `FIELD`. Will be 0 if the term is not present
    in the current document.

[float]
=== Term positions, offsets and payloads:

If you need information on the positions of terms in a field, call
`_index['FIELD'].get('TERM', flag)` where `flag` can be:

[horizontal]
`_POSITIONS`:: if you need the positions of the term
`_OFFSETS`:: if you need the offsets of the term
`_PAYLOADS`:: if you need the payloads of the term
`_CACHE`:: if you need to iterate over all positions several times

The iterator uses the underlying Lucene classes to iterate over positions. For efficiency reasons, you can only iterate over positions once. If you need to iterate over the positions several times, set the `_CACHE` flag.

You can combine the operators with a `|` if you need more than one piece of
information. For example, the following will return an object holding the
positions and payloads, as well as all statistics:

`_index['FIELD'].get('TERM', _POSITIONS | _PAYLOADS)`

Positions can be accessed with an iterator that returns an object
(`POS_OBJECT`) holding position, offsets and payload for each term position.

`POS_OBJECT.position`::

    The position of the term.

`POS_OBJECT.startOffset`::

    The start offset of the term.

`POS_OBJECT.endOffset`::

    The end offset of the term.

`POS_OBJECT.payload`::

    The payload of the term.

`POS_OBJECT.payloadAsInt(missingValue)`::

    The payload of the term converted to integer. If the current position has
    no payload, the `missingValue` will be returned. Call this only if you
    know that your payloads are integers.

`POS_OBJECT.payloadAsFloat(missingValue)`::

    The payload of the term converted to float. If the current position has no
    payload, the `missingValue` will be returned. Call this only if you know
    that your payloads are floats.

`POS_OBJECT.payloadAsString()`::

    The payload of the term converted to string. If the current position has
    no payload, `null` will be returned. Call this only if you know that your
    payloads are strings.

Example: sum up all payloads for the term `foo`:

[source,groovy]
---------------------------------------------------------
termInfo = _index['my_field'].get('foo', _PAYLOADS);
score = 0;
for (pos in termInfo) {
    score = score + pos.payloadAsInt(0);
}
return score;
---------------------------------------------------------

[float]
=== Term vectors:

The `_index` variable can only be used to gather statistics for single terms. If you want to use information on all terms in a field, you must store the term vectors (see <<term-vector>>). To access them, call
`_index.termVectors()` to get a
https://lucene.apache.org/core/4_0_0/core/org/apache/lucene/index/Fields.html[Fields]
instance. This object can then be used as described in the https://lucene.apache.org/core/4_0_0/core/org/apache/lucene/index/Fields.html[Lucene documentation] to iterate over fields and then for each field iterate over each term in the field.
The method will return null if the term vectors were not stored.
@@ -0,0 +1,57 @@
[[modules-scripting-engine]]
=== Advanced scripts using script engines

A `ScriptEngine` is a backend for implementing a scripting language. It may also
be used to write scripts that need to use advanced internals of scripting, for
example a script that wants to use term frequencies while scoring.

The plugin {plugins}/plugin-authors.html[documentation] has more information on
how to write a plugin so that Elasticsearch will properly load it. To register
the `ScriptEngine`, your plugin should implement the `ScriptPlugin` interface
and override the `getScriptEngine(Settings settings)` method.
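As a minimal sketch of that registration (the plugin class name here is hypothetical; the engine class is the one implemented in the example below, mirroring the `ExpertScriptPlugin` added in this commit):

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.script.ScriptEngine;

public class MyScriptEnginePlugin extends Plugin implements ScriptPlugin {
    @Override
    public ScriptEngine getScriptEngine(Settings settings) {
        return new MyExpertScriptEngine(); // the ScriptEngine shown below
    }
}
--------------------------------------------------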
The following is an example of a custom `ScriptEngine` which uses the language
name `expert_scripts`. It implements a single script called `pure_df` which
may be used as a search script to override each document's score as
the document frequency of a provided term.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{docdir}/../../plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java[expert_engine]
--------------------------------------------------

You can execute the script by specifying its `lang` as `expert_scripts`, and the name
of the script as the script source:

[source,js]
--------------------------------------------------
POST /_search
{
    "query": {
        "function_score": {
            "query": {
                "match": {
                    "body": "foo"
                }
            },
            "functions": [
                {
                    "script_score": {
                        "script": {
                            "inline": "pure_df",
                            "lang" : "expert_scripts",
                            "params": {
                                "field": "body",
                                "term": "foo"
                            }
                        }
                    }
                }
            ]
        }
    }
}
--------------------------------------------------
// CONSOLE
// TEST[skip:we don't have an expert script plugin installed to test this]
@@ -1,86 +0,0 @@
[[modules-scripting-native]]
=== Native (Java) Scripts

Sometimes `painless` and <<modules-scripting-expression, expression>> aren't enough. For those times you can
implement a native script.

The best way to implement a native script is to write a plugin and install it.
The plugin {plugins}/plugin-authors.html[documentation] has more information on
how to write a plugin so that Elasticsearch will properly load it.

To register the actual script you'll need to implement `NativeScriptFactory`
to construct the script. The actual script will extend either
`AbstractExecutableScript` or `AbstractSearchScript`. The second one is likely
the most useful and has several helpful subclasses you can extend like
`AbstractLongSearchScript` and `AbstractDoubleSearchScript`.
Finally, your plugin should register the native script by implementing the
`ScriptPlugin` interface.

If you squashed the whole thing into one class it'd look like:

[source,java]
--------------------------------------------------
public class MyNativeScriptPlugin extends Plugin implements ScriptPlugin {

    @Override
    public List<NativeScriptFactory> getNativeScripts() {
        return Collections.singletonList(new MyNativeScriptFactory());
    }

    public static class MyNativeScriptFactory implements NativeScriptFactory {
        @Override
        public ExecutableScript newScript(@Nullable Map<String, Object> params) {
            return new MyNativeScript();
        }
        @Override
        public boolean needsScores() {
            return false;
        }
        @Override
        public String getName() {
            return "my_script";
        }
    }

    public static class MyNativeScript extends AbstractDoubleSearchScript {
        @Override
        public double runAsDouble() {
            double a = (double) source().get("a");
            double b = (double) source().get("b");
            return a * b;
        }
    }
}
--------------------------------------------------

You can execute the script by specifying its `lang` as `native`, and the name
of the script as the `id`:

[source,js]
--------------------------------------------------
POST /_search
{
    "query": {
        "function_score": {
            "query": {
                "match": {
                    "body": "foo"
                }
            },
            "functions": [
                {
                    "script_score": {
                        "script": {
                            "inline": "my_script",
                            "lang" : "native"
                        }
                    }
                }
            ]
        }
    }
}
--------------------------------------------------
// CONSOLE
// TEST[skip:we don't have a native plugin installed to test this]
@@ -463,3 +463,13 @@ index that make warmers not necessary anymore.
=== Index time boosting

The index time boost mapping has been replaced with query time boost (see <<mapping-boost>>).

[role="exclude",id="modules-scripting-native"]
=== Native scripting

Native scripts have been replaced with writing custom `ScriptEngine` backends (see <<modules-scripting-engine>>).

[role="exclude",id="modules-advanced-scripting"]
=== Advanced scripting

Using `_index` in scripts has been replaced with writing `ScriptEngine` backends (see <<modules-scripting-engine>>).
@@ -0,0 +1,28 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

apply plugin: 'elasticsearch.esplugin'

esplugin {
  name 'script-expert-scoring'
  description 'An example script engine to use low level Lucene internals for expert scoring'
  classname 'org.elasticsearch.example.expertscript.ExpertScriptPlugin'
}

test.enabled = false
@@ -0,0 +1,145 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.example.expertscript;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Scorer;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.LeafSearchScript;
import org.elasticsearch.script.ScriptEngine;
import org.elasticsearch.script.SearchScript;
import org.elasticsearch.search.lookup.SearchLookup;

/**
 * An example script plugin that adds a {@link ScriptEngine} implementing expert scoring.
 */
public class ExpertScriptPlugin extends Plugin implements ScriptPlugin {

    @Override
    public ScriptEngine getScriptEngine(Settings settings) {
        return new MyExpertScriptEngine();
    }

    /** An example {@link ScriptEngine} that uses Lucene segment details to implement pure document frequency scoring. */
    // tag::expert_engine
    private static class MyExpertScriptEngine implements ScriptEngine {
        @Override
        public String getType() {
            return "expert_scripts";
        }

        @Override
        public Function<Map<String,Object>,SearchScript> compile(String scriptName, String scriptSource, Map<String, String> params) {
            // we use the script "source" as the script identifier
            if ("pure_df".equals(scriptSource)) {
                return p -> new SearchScript() {
                    final String field;
                    final String term;
                    {
                        // instance initializer: validate and capture the script parameters once per script instance
                        if (p.containsKey("field") == false) {
                            throw new IllegalArgumentException("Missing parameter [field]");
                        }
                        if (p.containsKey("term") == false) {
                            throw new IllegalArgumentException("Missing parameter [term]");
                        }
                        field = p.get("field").toString();
                        term = p.get("term").toString();
                    }

                    @Override
                    public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException {
                        PostingsEnum postings = context.reader().postings(new Term(field, term));
                        if (postings == null) {
                            // the field and/or term don't exist in this segment, so always return 0
                            return () -> 0.0d;
                        }
                        return new LeafSearchScript() {
                            int currentDocid = -1;
                            @Override
                            public void setDocument(int docid) {
                                // advance has undefined behavior when called with a docid <= its current docid
                                if (postings.docID() < docid) {
                                    try {
                                        postings.advance(docid);
                                    } catch (IOException e) {
                                        throw new UncheckedIOException(e);
                                    }
                                }
                                currentDocid = docid;
                            }
                            @Override
                            public double runAsDouble() {
                                if (postings.docID() != currentDocid) {
                                    // advance moved past the current doc, so this doc has no occurrences of the term
                                    return 0.0d;
                                }
                                try {
                                    return postings.freq();
                                } catch (IOException e) {
                                    throw new UncheckedIOException(e);
                                }
                            }
                        };
                    }

                    @Override
                    public boolean needsScores() {
                        return false;
                    }
                };
            }
            throw new IllegalArgumentException("Unknown script name " + scriptSource);
        }

        @Override
        @SuppressWarnings("unchecked")
        public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, @Nullable Map<String, Object> params) {
            Function<Map<String,Object>,SearchScript> scriptFactory = (Function<Map<String,Object>,SearchScript>) compiledScript.compiled();
            return scriptFactory.apply(params);
        }

        @Override
        public ExecutableScript executable(CompiledScript compiledScript, @Nullable Map<String, Object> params) {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean isInlineScriptEnabled() {
            return true;
        }

        @Override
        public void close() {}
    }
    // end::expert_engine
}
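To make the hand-offs above concrete, here is a rough, hypothetical driver for the compiled script. In a real node the Elasticsearch script service performs these steps itself, and `MyExpertScriptEngine` is private to the plugin, so treat this purely as an illustration; the field and term values are made up.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.script.LeafSearchScript;
import org.elasticsearch.script.SearchScript;

class ExpertScriptFlowSketch {
    // Hypothetical walkthrough: compile() above returns a factory, search() applies
    // the user's params to it, and each segment gets its own LeafSearchScript.
    static double scoreOneDoc(Function<Map<String, Object>, SearchScript> compiled,
                              LeafReaderContext segment, int docId) throws IOException {
        Map<String, Object> params = new HashMap<>();
        params.put("field", "body");   // assumed field name
        params.put("term", "foo");     // assumed term
        SearchScript perSearch = compiled.apply(params);              // what search(...) does with the params
        LeafSearchScript perSegment = perSearch.getLeafSearchScript(segment);
        perSegment.setDocument(docId);                                // position on the document
        return perSegment.runAsDouble();                              // the pure_df score for this document
    }
}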
@@ -0,0 +1,38 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.example.expertscript;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;

public class ExpertScriptClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

    public ExpertScriptClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
        super(testCandidate);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws Exception {
        return ESClientYamlSuiteTestCase.createParameters();
    }
}
@@ -0,0 +1,13 @@
# Integration tests for the expert scoring script example plugin
#
"Plugin loaded":
    - do:
        cluster.state: {}

    # Get master node id
    - set: { master_node: master }

    - do:
        nodes.info: {}

    - match: { nodes.$master.plugins.0.name: script-expert-scoring }
@@ -0,0 +1,53 @@
# Integration tests for the expert scoring script example plugin
#
---
setup:
    - do:
        indices.create:
            index: test

    - do:
        index:
            index: test
            type: test
            id: 1
            body: { "important_field": "foo" }
    - do:
        index:
            index: test
            type: test
            id: 2
            body: { "important_field": "foo foo foo" }
    - do:
        index:
            index: test
            type: test
            id: 3
            body: { "important_field": "foo foo" }

    - do:
        indices.refresh: {}
---
"document scoring":
    - do:
        search:
            index: test
            body:
                query:
                    function_score:
                        query:
                            match:
                                important_field: "foo"
                        functions:
                            - script_score:
                                script:
                                    inline: "pure_df"
                                    lang: "expert_scripts"
                                    params:
                                        field: "important_field"
                                        term: "foo"

    - length: { hits.hits: 3 }
    - match: { hits.hits.0._id: "2" }
    - match: { hits.hits.1._id: "3" }
    - match: { hits.hits.2._id: "1" }
@@ -78,6 +78,15 @@ List projects = [
  'qa:wildfly'
]

File examplePluginsDir = new File(rootProject.projectDir, 'plugins/examples')
List<String> examplePlugins = []
for (File example : examplePluginsDir.listFiles()) {
  if (example.isDirectory() == false) continue;
  if (example.name.startsWith('build') || example.name.startsWith('.')) continue;
  projects.add("example-plugins:${example.name}".toString())
  examplePlugins.add(example.name)
}

boolean isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse')
if (isEclipse) {
  // eclipse cannot handle an intermediate dependency between main and test, so we must create separate projects

@@ -88,6 +97,11 @@ if (isEclipse) {
include projects.toArray(new String[0])

project(':build-tools').projectDir = new File(rootProject.projectDir, 'buildSrc')
project(':example-plugins').projectDir = new File(rootProject.projectDir, 'plugins/examples')

for (String example : examplePlugins) {
  project(":example-plugins:${example}").projectDir = new File(rootProject.projectDir, "plugins/examples/${example}")
}

if (isEclipse) {
  project(":core").projectDir = new File(rootProject.projectDir, 'core/src/main')