Merge branch 'master' into index-lifecycle
commit 053e9fb234
@@ -20,10 +20,8 @@ nbactions.xml
.gradle/
build/

# maven stuff (to be removed when trunk becomes 4.x)
*-execution-hints.log
target/
dependency-reduced-pom.xml
# vscode stuff
.vscode/

# testing stuff
**/.local*

@@ -43,4 +41,3 @@ html_docs
# random old stuff that we should look at the necessity of...
/tmp/
eclipse-build

@@ -107,6 +107,8 @@ We support development in the Eclipse and IntelliJ IDEs. For Eclipse, the
minimum version that we support is [Eclipse Oxygen][eclipse] (version 4.7). For
IntelliJ, the minimum version that we support is [IntelliJ 2017.2][intellij].

### Configuring IDEs And Running Tests

Eclipse users can automatically configure their IDE: `./gradlew eclipse`
then `File: Import: Existing Projects into Workspace`. Select the
option `Search for nested projects`. Additionally you will want to

@@ -144,6 +146,9 @@ For IntelliJ, go to
For Eclipse, go to `Preferences->Java->Installed JREs` and add `-ea` to
`VM Arguments`.

### Java Language Formatting Guidelines

Please follow these formatting guidelines:

* Java indent is 4 spaces

@@ -155,6 +160,33 @@ Please follow these formatting guidelines:
* IntelliJ: `Preferences/Settings->Editor->Code Style->Java->Imports`. There are two configuration options: `Class count to use import with '*'` and `Names count to use static import with '*'`. Set their values to 99999 or some other absurdly high value.
* Don't worry too much about import order. Try not to change it but don't worry about fighting your IDE to stop it from doing so.
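
A minimal sketch of what these conventions look like in practice (the class below is a made-up example, not part of any real package): 4-space indentation and explicit single-class imports rather than wildcard imports.

```java
import java.util.ArrayList; // explicit imports, no java.util.*
import java.util.List;
import java.util.Locale;

public class FormattingExample {

    public List<String> upperCase(List<String> values) {
        // 4 spaces per indentation level
        List<String> result = new ArrayList<>();
        for (String value : values) {
            result.add(value.toUpperCase(Locale.ROOT));
        }
        return result;
    }
}
```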

### License Headers

We require license headers on all Java files. You will notice that all the Java files in
the top-level `x-pack` directory contain a separate license from the rest of the repository. This
directory contains commercial code that is associated with a separate license. It can be helpful
to have the IDE automatically insert the appropriate license header depending on which part of the project
contributions are made to.

#### IntelliJ: Copyright & Scope Profiles

To have IntelliJ insert the correct license, it is necessary to create two copyright profiles.
These may, for example, be called `apache2` and `commercial`. These can be created in
`Preferences/Settings->Editor->Copyright->Copyright Profiles`. To associate these profiles with
their respective directories, two "Scopes" will need to be created. These can be created in
`Preferences/Settings->Appearances & Behavior->Scopes`. When creating scopes, be sure to choose
the `shared` scope type. Create a scope, `apache2`, with
the associated pattern of `!file[group:x-pack]:*/`. This pattern will exclude all the files contained in
the `x-pack` directory. The other scope, `commercial`, will have the inverse pattern of `file[group:x-pack]:*/`.
The two scopes, together, should account for all the files in the project. To associate the scopes
with their copyright profiles, go into `Preferences/Settings->Editor->Copyright` and use the `+` to add
the associations `apache2/apache2` and `commercial/commercial`.

Configuring these options in IntelliJ can be quite buggy, so do not be alarmed if you have to open/close
the settings window and/or restart IntelliJ to see your changes take effect.

### Creating A Distribution

To create a distribution from the source, simply run:

```sh

@@ -169,6 +201,8 @@ The archive distributions (tar and zip) can be found under:
`./distribution/archives/(tar|zip)/build/distributions/`

### Running The Full Test Suite

Before submitting your changes, run the test suite to make sure that nothing is broken, with:

```sh
build.gradle
@@ -20,6 +20,7 @@

import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionCollection
import org.elasticsearch.gradle.VersionProperties

@@ -30,6 +31,7 @@ import org.gradle.api.tasks.wrapper.Wrapper.DistributionType
import org.gradle.util.GradleVersion
import org.gradle.util.DistributionLocator

import java.nio.file.Files
import java.nio.file.Path
import java.security.MessageDigest

@@ -459,6 +461,59 @@ gradle.projectsEvaluated {

}

static void assertLinesInFile(final Path path, final List<String> expectedLines) {
final List<String> actualLines = Files.readAllLines(path)
int line = 0
for (final String expectedLine : expectedLines) {
final String actualLine = actualLines.get(line)
if (expectedLine != actualLine) {
throw new GradleException("expected line [${line + 1}] in [${path}] to be [${expectedLine}] but was [${actualLine}]")
}
line++
}
}

/*
* Check that all generated JARs have our NOTICE.txt and an appropriate
* LICENSE.txt in them. We configure this in gradle but we'd like to
* be extra paranoid.
*/
subprojects { project ->
project.tasks.withType(Jar).whenTaskAdded { jarTask ->
final Task extract = project.task("extract${jarTask.name.capitalize()}", type: LoggedExec) {
dependsOn jarTask
ext.destination = project.buildDir.toPath().resolve("jar-extracted/${jarTask.name}")
commandLine "${->new File(rootProject.compilerJavaHome, 'bin/jar')}",
'xf', "${-> jarTask.outputs.files.singleFile}", 'META-INF/LICENSE.txt', 'META-INF/NOTICE.txt'
workingDir destination
doFirst {
project.delete(destination)
Files.createDirectories(destination)
}
}

final Task checkNotice = project.task("verify${jarTask.name.capitalize()}Notice") {
dependsOn extract
doLast {
final List<String> noticeLines = Files.readAllLines(project.noticeFile.toPath())
final Path noticePath = extract.destination.resolve('META-INF/NOTICE.txt')
assertLinesInFile(noticePath, noticeLines)
}
}
project.check.dependsOn checkNotice

final Task checkLicense = project.task("verify${jarTask.name.capitalize()}License") {
dependsOn extract
doLast {
final List<String> licenseLines = Files.readAllLines(project.licenseFile.toPath())
final Path licensePath = extract.destination.resolve('META-INF/LICENSE.txt')
assertLinesInFile(licensePath, licenseLines)
}
}
project.check.dependsOn checkLicense
}
}

/* Remove assemble on all qa projects because we don't need to publish
* artifacts for them. */
gradle.projectsEvaluated {
@@ -563,16 +563,17 @@ class ClusterFormationTasks {

/** Adds a task to execute a command to help setup the cluster */
static Task configureExecTask(String name, Project project, Task setup, NodeInfo node, Object[] execArgs) {
return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) {
workingDir node.cwd
return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) { Exec exec ->
exec.workingDir node.cwd
exec.environment 'JAVA_HOME', node.getJavaHome()
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
executable 'cmd'
args '/C', 'call'
exec.executable 'cmd'
exec.args '/C', 'call'
// On Windows the comma character is considered a parameter separator:
// arguments are wrapped in an ExecArgWrapper that escapes commas
args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) }
exec.args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) }
} else {
commandLine execArgs
exec.commandLine execArgs
}
}
}
@@ -37,8 +37,15 @@ class VagrantTestPlugin implements Plugin<Project> {
'ubuntu-1404',
]

/** All onboarded archives by default, available for Bats tests even if not used **/
static List<String> DISTRIBUTION_ARCHIVES = ['tar', 'rpm', 'deb', 'oss-rpm', 'oss-deb']
/** All distributions to bring into test VM, whether or not they are used **/
static List<String> DISTRIBUTIONS = [
'archives:tar',
'archives:oss-tar',
'packages:rpm',
'packages:oss-rpm',
'packages:deb',
'packages:oss-deb'
]

/** Packages onboarded for upgrade tests **/
static List<String> UPGRADE_FROM_ARCHIVES = ['rpm', 'deb']

@@ -117,13 +124,8 @@ class VagrantTestPlugin implements Plugin<Project> {
upgradeFromVersion = Version.fromString(upgradeFromVersionRaw)
}

DISTRIBUTION_ARCHIVES.each {
DISTRIBUTIONS.each {
// Adds a dependency for the current version
if (it == 'tar') {
it = 'archives:tar'
} else {
it = "packages:${it}"
}
project.dependencies.add(PACKAGING_CONFIGURATION,
project.dependencies.project(path: ":distribution:${it}", configuration: 'default'))
}
@@ -535,7 +535,6 @@
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReplicaShardAllocatorTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReusePeerRecoverySharedTest.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]get[/\\]GetActionIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexServiceTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexingSlowLogTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergePolicySettingsTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]SearchSlowLogTests.java" checks="LineLength" />
@@ -48,6 +48,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.index.IndexRequest;

@@ -75,6 +76,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.rest.action.RestFieldCapabilitiesAction;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;

@@ -536,6 +538,16 @@ public final class Request {
return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null);
}

static Request fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest) {
Params params = Params.builder();
params.withFields(fieldCapabilitiesRequest.fields());
params.withIndicesOptions(fieldCapabilitiesRequest.indicesOptions());

String[] indices = fieldCapabilitiesRequest.indices();
String endpoint = endpoint(indices, "_field_caps");
return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), null);
}

static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException {
String endpoint = endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval");
Params params = Params.builder();

@@ -712,6 +724,13 @@ public final class Request {
return this;
}

Params withFields(String[] fields) {
if (fields != null && fields.length > 0) {
return putParam("fields", String.join(",", fields));
}
return this;
}

Params withMasterTimeout(TimeValue masterTimeout) {
return putParam("master_timeout", masterTimeout);
}
@@ -30,6 +30,8 @@ import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.MultiGetRequest;

@@ -501,6 +503,31 @@ public class RestHighLevelClient implements Closeable {
headers);
}

/**
* Executes a request using the Field Capabilities API.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html">Field Capabilities API
* on elastic.co</a>.
*/
public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest,
Header... headers) throws IOException {
return performRequestAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps,
FieldCapabilitiesResponse::fromXContent, emptySet(), headers);
}

/**
* Asynchronously executes a request using the Field Capabilities API.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html">Field Capabilities API
* on elastic.co</a>.
*/
public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest,
ActionListener<FieldCapabilitiesResponse> listener,
Header... headers) {
performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps,
FieldCapabilitiesResponse::fromXContent, listener, emptySet(), headers);
}

protected final <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
CheckedFunction<XContentParser, Resp, IOException> entityParser,
@@ -52,6 +52,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeType;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.index.IndexRequest;

@@ -89,6 +90,7 @@ import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.index.rankeval.RankEvalSpec;
import org.elasticsearch.index.rankeval.RatedRequest;
import org.elasticsearch.index.rankeval.RestRankEvalAction;
import org.elasticsearch.rest.action.RestFieldCapabilitiesAction;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

@@ -108,11 +110,14 @@ import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.StringJoiner;
import java.util.function.Consumer;
import java.util.function.Function;

@@ -128,6 +133,8 @@ import static org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomAl
import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.nullValue;

public class RequestTests extends ESTestCase {

@@ -1213,6 +1220,47 @@ public class RequestTests extends ESTestCase {
}
}

public void testFieldCaps() {
// Create a random request.
String[] indices = randomIndicesNames(0, 5);
String[] fields = generateRandomStringArray(5, 10, false, false);

FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest()
.indices(indices)
.fields(fields);

Map<String, String> indicesOptionsParams = new HashMap<>();
setRandomIndicesOptions(fieldCapabilitiesRequest::indicesOptions,
fieldCapabilitiesRequest::indicesOptions,
indicesOptionsParams);

Request request = Request.fieldCaps(fieldCapabilitiesRequest);

// Verify that the resulting REST request looks as expected.
StringJoiner endpoint = new StringJoiner("/", "/", "");
String joinedIndices = String.join(",", indices);
if (!joinedIndices.isEmpty()) {
endpoint.add(joinedIndices);
}
endpoint.add("_field_caps");

assertEquals(endpoint.toString(), request.getEndpoint());
assertEquals(4, request.getParameters().size());

// Note that we don't check the field param value explicitly, as field names are passed through
// a hash set before being added to the request, and can appear in a non-deterministic order.
assertThat(request.getParameters(), hasKey("fields"));
String[] requestFields = Strings.splitStringByCommaToArray(request.getParameters().get("fields"));
assertEquals(new HashSet<>(Arrays.asList(fields)),
new HashSet<>(Arrays.asList(requestFields)));

for (Map.Entry<String, String> param : indicesOptionsParams.entrySet()) {
assertThat(request.getParameters(), hasEntry(param.getKey(), param.getValue()));
}

assertNull(request.getEntity());
}

public void testRankEval() throws Exception {
RankEvalSpec spec = new RankEvalSpec(
Collections.singletonList(new RatedRequest("queryId", Collections.emptyList(), new SearchSourceBuilder())),

@@ -1233,7 +1281,6 @@ public class RequestTests extends ESTestCase {
assertEquals(3, request.getParameters().size());
assertEquals(expectedParams, request.getParameters());
assertToXContentBody(spec, request.getEntity());

}

public void testSplit() throws IOException {
@@ -27,6 +27,9 @@ import org.apache.http.entity.StringEntity;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.MultiSearchRequest;

@@ -96,14 +99,31 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
client().performRequest(HttpPut.METHOD_NAME, "/index/type/5", Collections.emptyMap(), doc5);
client().performRequest(HttpPost.METHOD_NAME, "/index/_refresh");

StringEntity doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);

StringEntity doc = new StringEntity("{\"field\":\"value1\", \"rating\": 7}", ContentType.APPLICATION_JSON);
client().performRequest(HttpPut.METHOD_NAME, "/index1/doc/1", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
client().performRequest(HttpPut.METHOD_NAME, "/index1/doc/2", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);

StringEntity mappings = new StringEntity(
"{" +
"  \"mappings\": {" +
"    \"doc\": {" +
"      \"properties\": {" +
"        \"rating\": {" +
"          \"type\": \"keyword\"" +
"        }" +
"      }" +
"    }" +
"  }" +
"}}",
ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/index2", Collections.emptyMap(), mappings);
doc = new StringEntity("{\"field\":\"value1\", \"rating\": \"good\"}", ContentType.APPLICATION_JSON);
client().performRequest(HttpPut.METHOD_NAME, "/index2/doc/3", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
client().performRequest(HttpPut.METHOD_NAME, "/index2/doc/4", Collections.emptyMap(), doc);

doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
client().performRequest(HttpPut.METHOD_NAME, "/index3/doc/5", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);

@@ -713,6 +733,57 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertThat(multiSearchResponse.getResponses()[1].getResponse(), nullValue());
}

public void testFieldCaps() throws IOException {
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
.indices("index1", "index2")
.fields("rating", "field");

FieldCapabilitiesResponse response = execute(request,
highLevelClient()::fieldCaps, highLevelClient()::fieldCapsAsync);

// Check the capabilities for the 'rating' field.
assertTrue(response.get().containsKey("rating"));
Map<String, FieldCapabilities> ratingResponse = response.getField("rating");
assertEquals(2, ratingResponse.size());

FieldCapabilities expectedKeywordCapabilities = new FieldCapabilities(
"rating", "keyword", true, true, new String[]{"index2"}, null, null);
assertEquals(expectedKeywordCapabilities, ratingResponse.get("keyword"));

FieldCapabilities expectedLongCapabilities = new FieldCapabilities(
"rating", "long", true, true, new String[]{"index1"}, null, null);
assertEquals(expectedLongCapabilities, ratingResponse.get("long"));

// Check the capabilities for the 'field' field.
assertTrue(response.get().containsKey("field"));
Map<String, FieldCapabilities> fieldResponse = response.getField("field");
assertEquals(1, fieldResponse.size());

FieldCapabilities expectedTextCapabilities = new FieldCapabilities(
"field", "text", true, false);
assertEquals(expectedTextCapabilities, fieldResponse.get("text"));
}

public void testFieldCapsWithNonExistentFields() throws IOException {
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
.indices("index2")
.fields("nonexistent");

FieldCapabilitiesResponse response = execute(request,
highLevelClient()::fieldCaps, highLevelClient()::fieldCapsAsync);
assertTrue(response.get().isEmpty());
}

public void testFieldCapsWithNonExistentIndices() {
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
.indices("non-existent")
.fields("rating");

ElasticsearchException exception = expectThrows(ElasticsearchException.class,
() -> execute(request, highLevelClient()::fieldCaps, highLevelClient()::fieldCapsAsync));
assertEquals(RestStatus.NOT_FOUND, exception.status());
}

private static void assertSearchHeader(SearchResponse searchResponse) {
assertThat(searchResponse.getTook().nanos(), greaterThanOrEqualTo(0L));
assertEquals(0, searchResponse.getFailedShards());
@@ -21,8 +21,13 @@ package org.elasticsearch.client.documentation;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.ClearScrollRequest;

@@ -93,6 +98,8 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;

@@ -157,6 +164,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {

// tag::search-source-setter
SearchRequest searchRequest = new SearchRequest();
searchRequest.indices("posts");
searchRequest.source(sourceBuilder);
// end::search-source-setter

@@ -699,6 +707,65 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
}
}

public void testFieldCaps() throws Exception {
indexSearchTestData();
RestHighLevelClient client = highLevelClient();
// tag::field-caps-request
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
.fields("user")
.indices("posts", "authors", "contributors");
// end::field-caps-request

// tag::field-caps-request-indicesOptions
request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
// end::field-caps-request-indicesOptions

// tag::field-caps-execute
FieldCapabilitiesResponse response = client.fieldCaps(request);
// end::field-caps-execute

// tag::field-caps-response
assertThat(response.get().keySet(), contains("user"));
Map<String, FieldCapabilities> userResponse = response.getField("user");

assertThat(userResponse.keySet(), containsInAnyOrder("keyword", "text")); // <1>
FieldCapabilities textCapabilities = userResponse.get("keyword");

assertTrue(textCapabilities.isSearchable());
assertFalse(textCapabilities.isAggregatable());

assertArrayEquals(textCapabilities.indices(), // <2>
new String[]{"authors", "contributors"});
assertNull(textCapabilities.nonSearchableIndices()); // <3>
assertArrayEquals(textCapabilities.nonAggregatableIndices(), // <4>
new String[]{"authors"});
// end::field-caps-response

// tag::field-caps-execute-listener
ActionListener<FieldCapabilitiesResponse> listener = new ActionListener<FieldCapabilitiesResponse>() {
@Override
public void onResponse(FieldCapabilitiesResponse response) {
// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::field-caps-execute-listener

// Replace the empty listener by a blocking listener for tests.
CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::field-caps-execute-async
client.fieldCapsAsync(request, listener); // <1>
// end::field-caps-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}

public void testRankEval() throws Exception {
indexSearchTestData();
RestHighLevelClient client = highLevelClient();

@@ -794,7 +861,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
MultiSearchResponse.Item firstResponse = response.getResponses()[0]; // <1>
assertNull(firstResponse.getFailure()); // <2>
SearchResponse searchResponse = firstResponse.getResponse(); // <3>
assertEquals(3, searchResponse.getHits().getTotalHits());
assertEquals(4, searchResponse.getHits().getTotalHits());
MultiSearchResponse.Item secondResponse = response.getResponses()[1]; // <4>
assertNull(secondResponse.getFailure());
searchResponse = secondResponse.getResponse();

@@ -840,18 +907,35 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
}

private void indexSearchTestData() throws IOException {
BulkRequest request = new BulkRequest();
request.add(new IndexRequest("posts", "doc", "1")
CreateIndexRequest authorsRequest = new CreateIndexRequest("authors")
.mapping("doc", "user", "type=keyword,doc_values=false");
CreateIndexResponse authorsResponse = highLevelClient().indices().create(authorsRequest);
assertTrue(authorsResponse.isAcknowledged());

CreateIndexRequest reviewersRequest = new CreateIndexRequest("contributors")
.mapping("doc", "user", "type=keyword");
CreateIndexResponse reviewersResponse = highLevelClient().indices().create(reviewersRequest);
assertTrue(reviewersResponse.isAcknowledged());

BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new IndexRequest("posts", "doc", "1")
.source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?", "user",
Arrays.asList("kimchy", "luca"), "innerObject", Collections.singletonMap("key", "value")));
request.add(new IndexRequest("posts", "doc", "2")
bulkRequest.add(new IndexRequest("posts", "doc", "2")
.source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch", "user",
Arrays.asList("kimchy", "christoph"), "innerObject", Collections.singletonMap("key", "value")));
request.add(new IndexRequest("posts", "doc", "3")
bulkRequest.add(new IndexRequest("posts", "doc", "3")
.source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user",
Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value")));
request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
BulkResponse bulkResponse = highLevelClient().bulk(request);

bulkRequest.add(new IndexRequest("authors", "doc", "1")
.source(XContentType.JSON, "user", "kimchy"));
bulkRequest.add(new IndexRequest("contributors", "doc", "1")
.source(XContentType.JSON, "user", "tanguy"));

bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest);
assertSame(RestStatus.OK, bulkResponse.status());
assertFalse(bulkResponse.hasFailures());
}
@@ -20,7 +20,6 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks

apply plugin: 'elasticsearch.build'
apply plugin: 'ru.vyarus.animalsniffer'
apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'

@@ -52,8 +51,6 @@ dependencies {
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
testCompile "org.elasticsearch:securemock:${versions.securemock}"
testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}"
testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15"
signature "org.codehaus.mojo.signature:java17:1.0@signature"
}

forbiddenApisMain {
@@ -24,7 +24,6 @@ import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpsConfigurator;
import com.sun.net.httpserver.HttpsServer;
import org.apache.http.HttpHost;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import org.elasticsearch.mocksocket.MockHttpServer;
import org.junit.AfterClass;
import org.junit.BeforeClass;

@@ -46,8 +45,6 @@ import static org.junit.Assert.fail;
/**
* Integration test to validate the builder builds a client with the correct configuration
*/
//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
@IgnoreJRERequirement
public class RestClientBuilderIntegTests extends RestClientTestCase {

private static HttpsServer httpsServer;

@@ -60,8 +57,6 @@ public class RestClientBuilderIntegTests extends RestClientTestCase {
httpsServer.start();
}

//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
@IgnoreJRERequirement
private static class ResponseHandler implements HttpHandler {
@Override
public void handle(HttpExchange httpExchange) throws IOException {
@@ -23,7 +23,6 @@ import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.apache.http.HttpHost;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import org.elasticsearch.mocksocket.MockHttpServer;
import org.junit.AfterClass;
import org.junit.Before;

@@ -48,8 +47,6 @@ import static org.junit.Assert.assertTrue;
* Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}.
* Works against real http servers, multiple hosts. Also tests failover by randomly shutting down hosts.
*/
//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
@IgnoreJRERequirement
public class RestClientMultipleHostsIntegTests extends RestClientTestCase {

private static HttpServer[] httpServers;

@@ -90,8 +87,6 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
return httpServer;
}

//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
@IgnoreJRERequirement
private static class ResponseHandler implements HttpHandler {
private final int statusCode;

@@ -33,7 +33,6 @@ import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.util.EntityUtils;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import org.elasticsearch.mocksocket.MockHttpServer;
import org.junit.AfterClass;
import org.junit.BeforeClass;

@@ -64,8 +63,6 @@ import static org.junit.Assert.fail;
* Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}.
* Works against a real http server, one single host.
*/
//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
@IgnoreJRERequirement
public class RestClientSingleHostIntegTests extends RestClientTestCase {

private static HttpServer httpServer;

@@ -91,8 +88,6 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
return httpServer;
}

//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
@IgnoreJRERequirement
private static class ResponseHandler implements HttpHandler {
private final int statusCode;

@@ -201,8 +201,7 @@ subprojects {
}
final List<String> licenseLines = Files.readAllLines(rootDir.toPath().resolve("licenses/" + licenseFilename))
final Path licensePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/LICENSE.txt")
final List<String> actualLines = Files.readAllLines(licensePath)
assertLinesInFile(licensePath, actualLines, licenseLines)
assertLinesInFile(licensePath, licenseLines)
}
}
check.dependsOn checkLicense

@@ -213,8 +212,7 @@ subprojects {
doLast {
final List<String> noticeLines = Arrays.asList("Elasticsearch", "Copyright 2009-2018 Elasticsearch")
final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/NOTICE.txt")
final List<String> actualLines = Files.readAllLines(noticePath)
assertLinesInFile(noticePath, actualLines, noticeLines)
assertLinesInFile(noticePath, noticeLines)
}
}
check.dependsOn checkNotice

@@ -304,4 +302,3 @@ configure(subprojects.findAll { it.name.contains('zip') }) {
}
}
}

@@ -460,14 +460,3 @@ subprojects {
return result
}
}

static void assertLinesInFile(final Path path, final List<String> actualLines, final List<String> expectedLines) {
int line = 0
for (final String expectedLine : expectedLines) {
final String actualLine = actualLines.get(line)
if (expectedLine != actualLine) {
throw new GradleException("expected line [${line + 1}] in [${path}] to be [${expectedLine}] but was [${actualLine}]")
}
line++
}
}
@@ -270,7 +270,7 @@ Closure commonDebConfig(boolean oss) {
customFields['License'] = 'Elastic-License'
}

version = project.version
version = project.version.replace('-', '~')
packageGroup 'web'
requires 'bash'
requires 'libc6'

@@ -415,8 +415,7 @@ subprojects {
"License: " + expectedLicense)
final List<String> licenseLines = Files.readAllLines(rootDir.toPath().resolve("licenses/" + licenseFilename))
final List<String> expectedLines = header + licenseLines.collect { " " + it }
final List<String> actualLines = Files.readAllLines(copyrightPath)
assertLinesInFile(copyrightPath, actualLines, expectedLines)
assertLinesInFile(copyrightPath, expectedLines)
}
}
} else {

@@ -432,8 +431,7 @@ subprojects {
}
final List<String> licenseLines = Files.readAllLines(rootDir.toPath().resolve("licenses/" + licenseFilename))
final Path licensePath = packageExtractionDir.toPath().resolve("usr/share/elasticsearch/LICENSE.txt")
final List<String> actualLines = Files.readAllLines(licensePath)
assertLinesInFile(licensePath, actualLines, licenseLines)
assertLinesInFile(licensePath, licenseLines)
}
}
}

@@ -444,8 +442,7 @@ subprojects {
doLast {
final List<String> noticeLines = Arrays.asList("Elasticsearch", "Copyright 2009-2018 Elasticsearch")
final Path noticePath = packageExtractionDir.toPath().resolve("usr/share/elasticsearch/NOTICE.txt")
final List<String> actualLines = Files.readAllLines(noticePath)
assertLinesInFile(noticePath, actualLines, noticeLines)
assertLinesInFile(noticePath, noticeLines)
}
}
check.dependsOn checkNotice
@@ -0,0 +1,82 @@
[[java-rest-high-field-caps]]
=== Field Capabilities API

The field capabilities API allows for retrieving the capabilities of fields across multiple indices.

[[java-rest-high-field-caps-request]]
==== Field Capabilities Request

A `FieldCapabilitiesRequest` contains a list of fields whose capabilities
should be returned, plus an optional list of target indices. If no indices
are provided, the request will be executed on all indices.

Note that the `fields` parameter supports wildcard notation. For example, providing `text_*`
will cause all fields that match the expression to be returned.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-request]
--------------------------------------------------

[[java-rest-high-field-caps-request-optional]]
===== Optional arguments

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-request-indicesOptions]
--------------------------------------------------
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
how wildcard expressions are expanded.

[[java-rest-high-field-caps-sync]]
==== Synchronous Execution

The `fieldCaps` method executes the request synchronously:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-execute]
--------------------------------------------------

[[java-rest-high-field-caps-async]]
==== Asynchronous Execution

The `fieldCapsAsync` method executes the request asynchronously,
calling the provided `ActionListener` when the response is ready:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-execute-async]
--------------------------------------------------
<1> The `FieldCapabilitiesRequest` to execute and the `ActionListener` to use when
the execution completes.

The asynchronous method does not block and returns immediately. Once the request
completes, the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.

A typical listener for `FieldCapabilitiesResponse` is constructed as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed.
<2> Called when the whole `FieldCapabilitiesRequest` fails.

[[java-rest-high-field-caps-response]]
==== FieldCapabilitiesResponse

For each requested field, the returned `FieldCapabilitiesResponse` contains its type
and whether or not it can be searched or aggregated on. The response also gives
information about how each index contributes to the field's capabilities.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-response]
--------------------------------------------------
<1> The `user` field has two possible types, `keyword` and `text`.
<2> This field only has type `keyword` in the `authors` and `contributors` indices.
<3> Null, since the field is searchable in all indices for which it has the `keyword` type.
<4> The `user` field is not aggregatable in the `authors` index.
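
As a condensed, illustrative sketch of consuming the merged response programmatically (the index and field names are placeholders and `client` is assumed to be an already-built `RestHighLevelClient`), the outer map returned by `response.get()` is keyed by field name and the inner map by type:

["source","java"]
--------------------------------------------------
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
    .indices("posts", "authors", "contributors")
    .fields("user");
FieldCapabilitiesResponse response = client.fieldCaps(request);

for (Map.Entry<String, Map<String, FieldCapabilities>> field : response.get().entrySet()) {
    for (Map.Entry<String, FieldCapabilities> byType : field.getValue().entrySet()) {
        FieldCapabilities capabilities = byType.getValue();
        // e.g. "user as keyword: searchable=true aggregatable=true"
        System.out.println(field.getKey() + " as " + byType.getKey()
            + ": searchable=" + capabilities.isSearchable()
            + " aggregatable=" + capabilities.isAggregatable());
    }
}
--------------------------------------------------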
@@ -32,11 +32,13 @@ The Java High Level REST Client supports the following Search APIs:
* <<java-rest-high-search-scroll>>
* <<java-rest-high-clear-scroll>>
* <<java-rest-high-multi-search>>
* <<java-rest-high-field-caps>>
* <<java-rest-high-rank-eval>>

include::search/search.asciidoc[]
include::search/scroll.asciidoc[]
include::search/multi-search.asciidoc[]
include::search/field-caps.asciidoc[]
include::search/rank-eval.asciidoc[]

== Miscellaneous APIs
@@ -27,11 +27,13 @@ POST /sales/_search?size=0
// CONSOLE
// TEST[setup:sales]

Available expressions for interval: `year`, `quarter`, `month`, `week`, `day`, `hour`, `minute`, `second`
Available expressions for interval: `year` (`1y`), `quarter` (`1q`), `month` (`1M`), `week` (`1w`),
`day` (`1d`), `hour` (`1h`), `minute` (`1m`), `second` (`1s`)

Time values can also be specified via abbreviations supported by <<time-units,time units>> parsing.
Note that fractional time values are not supported, but you can address this by shifting to another
time unit (e.g., `1.5h` could instead be specified as `90m`).
time unit (e.g., `1.5h` could instead be specified as `90m`). Also note that time intervals larger
than days do not support arbitrary values but can only be one unit large (e.g. `1y` is valid, `2y` is not).
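
As a rough sketch of that workaround in the Java API (the aggregation, field, and index names are made up, and `client` is assumed to be a `RestHighLevelClient`), a fractional interval such as `1.5h` can be expressed by shifting to minutes:

["source","java"]
--------------------------------------------------
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
sourceBuilder.size(0);
sourceBuilder.aggregation(
    AggregationBuilders.dateHistogram("sales_over_time")
        .field("date")
        // 1.5h is not accepted, so use the equivalent 90m instead
        .dateHistogramInterval(new DateHistogramInterval("90m")));
SearchRequest searchRequest = new SearchRequest("sales").source(sourceBuilder);
SearchResponse searchResponse = client.search(searchRequest);
--------------------------------------------------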

[source,js]
--------------------------------------------------
@@ -39,11 +39,14 @@ The result of the above delete operation is:
[[delete-versioning]]
=== Versioning

Each document indexed is versioned. When deleting a document, the
`version` can be specified to make sure the relevant document we are
trying to delete is actually being deleted and it has not changed in the
meantime. Every write operation executed on a document, deletes included,
causes its version to be incremented.
Each document indexed is versioned. When deleting a document, the `version` can
be specified to make sure the relevant document we are trying to delete is
actually being deleted and it has not changed in the meantime. Every write
operation executed on a document, deletes included, causes its version to be
incremented. The version number of a deleted document remains available for a
short time after deletion to allow for control of concurrent operations. The
length of time for which a deleted document's version remains available is
determined by the `index.gc_deletes` index setting and defaults to 60 seconds.
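
A minimal sketch of a versioned delete with the Java high-level REST client (the index, type, id, and version values are placeholders, and `client` is assumed to be a `RestHighLevelClient`); the request fails with a version conflict if the stored version no longer matches:

["source","java"]
--------------------------------------------------
DeleteRequest deleteRequest = new DeleteRequest("twitter", "_doc", "1");
// only delete if the document is still at version 2
deleteRequest.version(2);
DeleteResponse deleteResponse = client.delete(deleteRequest);
--------------------------------------------------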

[float]
[[delete-routing]]
@@ -214,6 +214,27 @@ specific index module:
The maximum length of regex that can be used in Regexp Query.
Defaults to `1000`.

`index.routing.allocation.enable`::

Controls shard allocation for this index. It can be set to:
* `all` (default) - Allows shard allocation for all shards.
* `primaries` - Allows shard allocation only for primary shards.
* `new_primaries` - Allows shard allocation only for newly-created primary shards.
* `none` - No shard allocation is allowed.

`index.routing.rebalance.enable`::

Enables shard rebalancing for this index. It can be set to:
* `all` (default) - Allows shard rebalancing for all shards.
* `primaries` - Allows shard rebalancing only for primary shards.
* `replicas` - Allows shard rebalancing only for replica shards.
* `none` - No shard rebalancing is allowed.

`index.gc_deletes`::

The length of time that a <<delete-versioning,deleted document's version number>> remains available for <<index-versioning,further versioned operations>>.
Defaults to `60s`.
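
Assuming these remain dynamic settings, as their placement in this list suggests, they can be changed on a live index through the update-settings endpoint. A sketch using the low-level `RestClient` (the index name and values are placeholders; the `performRequest` pattern mirrors the tests earlier in this change):

["source","java"]
--------------------------------------------------
StringEntity settings = new StringEntity(
    "{\"index.routing.allocation.enable\": \"primaries\", \"index.gc_deletes\": \"30s\"}",
    ContentType.APPLICATION_JSON);
// PUT /my-index/_settings with the JSON body above
restClient.performRequest("PUT", "/my-index/_settings", Collections.emptyMap(), settings);
--------------------------------------------------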

[float]
=== Settings in other index modules
@@ -0,0 +1,12 @@
[[breaking-changes-6.4]]
== Breaking changes in 6.4

[[breaking_64_api_changes]]
=== API changes

==== Field capabilities request format

In the past, `fields` could be provided either as a parameter, or as part of the request
body. Specifying `fields` in the request body is now deprecated, and instead they should
always be supplied through a request parameter. In 7.0.0, the field capabilities API will
not accept `fields` supplied in the request body.
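
With the Java high-level REST client added in this change, this is already the behaviour: `Request.fieldCaps` serializes the field names as the `fields` request parameter rather than a body. A short sketch (index and field names are placeholders):

["source","java"]
--------------------------------------------------
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
    .indices("twitter")
    .fields("rating", "title");
// sent as GET /twitter/_field_caps?fields=rating,title
FieldCapabilitiesResponse response = client.fieldCaps(request);
--------------------------------------------------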
@@ -41,7 +41,7 @@ addressable from the outside. Defaults to the actual port assigned via
|`transport.tcp.connect_timeout` |The socket connect timeout setting (in
time setting format). Defaults to `30s`.

|`transport.tcp.compress` |Set to `true` to enable compression (LZF)
|`transport.tcp.compress` |Set to `true` to enable compression (`DEFLATE`)
between all nodes. Defaults to `false`.

|`transport.ping_schedule` | Schedule a regular ping message to ensure that connections are kept alive. Defaults to `5s` in the transport client and `-1` (disabled) elsewhere.
@@ -20,7 +20,7 @@ GET twitter/_field_caps?fields=rating
// CONSOLE
// TEST[setup:twitter]

Alternatively the `fields` option can also be defined in the request body:
Alternatively the `fields` option can also be defined in the request body. deprecated[6.4.0, Please use a request parameter instead.]

[source,js]
--------------------------------------------------

@@ -30,6 +30,7 @@ POST _field_caps
}
--------------------------------------------------
// CONSOLE
// TEST[warning:Specifying a request body is deprecated -- the [fields] request parameter should be used instead.]

This is equivalent to the previous request.

@@ -78,9 +78,9 @@ returned with each batch of results. Each call to the `scroll` API returns the
next batch of results until there are no more results left to return, i.e. the
`hits` array is empty.

IMPORTANT: The initial search request and each subsequent scroll request
returns a new `_scroll_id` -- only the most recent `_scroll_id` should be
used.
IMPORTANT: The initial search request and each subsequent scroll request each
return a `_scroll_id`, which may change with each request -- only the most
recent `_scroll_id` should be used.
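
A rough sketch of that pattern with the Java high-level REST client (the index name and keep-alive are placeholders; `searchScroll` and `clearScroll` are the scroll-related client methods, and `client` is assumed to be a `RestHighLevelClient`):

["source","java"]
--------------------------------------------------
SearchRequest searchRequest = new SearchRequest("posts");
searchRequest.scroll(TimeValue.timeValueMinutes(1L));
SearchResponse searchResponse = client.search(searchRequest);
String scrollId = searchResponse.getScrollId();

while (searchResponse.getHits().getHits().length > 0) {
    SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId);
    scrollRequest.scroll(TimeValue.timeValueMinutes(1L));
    searchResponse = client.searchScroll(scrollRequest);
    // always carry the most recently returned scroll id forward
    scrollId = searchResponse.getScrollId();
}

ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
clearScrollRequest.addScrollId(scrollId);
client.clearScroll(clearScrollRequest);
--------------------------------------------------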

NOTE: If the request specifies aggregations, only the initial search response
will contain the aggregations results.
@@ -128,7 +128,7 @@ public class RenameProcessorTests extends ESTestCase {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
String fieldName = RandomDocumentPicks.randomFieldName(random());
ingestDocument.setFieldValue(fieldName, null);
String newFieldName = RandomDocumentPicks.randomFieldName(random());
String newFieldName = randomValueOtherThanMany(ingestDocument::hasField, () -> RandomDocumentPicks.randomFieldName(random()));
Processor processor = new RenameProcessor(randomAlphaOfLength(10), fieldName, newFieldName, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.hasField(fieldName), equalTo(false));
@@ -55,7 +55,8 @@ setup() {
}

@test "[TAR] archive is available" {
count=$(find . -type f -name 'elasticsearch*.tar.gz' | wc -l)
local version=$(cat version)
count=$(find . -type f -name "${PACKAGE_NAME}-${version}.tar.gz" | wc -l)
[ "$count" -eq 1 ]
}

@@ -35,10 +35,12 @@
install_archive() {
export ESHOME=${1:-/tmp/elasticsearch}

local version=$(cat version)

echo "Unpacking tarball to $ESHOME"
rm -rf /tmp/untar
mkdir -p /tmp/untar
tar -xzpf elasticsearch*.tar.gz -C /tmp/untar
tar -xzpf "${PACKAGE_NAME}-${version}.tar.gz" -C /tmp/untar

find /tmp/untar -depth -type d -name 'elasticsearch*' -exec mv {} "$ESHOME" \; > /dev/null

@@ -79,6 +81,8 @@ export_elasticsearch_paths() {
export ESSCRIPTS="$ESCONFIG/scripts"
export ESDATA="$ESHOME/data"
export ESLOG="$ESHOME/logs"

export PACKAGE_NAME=${PACKAGE_NAME:-"elasticsearch-oss"}
}

# Checks that all directories & files are correctly installed
@@ -19,11 +19,14 @@

package org.elasticsearch.action.fieldcaps;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.ArrayList;

@@ -36,6 +39,13 @@ import java.util.List;
* Describes the capabilities of a field optionally merged across multiple indices.
*/
public class FieldCapabilities implements Writeable, ToXContentObject {
private static final ParseField TYPE_FIELD = new ParseField("type");
private static final ParseField SEARCHABLE_FIELD = new ParseField("searchable");
private static final ParseField AGGREGATABLE_FIELD = new ParseField("aggregatable");
private static final ParseField INDICES_FIELD = new ParseField("indices");
private static final ParseField NON_SEARCHABLE_INDICES_FIELD = new ParseField("non_searchable_indices");
private static final ParseField NON_AGGREGATABLE_INDICES_FIELD = new ParseField("non_aggregatable_indices");

private final String name;
private final String type;
private final boolean isSearchable;

@@ -52,7 +62,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject {
* @param isSearchable Whether this field is indexed for search.
* @param isAggregatable Whether this field can be aggregated on.
*/
FieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable) {
public FieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable) {
this(name, type, isSearchable, isAggregatable, null, null, null);
}

@@ -69,7 +79,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject {
* @param nonAggregatableIndices The list of indices where this field is not aggregatable,
* or null if the field is aggregatable in all indices.
*/
FieldCapabilities(String name, String type,
public FieldCapabilities(String name, String type,
boolean isSearchable, boolean isAggregatable,
String[] indices,
String[] nonSearchableIndices,

@@ -83,7 +93,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject {
this.nonAggregatableIndices = nonAggregatableIndices;
}

FieldCapabilities(StreamInput in) throws IOException {
public FieldCapabilities(StreamInput in) throws IOException {
this.name = in.readString();
this.type = in.readString();
this.isSearchable = in.readBoolean();

@@ -107,22 +117,47 @@ public class FieldCapabilities implements Writeable, ToXContentObject {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("type", type);
builder.field("searchable", isSearchable);
builder.field("aggregatable", isAggregatable);
builder.field(TYPE_FIELD.getPreferredName(), type);
builder.field(SEARCHABLE_FIELD.getPreferredName(), isSearchable);
builder.field(AGGREGATABLE_FIELD.getPreferredName(), isAggregatable);
if (indices != null) {
builder.field("indices", indices);
builder.field(INDICES_FIELD.getPreferredName(), indices);
}
if (nonSearchableIndices != null) {
builder.field("non_searchable_indices", nonSearchableIndices);
builder.field(NON_SEARCHABLE_INDICES_FIELD.getPreferredName(), nonSearchableIndices);
}
if (nonAggregatableIndices != null) {
builder.field("non_aggregatable_indices", nonAggregatableIndices);
builder.field(NON_AGGREGATABLE_INDICES_FIELD.getPreferredName(), nonAggregatableIndices);
}
builder.endObject();
return builder;
}

public static FieldCapabilities fromXContent(String name, XContentParser parser) throws IOException {
return PARSER.parse(parser, name);
}

@SuppressWarnings("unchecked")
private static ConstructingObjectParser<FieldCapabilities, String> PARSER = new ConstructingObjectParser<>(
"field_capabilities",
true,
(a, name) -> new FieldCapabilities(name,
(String) a[0],
(boolean) a[1],
(boolean) a[2],
a[3] != null ? ((List<String>) a[3]).toArray(new String[0]) : null,
a[4] != null ? ((List<String>) a[4]).toArray(new String[0]) : null,
a[5] != null ? ((List<String>) a[5]).toArray(new String[0]) : null));

static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), TYPE_FIELD);
PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SEARCHABLE_FIELD);
PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), AGGREGATABLE_FIELD);
PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), INDICES_FIELD);
PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_SEARCHABLE_INDICES_FIELD);
PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_AGGREGATABLE_INDICES_FIELD);
}

/**
* The name of the field.
*/
@ -61,14 +61,18 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind

/**
* Returns <code>true</code> iff the results should be merged.
*
* Note that when using the high-level REST client, results are always merged (this flag is always considered 'true').
*/
boolean isMergeResults() {
return mergeResults;
}

/**
* if set to <code>true</code> the response will contain only a merged view of the per index field capabilities. Otherwise only
* unmerged per index field capabilities are returned.
* If set to <code>true</code> the response will contain only a merged view of the per index field capabilities.
* Otherwise only unmerged per index field capabilities are returned.
*
* Note that when using the high-level REST client, results are always merged (this flag is always considered 'true').
*/
void setMergeResults(boolean mergeResults) {
this.mergeResults = mergeResults;
@ -21,20 +21,29 @@ package org.elasticsearch.action.fieldcaps;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParserUtils;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/**
* Response for {@link FieldCapabilitiesRequest} requests.
*/
public class FieldCapabilitiesResponse extends ActionResponse implements ToXContentFragment {
private static final ParseField FIELDS_FIELD = new ParseField("fields");

private Map<String, Map<String, FieldCapabilities>> responseMap;
private List<FieldCapabilitiesIndexResponse> indexResponses;

@ -114,10 +123,42 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("fields", responseMap);
builder.field(FIELDS_FIELD.getPreferredName(), responseMap);
return builder;
}

public static FieldCapabilitiesResponse fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}

@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<FieldCapabilitiesResponse, Void> PARSER =
new ConstructingObjectParser<>("field_capabilities_response", true,
a -> new FieldCapabilitiesResponse(
((List<Tuple<String, Map<String, FieldCapabilities>>>) a[0]).stream()
.collect(Collectors.toMap(Tuple::v1, Tuple::v2))));

static {
PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> {
Map<String, FieldCapabilities> typeToCapabilities = parseTypeToCapabilities(p, n);
return new Tuple<>(n, typeToCapabilities);
}, FIELDS_FIELD);
}

private static Map<String, FieldCapabilities> parseTypeToCapabilities(XContentParser parser, String name) throws IOException {
Map<String, FieldCapabilities> typeToCapabilities = new HashMap<>();

XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
String type = parser.currentName();
FieldCapabilities capabilities = FieldCapabilities.fromXContent(name, parser);
typeToCapabilities.put(type, capabilities);
}
return typeToCapabilities;
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
@ -37,8 +37,10 @@ import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.transport.Transport;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

@ -62,6 +64,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
private final long clusterStateVersion;
private final Map<String, AliasFilter> aliasFilter;
private final Map<String, Float> concreteIndexBoosts;
private final Map<String, Set<String>> indexRoutings;
private final SetOnce<AtomicArray<ShardSearchFailure>> shardFailures = new SetOnce<>();
private final Object shardFailuresMutex = new Object();
private final AtomicInteger successfulOps = new AtomicInteger();

@ -72,6 +75,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportService searchTransportService,
BiFunction<String, String, Transport.Connection> nodeIdToConnection,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
Map<String, Set<String>> indexRoutings,
Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator<SearchShardIterator> shardsIts,
TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion,

@ -89,6 +93,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
this.clusterStateVersion = clusterStateVersion;
this.concreteIndexBoosts = concreteIndexBoosts;
this.aliasFilter = aliasFilter;
this.indexRoutings = indexRoutings;
this.results = resultConsumer;
this.clusters = clusters;
}

@ -128,17 +133,17 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
onPhaseFailure(currentPhase, "all shards failed", cause);
} else {
Boolean allowPartialResults = request.allowPartialSearchResults();
assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults";
assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults";
if (allowPartialResults == false && shardFailures.get() != null ){
if (logger.isDebugEnabled()) {
final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures());
Throwable cause = shardSearchFailures.length == 0 ? null :
ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
logger.debug(() -> new ParameterizedMessage("{} shards failed for phase: [{}]",
logger.debug(() -> new ParameterizedMessage("{} shards failed for phase: [{}]",
shardSearchFailures.length, getName()), cause);
}
onPhaseFailure(currentPhase, "Partial shards failure", null);
} else {
onPhaseFailure(currentPhase, "Partial shards failure", null);
} else {
if (logger.isTraceEnabled()) {
final String resultsFrom = results.getSuccessfulResults()
.map(r -> r.getSearchShardTarget().toString()).collect(Collectors.joining(","));

@ -271,14 +276,14 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten

@Override
public final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) {

ShardSearchFailure[] failures = buildShardFailures();
Boolean allowPartialResults = request.allowPartialSearchResults();
assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults";
if (allowPartialResults == false && failures.length > 0){
raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures));
}

raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures));
}

return new SearchResponse(internalSearchResponse, scrollId, getNumShards(), successfulOps.get(),
skippedOps.get(), buildTookInMillis(), failures, clusters);
}

@ -318,8 +323,11 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
AliasFilter filter = aliasFilter.get(shardIt.shardId().getIndex().getUUID());
assert filter != null;
float indexBoost = concreteIndexBoosts.getOrDefault(shardIt.shardId().getIndex().getUUID(), DEFAULT_INDEX_BOOST);
String indexName = shardIt.shardId().getIndex().getName();
final String[] routings = indexRoutings.getOrDefault(indexName, Collections.emptySet())
.toArray(new String[0]);
return new ShardSearchTransportRequest(shardIt.getOriginalIndices(), request, shardIt.shardId(), getNumShards(),
filter, indexBoost, timeProvider.getAbsoluteStartMillis(), clusterAlias);
filter, indexBoost, timeProvider.getAbsoluteStartMillis(), clusterAlias, routings);
}

/**
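The hunks above gate phase failure on the request's allowPartialSearchResults flag. The following is a minimal, self-contained sketch of that decision only, not the Elasticsearch implementation; the PartialResultsPolicy class, its Decision enum, and the failedShards/totalShards parameters are hypothetical stand-ins for the real phase bookkeeping.

// Sketch: decide whether a finished search phase may continue when some shards failed.
// The flag mirrors SearchRequest#allowPartialSearchResults(); it must be resolved
// (non-null) before shard execution starts, which is why the diff asserts on it.
final class PartialResultsPolicy {

    enum Decision { CONTINUE, FAIL_PHASE }

    static Decision onPhaseDone(Boolean allowPartial, int failedShards, int totalShards) {
        if (allowPartial == null) {
            // the cluster-level default should already have been substituted
            throw new IllegalStateException("allowPartialSearchResults must be resolved");
        }
        if (failedShards == totalShards) {
            return Decision.FAIL_PHASE;          // all shards failed: always an error
        }
        if (!allowPartial && failedShards > 0) {
            return Decision.FAIL_PHASE;          // partial results are not acceptable
        }
        return Decision.CONTINUE;                // no failures, or partial results allowed
    }

    public static void main(String[] args) {
        System.out.println(onPhaseDone(false, 1, 5)); // FAIL_PHASE
        System.out.println(onPhaseDone(true, 1, 5));  // CONTINUE
    }
}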
@ -27,6 +27,7 @@ import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.transport.Transport;

import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.function.BiFunction;
import java.util.function.Function;

@ -47,6 +48,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<Searc
CanMatchPreFilterSearchPhase(Logger logger, SearchTransportService searchTransportService,
BiFunction<String, String, Transport.Connection> nodeIdToConnection,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
Map<String, Set<String>> indexRoutings,
Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator<SearchShardIterator> shardsIts,
TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion,

@ -56,9 +58,9 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<Searc
* We set max concurrent shard requests to the number of shards to otherwise avoid deep recursing that would occur if the local node
* is the coordinating node for the query, holds all the shards for the request, and there are a lot of shards.
*/
super("can_match", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request,
listener, shardsIts, timeProvider, clusterStateVersion, task, new BitSetSearchPhaseResults(shardsIts.size()), shardsIts.size(),
clusters);
super("can_match", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, indexRoutings,
executor, request, listener, shardsIts, timeProvider, clusterStateVersion, task,
new BitSetSearchPhaseResults(shardsIts.size()), shardsIts.size(), clusters);
this.phaseFactory = phaseFactory;
this.shardsIts = shardsIts;
}
@ -131,7 +131,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
if (shardsIts.size() > 0) {
int maxConcurrentShardRequests = Math.min(this.maxConcurrentShardRequests, shardsIts.size());
final boolean success = shardExecutionIndex.compareAndSet(0, maxConcurrentShardRequests);
assert success;
assert success;
assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults";
if (request.allowPartialSearchResults() == false) {
final StringBuilder missingShards = new StringBuilder();

@ -140,7 +140,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
final SearchShardIterator shardRoutings = shardsIts.get(index);
if (shardRoutings.size() == 0) {
if(missingShards.length() >0 ){
missingShards.append(", ");
missingShards.append(", ");
}
missingShards.append(shardRoutings.shardId());
}
@ -28,6 +28,7 @@ import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.transport.Transport;

import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.function.BiFunction;

@ -37,11 +38,13 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction

SearchDfsQueryThenFetchAsyncAction(final Logger logger, final SearchTransportService searchTransportService,
final BiFunction<String, String, Transport.Connection> nodeIdToConnection, final Map<String, AliasFilter> aliasFilter,
final Map<String, Float> concreteIndexBoosts, final SearchPhaseController searchPhaseController, final Executor executor,
final Map<String, Float> concreteIndexBoosts, final Map<String, Set<String>> indexRoutings,
final SearchPhaseController searchPhaseController, final Executor executor,
final SearchRequest request, final ActionListener<SearchResponse> listener,
final GroupShardsIterator<SearchShardIterator> shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider,
final long clusterStateVersion, final SearchTask task, SearchResponse.Clusters clusters) {
super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener,
super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, indexRoutings,
executor, request, listener,
shardsIts, timeProvider, clusterStateVersion, task, new ArraySearchPhaseResults<>(shardsIts.size()),
request.getMaxConcurrentShardRequests(), clusters);
this.searchPhaseController = searchPhaseController;
@ -28,6 +28,7 @@ import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.transport.Transport;

import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.function.BiFunction;

@ -37,13 +38,14 @@ final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<Se

SearchQueryThenFetchAsyncAction(final Logger logger, final SearchTransportService searchTransportService,
final BiFunction<String, String, Transport.Connection> nodeIdToConnection, final Map<String, AliasFilter> aliasFilter,
final Map<String, Float> concreteIndexBoosts, final SearchPhaseController searchPhaseController, final Executor executor,
final Map<String, Float> concreteIndexBoosts, final Map<String, Set<String>> indexRoutings,
final SearchPhaseController searchPhaseController, final Executor executor,
final SearchRequest request, final ActionListener<SearchResponse> listener,
final GroupShardsIterator<SearchShardIterator> shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider,
long clusterStateVersion, SearchTask task, SearchResponse.Clusters clusters) {
super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener,
shardsIts, timeProvider, clusterStateVersion, task, searchPhaseController.newSearchPhaseResults(request, shardsIts.size()),
request.getMaxConcurrentShardRequests(), clusters);
super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, indexRoutings,
executor, request, listener, shardsIts, timeProvider, clusterStateVersion, task,
searchPhaseController.newSearchPhaseResults(request, shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters);
this.searchPhaseController = searchPhaseController;
}
@ -297,6 +297,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
Map<String, AliasFilter> aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(),
searchRequest.indices());
routingMap = routingMap == null ? Collections.emptyMap() : Collections.unmodifiableMap(routingMap);
String[] concreteIndices = new String[indices.length];
for (int i = 0; i < indices.length; i++) {
concreteIndices[i] = indices[i].getName();

@ -350,7 +351,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
}
boolean preFilterSearchShards = shouldPreFilterSearchShards(searchRequest, shardIterators);
searchAsyncAction(task, searchRequest, shardIterators, timeProvider, connectionLookup, clusterState.version(),
Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener, preFilterSearchShards, clusters).start();
Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, routingMap, listener, preFilterSearchShards, clusters).start();
}

private boolean shouldPreFilterSearchShards(SearchRequest searchRequest, GroupShardsIterator<SearchShardIterator> shardIterators) {

@ -380,17 +381,20 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
GroupShardsIterator<SearchShardIterator> shardIterators,
SearchTimeProvider timeProvider,
BiFunction<String, String, Transport.Connection> connectionLookup,
long clusterStateVersion, Map<String, AliasFilter> aliasFilter,
long clusterStateVersion,
Map<String, AliasFilter> aliasFilter,
Map<String, Float> concreteIndexBoosts,
ActionListener<SearchResponse> listener, boolean preFilter,
Map<String, Set<String>> indexRoutings,
ActionListener<SearchResponse> listener,
boolean preFilter,
SearchResponse.Clusters clusters) {
Executor executor = threadPool.executor(ThreadPool.Names.SEARCH);
if (preFilter) {
return new CanMatchPreFilterSearchPhase(logger, searchTransportService, connectionLookup,
aliasFilter, concreteIndexBoosts, executor, searchRequest, listener, shardIterators,
aliasFilter, concreteIndexBoosts, indexRoutings, executor, searchRequest, listener, shardIterators,
timeProvider, clusterStateVersion, task, (iter) -> {
AbstractSearchAsyncAction action = searchAsyncAction(task, searchRequest, iter, timeProvider, connectionLookup,
clusterStateVersion, aliasFilter, concreteIndexBoosts, listener, false, clusters);
clusterStateVersion, aliasFilter, concreteIndexBoosts, indexRoutings, listener, false, clusters);
return new SearchPhase(action.getName()) {
@Override
public void run() throws IOException {

@ -403,14 +407,14 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
switch (searchRequest.searchType()) {
case DFS_QUERY_THEN_FETCH:
searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators,
timeProvider, clusterStateVersion, task, clusters);
aliasFilter, concreteIndexBoosts, indexRoutings, searchPhaseController, executor, searchRequest, listener,
shardIterators, timeProvider, clusterStateVersion, task, clusters);
break;
case QUERY_AND_FETCH:
case QUERY_THEN_FETCH:
searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators,
timeProvider, clusterStateVersion, task, clusters);
aliasFilter, concreteIndexBoosts, indexRoutings, searchPhaseController, executor, searchRequest, listener,
shardIterators, timeProvider, clusterStateVersion, task, clusters);
break;
default:
throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]");
@ -24,7 +24,7 @@ import java.util.List;

/**
* A simple {@link ShardsIterator} that iterates a list or sub-list of
* {@link ShardRouting shard routings}.
* {@link ShardRouting shard indexRoutings}.
*/
public class PlainShardsIterator implements ShardsIterator {
@ -38,7 +38,7 @@ import java.util.List;

/**
* {@link ShardRouting} immutably encapsulates information about shard
* routings like id, state, version, etc.
* indexRoutings like id, state, version, etc.
*/
public final class ShardRouting implements Writeable, ToXContentObject {

@ -477,7 +477,7 @@ public final class ShardRouting implements Writeable, ToXContentObject {
"ShardRouting is a relocation target but current node id isn't equal to source relocating node. This [" + this + "], other [" + other + "]";

assert b == false || this.shardId.equals(other.shardId) :
"ShardRouting is a relocation target but both routings are not of the same shard id. This [" + this + "], other [" + other + "]";
"ShardRouting is a relocation target but both indexRoutings are not of the same shard id. This [" + this + "], other [" + other + "]";

assert b == false || this.primary == other.primary :
"ShardRouting is a relocation target but primary flag is different. This [" + this + "], target [" + other + "]";

@ -504,7 +504,7 @@ public final class ShardRouting implements Writeable, ToXContentObject {
"ShardRouting is a relocation source but relocating node isn't equal to other's current node. This [" + this + "], other [" + other + "]";

assert b == false || this.shardId.equals(other.shardId) :
"ShardRouting is a relocation source but both routings are not of the same shard. This [" + this + "], target [" + other + "]";
"ShardRouting is a relocation source but both indexRoutings are not of the same shard. This [" + this + "], target [" + other + "]";

assert b == false || this.primary == other.primary :
"ShardRouting is a relocation source but primary flag is different. This [" + this + "], target [" + other + "]";
@ -1269,8 +1269,10 @@ public class InternalEngine extends Engine {
final long seqNo = noOp.seqNo();
try {
final NoOpResult noOpResult = new NoOpResult(noOp.seqNo());
final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason()));
noOpResult.setTranslogLocation(location);
if (noOp.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason()));
noOpResult.setTranslogLocation(location);
}
noOpResult.setTook(System.nanoTime() - noOp.startTime());
noOpResult.freeze();
return noOpResult;
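The hunk above stops no-ops that originate from local translog recovery from being written back into the translog. A minimal, self-contained sketch of that guard follows; it is not the engine code itself, and the Origin enum and TranslogWriter interface are hypothetical stand-ins for the real types in the diff.

// Sketch: an operation replayed from the local translog must not be re-added to the
// translog, otherwise the entry would be duplicated; any other origin records it normally.
final class NoOpTranslogGuardSketch {

    enum Origin { PRIMARY, REPLICA, LOCAL_TRANSLOG_RECOVERY }

    interface TranslogWriter {
        long add(long seqNo, String reason); // returns a location, simplified to a long here
    }

    static Long maybeWriteNoOp(TranslogWriter translog, Origin origin, long seqNo, String reason) {
        if (origin != Origin.LOCAL_TRANSLOG_RECOVERY) {
            return translog.add(seqNo, reason); // normal path: record the no-op
        }
        return null; // replaying the translog itself: the entry already exists
    }
}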
@ -18,9 +18,7 @@
*/
package org.elasticsearch.index.query;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanQuery;

@ -86,6 +84,11 @@ public final class TermsSetQueryBuilder extends AbstractQueryBuilder<TermsSetQue
out.writeOptionalWriteable(minimumShouldMatchScript);
}

// package protected for testing purpose
String getFieldName() {
return fieldName;
}

public List<?> getValues() {
return values;
}

@ -116,9 +119,10 @@ public final class TermsSetQueryBuilder extends AbstractQueryBuilder<TermsSetQue

@Override
protected boolean doEquals(TermsSetQueryBuilder other) {
return Objects.equals(fieldName, this.fieldName) && Objects.equals(values, this.values) &&
Objects.equals(minimumShouldMatchField, this.minimumShouldMatchField) &&
Objects.equals(minimumShouldMatchScript, this.minimumShouldMatchScript);
return Objects.equals(fieldName, other.fieldName)
&& Objects.equals(values, other.values)
&& Objects.equals(minimumShouldMatchField, other.minimumShouldMatchField)
&& Objects.equals(minimumShouldMatchScript, other.minimumShouldMatchScript);
}

@Override
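The doEquals change above fixes a self-comparison: the old code compared each field against itself, so the check was vacuously true. A minimal, self-contained demonstration of that bug pattern follows; the Builder class here is a hypothetical stand-in, not the TermsSetQueryBuilder itself.

// Sketch: comparing a field against itself in equals() makes two different objects compare equal.
final class EqualsSelfComparisonDemo {

    static final class Builder {
        final String fieldName;
        Builder(String fieldName) { this.fieldName = fieldName; }

        // buggy variant, mirroring the old doEquals: ignores `other`, always true
        boolean buggyEquals(Builder other) {
            return java.util.Objects.equals(fieldName, this.fieldName);
        }

        // fixed variant, mirroring the new doEquals: compares against the other instance
        boolean fixedEquals(Builder other) {
            return java.util.Objects.equals(fieldName, other.fieldName);
        }
    }

    public static void main(String[] args) {
        Builder a = new Builder("usage");
        Builder b = new Builder("username");
        System.out.println(a.buggyEquals(b)); // true  (wrong)
        System.out.println(a.fixedEquals(b)); // false (correct)
    }
}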
@ -57,11 +57,16 @@ public class RestFieldCapabilitiesAction extends BaseRestHandler {
@Override
public RestChannelConsumer prepareRequest(final RestRequest request,
final NodeClient client) throws IOException {
if (request.hasContentOrSourceParam() && request.hasParam("fields")) {
throw new IllegalArgumentException("can't specify a request body and [fields]" +
" request parameter, either specify a request body or the" +
" [fields] request parameter");
if (request.hasContentOrSourceParam()) {
deprecationLogger.deprecated("Specifying a request body is deprecated -- the" +
" [fields] request parameter should be used instead.");
if (request.hasParam("fields")) {
throw new IllegalArgumentException("can't specify a request body and [fields]" +
" request parameter, either specify a request body or the" +
" [fields] request parameter");
}
}

final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
final FieldCapabilitiesRequest fieldRequest;
if (request.hasContentOrSourceParam()) {
@ -25,8 +25,10 @@ import org.apache.lucene.search.Collector;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Counter;
import org.elasticsearch.Version;
import org.elasticsearch.action.search.SearchTask;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.lucene.search.Queries;

@ -91,6 +93,7 @@ final class DefaultSearchContext extends SearchContext {
private final Engine.Searcher engineSearcher;
private final BigArrays bigArrays;
private final IndexShard indexShard;
private final ClusterService clusterService;
private final IndexService indexService;
private final ContextIndexSearcher searcher;
private final DfsSearchResult dfsResult;

@ -120,6 +123,7 @@ final class DefaultSearchContext extends SearchContext {
// filter for sliced scroll
private SliceBuilder sliceBuilder;
private SearchTask task;
private final Version minNodeVersion;

/**

@ -152,9 +156,10 @@ final class DefaultSearchContext extends SearchContext {
private final QueryShardContext queryShardContext;
private FetchPhase fetchPhase;

DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher,
IndexService indexService, IndexShard indexShard, BigArrays bigArrays, Counter timeEstimateCounter,
TimeValue timeout, FetchPhase fetchPhase, String clusterAlias) {
DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget,
Engine.Searcher engineSearcher, ClusterService clusterService, IndexService indexService,
IndexShard indexShard, BigArrays bigArrays, Counter timeEstimateCounter, TimeValue timeout,
FetchPhase fetchPhase, String clusterAlias, Version minNodeVersion) {
this.id = id;
this.request = request;
this.fetchPhase = fetchPhase;

@ -168,9 +173,11 @@ final class DefaultSearchContext extends SearchContext {
this.fetchResult = new FetchSearchResult(id, shardTarget);
this.indexShard = indexShard;
this.indexService = indexService;
this.clusterService = clusterService;
this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
this.timeEstimateCounter = timeEstimateCounter;
this.timeout = timeout;
this.minNodeVersion = minNodeVersion;
queryShardContext = indexService.newQueryShardContext(request.shardId().id(), searcher.getIndexReader(), request::nowInMillis,
clusterAlias);
queryShardContext.setTypes(request.types());

@ -278,8 +285,7 @@ final class DefaultSearchContext extends SearchContext {
}

if (sliceBuilder != null) {
filters.add(sliceBuilder.toFilter(queryShardContext, shardTarget().getShardId().getId(),
queryShardContext.getIndexSettings().getNumberOfShards()));
filters.add(sliceBuilder.toFilter(clusterService, request, queryShardContext, minNodeVersion));
}

if (filters.isEmpty()) {
@ -616,8 +616,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
Engine.Searcher engineSearcher = indexShard.acquireSearcher("search");

final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget,
engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), timeout, fetchPhase,
request.getClusterAlias());
engineSearcher, clusterService, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), timeout,
fetchPhase, request.getClusterAlias(), clusterService.state().nodes().getMinNodeVersion());
boolean success = false;
try {
// we clone the query shard context here just for rewriting otherwise we
@ -28,13 +28,10 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.builder.SearchSourceBuilder;

import java.io.IOException;

@ -61,7 +58,6 @@ import java.util.Optional;
*/

public class ShardSearchLocalRequest implements ShardSearchRequest {

private String clusterAlias;
private ShardId shardId;
private int numberOfShards;

@ -74,17 +70,18 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
private Boolean requestCache;
private long nowInMillis;
private boolean allowPartialSearchResults;

private String[] indexRoutings = Strings.EMPTY_ARRAY;
private String preference;
private boolean profile;

ShardSearchLocalRequest() {
}

ShardSearchLocalRequest(SearchRequest searchRequest, ShardId shardId, int numberOfShards,
AliasFilter aliasFilter, float indexBoost, long nowInMillis, String clusterAlias) {
AliasFilter aliasFilter, float indexBoost, long nowInMillis, String clusterAlias, String[] indexRoutings) {
this(shardId, numberOfShards, searchRequest.searchType(),
searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter, indexBoost,
searchRequest.allowPartialSearchResults());
searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter, indexBoost,
searchRequest.allowPartialSearchResults(), indexRoutings, searchRequest.preference());
// If allowPartialSearchResults is unset (ie null), the cluster-level default should have been substituted
// at this stage. Any NPEs in the above are therefore an error in request preparation logic.
assert searchRequest.allowPartialSearchResults() != null;

@ -102,7 +99,8 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
}

public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types,
Boolean requestCache, AliasFilter aliasFilter, float indexBoost, boolean allowPartialSearchResults) {
Boolean requestCache, AliasFilter aliasFilter, float indexBoost, boolean allowPartialSearchResults,
String[] indexRoutings, String preference) {
this.shardId = shardId;
this.numberOfShards = numberOfShards;
this.searchType = searchType;

@ -112,6 +110,8 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
this.aliasFilter = aliasFilter;
this.indexBoost = indexBoost;
this.allowPartialSearchResults = allowPartialSearchResults;
this.indexRoutings = indexRoutings;
this.preference = preference;
}

@ -169,18 +169,28 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
public Boolean requestCache() {
return requestCache;
}

@Override
public Boolean allowPartialSearchResults() {
return allowPartialSearchResults;
}

@Override
public Scroll scroll() {
return scroll;
}

@Override
public String[] indexRoutings() {
return indexRoutings;
}

@Override
public String preference() {
return preference;
}

@Override
public void setProfile(boolean profile) {
this.profile = profile;

@ -225,6 +235,13 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
allowPartialSearchResults = in.readOptionalBoolean();
}
if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
indexRoutings = in.readStringArray();
preference = in.readOptionalString();
} else {
indexRoutings = Strings.EMPTY_ARRAY;
preference = null;
}
}

protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException {

@ -240,7 +257,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
out.writeFloat(indexBoost);
}
if (!asKey) {
if (asKey == false) {
out.writeVLong(nowInMillis);
}
out.writeOptionalBoolean(requestCache);

@ -250,7 +267,12 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
out.writeOptionalBoolean(allowPartialSearchResults);
}

if (asKey == false) {
if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
out.writeStringArray(indexRoutings);
out.writeOptionalString(preference);
}
}
}

@Override
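The read/write hunks above use the usual Elasticsearch pattern of gating new wire fields on the stream version so older nodes keep working. The following is a minimal, self-contained sketch of that pattern only; VersionedInput, VersionedOutput, and the integer version constant are hypothetical stand-ins for StreamInput/StreamOutput and Version.V_6_4_0.

// Sketch: version-gated serialization with a read-side default for older peers.
final class VersionGatedSerializationSketch {

    static final int V_6_4_0 = 60400; // hypothetical numeric encoding of the version

    interface VersionedInput  { int version(); String[] readStringArray(); }
    interface VersionedOutput { int version(); void writeStringArray(String[] values); }

    static String[] readIndexRoutings(VersionedInput in) {
        if (in.version() >= V_6_4_0) {
            return in.readStringArray();   // newer peer sent the routing values
        }
        return new String[0];              // older peer: fall back to "no routing"
    }

    static void writeIndexRoutings(VersionedOutput out, String[] indexRoutings) {
        if (out.version() >= V_6_4_0) {
            out.writeStringArray(indexRoutings); // only peers that know the field receive it
        }
        // older peers: omit the field entirely, matching the read-side default above
    }
}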
@ -19,7 +19,9 @@

package org.elasticsearch.search.internal;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.CheckedFunction;

@ -28,8 +30,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.AliasFilterParsingException;

@ -68,11 +68,21 @@ public interface ShardSearchRequest {
long nowInMillis();

Boolean requestCache();

Boolean allowPartialSearchResults();

Scroll scroll();

/**
* Returns the routing values resolved by the coordinating node for the index pointed by {@link #shardId()}.
*/
String[] indexRoutings();

/**
* Returns the preference of the original {@link SearchRequest#preference()}.
*/
String preference();

/**
* Sets if this shard search needs to be profiled or not
* @param profile True if the shard should be profiled
@ -28,9 +28,6 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.Scroll;

@ -57,9 +54,10 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
}

public ShardSearchTransportRequest(OriginalIndices originalIndices, SearchRequest searchRequest, ShardId shardId, int numberOfShards,
AliasFilter aliasFilter, float indexBoost, long nowInMillis, String clusterAlias) {
AliasFilter aliasFilter, float indexBoost, long nowInMillis,
String clusterAlias, String[] indexRoutings) {
this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardId, numberOfShards, aliasFilter, indexBoost,
nowInMillis, clusterAlias);
nowInMillis, clusterAlias, indexRoutings);
this.originalIndices = originalIndices;
}

@ -151,17 +149,27 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
public Boolean requestCache() {
return shardSearchLocalRequest.requestCache();
}

@Override
public Boolean allowPartialSearchResults() {
return shardSearchLocalRequest.allowPartialSearchResults();
}
}

@Override
public Scroll scroll() {
return shardSearchLocalRequest.scroll();
}

@Override
public String[] indexRoutings() {
return shardSearchLocalRequest.indexRoutings();
}

@Override
public String preference() {
return shardSearchLocalRequest.preference();
}

@Override
public void readFrom(StreamInput in) throws IOException {
throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
@ -23,6 +23,10 @@ import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;

@ -30,6 +34,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

@ -39,9 +44,13 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.internal.ShardSearchRequest;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

/**
* A slice builder allowing to split a scroll in multiple partitions.

@ -203,12 +212,49 @@ public class SliceBuilder implements Writeable, ToXContentObject {
return Objects.hash(this.field, this.id, this.max);
}

public Query toFilter(QueryShardContext context, int shardId, int numShards) {
/**
* Converts this QueryBuilder to a lucene {@link Query}.
*
* @param context Additional information needed to build the query
*/
public Query toFilter(ClusterService clusterService, ShardSearchRequest request, QueryShardContext context, Version minNodeVersion) {
final MappedFieldType type = context.fieldMapper(field);
if (type == null) {
throw new IllegalArgumentException("field " + field + " not found");
}

int shardId = request.shardId().id();
int numShards = context.getIndexSettings().getNumberOfShards();
if (minNodeVersion.onOrAfter(Version.V_6_4_0) &&
(request.preference() != null || request.indexRoutings().length > 0)) {
GroupShardsIterator<ShardIterator> group = buildShardIterator(clusterService, request);
assert group.size() <= numShards : "index routing shards: " + group.size() +
" cannot be greater than total number of shards: " + numShards;
if (group.size() < numShards) {
/**
* The routing of this request targets a subset of the shards of this index so we need to we retrieve
* the original {@link GroupShardsIterator} and compute the request shard id and number of
* shards from it.
* This behavior has been added in {@link Version#V_6_4_0} so if there is another node in the cluster
* with an older version we use the original shard id and number of shards in order to ensure that all
* slices use the same numbers.
*/
numShards = group.size();
int ord = 0;
shardId = -1;
// remap the original shard id with its index (position) in the sorted shard iterator.
for (ShardIterator it : group) {
assert it.shardId().getIndex().equals(request.shardId().getIndex());
if (request.shardId().equals(it.shardId())) {
shardId = ord;
break;
}
++ord;
}
assert shardId != -1 : "shard id: " + request.shardId().getId() + " not found in index shard routing";
}
}

String field = this.field;
boolean useTermQuery = false;
if ("_uid".equals(field)) {

@ -273,6 +319,17 @@ public class SliceBuilder implements Writeable, ToXContentObject {
return new MatchAllDocsQuery();
}

/**
* Returns the {@link GroupShardsIterator} for the provided <code>request</code>.
*/
private GroupShardsIterator<ShardIterator> buildShardIterator(ClusterService clusterService, ShardSearchRequest request) {
final ClusterState state = clusterService.state();
String[] indices = new String[] { request.shardId().getIndex().getName() };
Map<String, Set<String>> routingMap = request.indexRoutings().length > 0 ?
Collections.singletonMap(indices[0], Sets.newHashSet(request.indexRoutings())) : null;
return clusterService.operationRouting().searchShards(state, indices, routingMap, request.preference());
}

@Override
public String toString() {
return Strings.toString(this, true, true);
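The SliceBuilder#toFilter change above remaps the shard id when routing or preference restricts the request to a subset of the index's shards: the slice is computed against that subset, using the shard's position in the targeted group and the group size. The following is a minimal, self-contained sketch of just that remapping, not the SliceBuilder code; the remap helper and its List-based inputs are hypothetical stand-ins for the GroupShardsIterator handling shown in the diff.

import java.util.List;

final class SliceShardRemappingSketch {

    /**
     * Returns {effectiveShardOrdinal, effectiveNumShards} for slice computation.
     * If routing targets every shard, the original coordinates are kept; otherwise the
     * shard id is replaced by its position in the (sorted) list of targeted shard ids.
     */
    static int[] remap(int originalShardId, int totalShards, List<Integer> targetedShardIds) {
        if (targetedShardIds.size() >= totalShards) {
            return new int[] { originalShardId, totalShards }; // all shards targeted: unchanged
        }
        int ord = targetedShardIds.indexOf(originalShardId);
        if (ord < 0) {
            throw new IllegalStateException("shard " + originalShardId + " is not targeted by the routing");
        }
        return new int[] { ord, targetedShardIds.size() };
    }
}

For example, with five shards and routing that targets only shards 1 and 3, a request executing on shard 3 would use ordinal 1 out of 2 shards, assuming the targeted shard ids are iterated in sorted order as in the diff.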
@ -19,7 +19,9 @@

package org.elasticsearch.action.fieldcaps;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;

@ -80,7 +82,7 @@ public class FieldCapabilitiesRequestTests extends ESTestCase {

}

public void testFieldCapsRequestSerialization() throws IOException {
public void testSerialization() throws IOException {
for (int i = 0; i < 20; i++) {
FieldCapabilitiesRequest request = randomRequest();
BytesStreamOutput output = new BytesStreamOutput();

@ -93,4 +95,11 @@ public class FieldCapabilitiesRequestTests extends ESTestCase {
assertEquals(deserialized.hashCode(), request.hashCode());
}
}

public void testValidation() {
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
.indices("index2");
ActionRequestValidationException exception = request.validate();
assertNotNull(exception);
}
}
@ -19,42 +19,152 @@

package org.elasticsearch.action.fieldcaps;

import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.AbstractStreamableXContentTestCase;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;

public class FieldCapabilitiesResponseTests extends ESTestCase {
private FieldCapabilitiesResponse randomResponse() {
Map<String, Map<String, FieldCapabilities> > fieldMap = new HashMap<> ();
int numFields = randomInt(10);
for (int i = 0; i < numFields; i++) {
String fieldName = randomAlphaOfLengthBetween(5, 10);
int numIndices = randomIntBetween(1, 5);
Map<String, FieldCapabilities> indexFieldMap = new HashMap<> ();
for (int j = 0; j < numIndices; j++) {
String index = randomAlphaOfLengthBetween(10, 20);
indexFieldMap.put(index, FieldCapabilitiesTests.randomFieldCaps());
}
fieldMap.put(fieldName, indexFieldMap);
}
return new FieldCapabilitiesResponse(fieldMap);
import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;

public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTestCase<FieldCapabilitiesResponse> {

@Override
protected FieldCapabilitiesResponse doParseInstance(XContentParser parser) throws IOException {
return FieldCapabilitiesResponse.fromXContent(parser);
}

public void testSerialization() throws IOException {
for (int i = 0; i < 20; i++) {
FieldCapabilitiesResponse response = randomResponse();
BytesStreamOutput output = new BytesStreamOutput();
response.writeTo(output);
output.flush();
StreamInput input = output.bytes().streamInput();
FieldCapabilitiesResponse deserialized = new FieldCapabilitiesResponse();
deserialized.readFrom(input);
assertEquals(deserialized, response);
assertEquals(deserialized.hashCode(), response.hashCode());
@Override
protected FieldCapabilitiesResponse createBlankInstance() {
return new FieldCapabilitiesResponse();
}

@Override
protected FieldCapabilitiesResponse createTestInstance() {
Map<String, Map<String, FieldCapabilities>> responses = new HashMap<>();

String[] fields = generateRandomStringArray(5, 10, false, true);
assertNotNull(fields);

for (String field : fields) {
Map<String, FieldCapabilities> typesToCapabilities = new HashMap<>();
String[] types = generateRandomStringArray(5, 10, false, false);
assertNotNull(types);

for (String type : types) {
typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field));
}
responses.put(field, typesToCapabilities);
}
return new FieldCapabilitiesResponse(responses);
}

@Override
protected FieldCapabilitiesResponse mutateInstance(FieldCapabilitiesResponse response) {
Map<String, Map<String, FieldCapabilities>> mutatedResponses = new HashMap<>(response.get());

int mutation = response.get().isEmpty() ? 0 : randomIntBetween(0, 2);

switch (mutation) {
case 0:
String toAdd = randomAlphaOfLength(10);
mutatedResponses.put(toAdd, Collections.singletonMap(
randomAlphaOfLength(10),
FieldCapabilitiesTests.randomFieldCaps(toAdd)));
break;
case 1:
String toRemove = randomFrom(mutatedResponses.keySet());
mutatedResponses.remove(toRemove);
break;
case 2:
String toReplace = randomFrom(mutatedResponses.keySet());
mutatedResponses.put(toReplace, Collections.singletonMap(
randomAlphaOfLength(10),
FieldCapabilitiesTests.randomFieldCaps(toReplace)));
break;
}
return new FieldCapabilitiesResponse(mutatedResponses);
}

@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
// Disallow random fields from being inserted under the 'fields' key, as this
// map only contains field names, and also under 'fields.FIELD_NAME', as these
// maps only contain type names.
return field -> field.matches("fields(\\.\\w+)?");
}

public void testToXContent() throws IOException {
FieldCapabilitiesResponse response = createSimpleResponse();

XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)
.startObject();
response.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();

String generatedResponse = BytesReference.bytes(builder).utf8ToString();
assertEquals((
"{" +
" \"fields\": {" +
" \"rating\": { " +
" \"keyword\": {" +
" \"type\": \"keyword\"," +
" \"searchable\": false," +
" \"aggregatable\": true," +
" \"indices\": [\"index3\", \"index4\"]," +
" \"non_searchable_indices\": [\"index4\"] " +
" }," +
" \"long\": {" +
" \"type\": \"long\"," +
" \"searchable\": true," +
" \"aggregatable\": false," +
" \"indices\": [\"index1\", \"index2\"]," +
" \"non_aggregatable_indices\": [\"index1\"] " +
" }" +
" }," +
" \"title\": { " +
" \"text\": {" +
" \"type\": \"text\"," +
" \"searchable\": true," +
" \"aggregatable\": false" +
" }" +
" }" +
" }" +
"}").replaceAll("\\s+", ""), generatedResponse);
}

private static FieldCapabilitiesResponse createSimpleResponse() {
Map<String, FieldCapabilities> titleCapabilities = new HashMap<>();
titleCapabilities.put("text", new FieldCapabilities("title", "text", true, false));

Map<String, FieldCapabilities> ratingCapabilities = new HashMap<>();
ratingCapabilities.put("long", new FieldCapabilities("rating", "long",
true, false,
new String[]{"index1", "index2"},
null,
new String[]{"index1"}));
ratingCapabilities.put("keyword", new FieldCapabilities("rating", "keyword",
false, true,
new String[]{"index3", "index4"},
new String[]{"index4"},
null));

Map<String, Map<String, FieldCapabilities>> responses = new HashMap<>();
responses.put("title", titleCapabilities);
responses.put("rating", ratingCapabilities);
return new FieldCapabilitiesResponse(responses);
}
}
@@ -20,16 +20,26 @@
package org.elasticsearch.action.fieldcaps;

import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import org.elasticsearch.test.AbstractWireSerializingTestCase;

import java.io.IOException;
import java.util.Arrays;

import static org.hamcrest.Matchers.equalTo;

public class FieldCapabilitiesTests extends AbstractWireSerializingTestCase<FieldCapabilities> {
public class FieldCapabilitiesTests extends AbstractSerializingTestCase<FieldCapabilities> {
    private static final String FIELD_NAME = "field";

    @Override
    protected FieldCapabilities doParseInstance(XContentParser parser) throws IOException {
        return FieldCapabilities.fromXContent(FIELD_NAME, parser);
    }

    @Override
    protected FieldCapabilities createTestInstance() {
        return randomFieldCaps();
        return randomFieldCaps(FIELD_NAME);
    }

    @Override
@@ -82,7 +92,7 @@ public class FieldCapabilitiesTests extends AbstractWireSerializingTestCase<Fiel
        }
    }

    static FieldCapabilities randomFieldCaps() {
    static FieldCapabilities randomFieldCaps(String fieldName) {
        String[] indices = null;
        if (randomBoolean()) {
            indices = new String[randomIntBetween(1, 5)];
@@ -104,7 +114,7 @@ public class FieldCapabilitiesTests extends AbstractWireSerializingTestCase<Fiel
                nonAggregatableIndices[i] = randomAlphaOfLengthBetween(5, 20);
            }
        }
        return new FieldCapabilities(randomAlphaOfLengthBetween(5, 20),
        return new FieldCapabilities(fieldName,
            randomAlphaOfLengthBetween(5, 20), randomBoolean(), randomBoolean(),
            indices, nonSearchableIndices, nonAggregatableIndices);
    }

@@ -23,6 +23,7 @@ import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.shard.ShardId;
@@ -62,10 +63,15 @@ public class AbstractSearchAsyncActionTests extends ESTestCase {

        final SearchRequest request = new SearchRequest();
        request.allowPartialSearchResults(true);
        request.preference("_shards:1,3");
        return new AbstractSearchAsyncAction<SearchPhaseResult>("test", null, null, null,
            Collections.singletonMap("foo", new AliasFilter(new MatchAllQueryBuilder())), Collections.singletonMap("foo", 2.0f), null,
            request, null, new GroupShardsIterator<>(Collections.singletonList(
                new SearchShardIterator(null, null, Collections.emptyList(), null))), timeProvider, 0, null,
            Collections.singletonMap("foo", new AliasFilter(new MatchAllQueryBuilder())), Collections.singletonMap("foo", 2.0f),
            Collections.singletonMap("name", Sets.newHashSet("bar", "baz")),null, request, null,
            new GroupShardsIterator<>(
                Collections.singletonList(
                    new SearchShardIterator(null, null, Collections.emptyList(), null)
                )
            ), timeProvider, 0, null,
            new InitialSearchPhase.ArraySearchPhaseResults<>(10), request.getMaxConcurrentShardRequests(),
            SearchResponse.Clusters.EMPTY) {
            @Override
@@ -117,5 +123,8 @@ public class AbstractSearchAsyncActionTests extends ESTestCase {
        assertArrayEquals(new String[] {"name", "name1"}, shardSearchTransportRequest.indices());
        assertEquals(new MatchAllQueryBuilder(), shardSearchTransportRequest.getAliasFilter().getQueryBuilder());
        assertEquals(2.0f, shardSearchTransportRequest.indexBoost(), 0.0f);
        assertArrayEquals(new String[] {"name", "name1"}, shardSearchTransportRequest.indices());
        assertArrayEquals(new String[] {"bar", "baz"}, shardSearchTransportRequest.indexRoutings());
        assertEquals("_shards:1,3", shardSearchTransportRequest.preference());
    }
}

@@ -78,12 +78,12 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
            2, randomBoolean(), primaryNode, replicaNode);
        final SearchRequest searchRequest = new SearchRequest();
        searchRequest.allowPartialSearchResults(true);


        CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase(logger,
            searchTransportService,
            (clusterAlias, node) -> lookup.get(node),
            Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)),
            Collections.emptyMap(), EsExecutors.newDirectExecutorService(),
            Collections.emptyMap(), Collections.emptyMap(), EsExecutors.newDirectExecutorService(),
            searchRequest, null, shardsIter, timeProvider, 0, null,
            (iter) -> new SearchPhase("test") {
                @Override
@@ -159,12 +159,12 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {

        final SearchRequest searchRequest = new SearchRequest();
        searchRequest.allowPartialSearchResults(true);


        CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase(logger,
            searchTransportService,
            (clusterAlias, node) -> lookup.get(node),
            Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)),
            Collections.emptyMap(), EsExecutors.newDirectExecutorService(),
            Collections.emptyMap(), Collections.emptyMap(), EsExecutors.newDirectExecutorService(),
            searchRequest, null, shardsIter, timeProvider, 0, null,
            (iter) -> new SearchPhase("test") {
                @Override
@@ -222,6 +222,7 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
            (clusterAlias, node) -> lookup.get(node),
            Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)),
            Collections.emptyMap(),
            Collections.emptyMap(),
            EsExecutors.newDirectExecutorService(),
            searchRequest,
            null,

@@ -106,6 +106,7 @@ public class SearchAsyncActionTests extends ESTestCase {
                return lookup.get(node); },
            aliasFilters,
            Collections.emptyMap(),
            Collections.emptyMap(),
            null,
            request,
            responseListener,
@@ -198,6 +199,7 @@ public class SearchAsyncActionTests extends ESTestCase {
                return lookup.get(node); },
            aliasFilters,
            Collections.emptyMap(),
            Collections.emptyMap(),
            null,
            request,
            responseListener,
@@ -303,6 +305,7 @@ public class SearchAsyncActionTests extends ESTestCase {
                return lookup.get(node); },
            aliasFilters,
            Collections.emptyMap(),
            Collections.emptyMap(),
            executor,
            request,
            responseListener,

@@ -83,7 +83,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
    }

    /**
     * puts primary shard routings into initializing state
     * puts primary shard indexRoutings into initializing state
     */
    private void initPrimaries() {
        logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1);

@@ -83,7 +83,7 @@ public class RoutingTableTests extends ESAllocationTestCase {
    }

    /**
     * puts primary shard routings into initializing state
     * puts primary shard indexRoutings into initializing state
     */
    private void initPrimaries() {
        logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1);

@ -21,7 +21,6 @@ package org.elasticsearch.index;
|
|||
|
||||
import org.apache.lucene.search.MatchAllDocsQuery;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -131,16 +130,16 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
|
|||
assertTrue(indexService.getRefreshTask().mustReschedule());
|
||||
|
||||
// now disable
|
||||
IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).build();
|
||||
indexService.updateMetaData(metaData);
|
||||
client().admin().indices().prepareUpdateSettings("test")
|
||||
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).get();
|
||||
assertNotSame(refreshTask, indexService.getRefreshTask());
|
||||
assertTrue(refreshTask.isClosed());
|
||||
assertFalse(refreshTask.isScheduled());
|
||||
assertFalse(indexService.getRefreshTask().mustReschedule());
|
||||
|
||||
// set it to 100ms
|
||||
metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "100ms")).build();
|
||||
indexService.updateMetaData(metaData);
|
||||
client().admin().indices().prepareUpdateSettings("test")
|
||||
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "100ms")).get();
|
||||
assertNotSame(refreshTask, indexService.getRefreshTask());
|
||||
assertTrue(refreshTask.isClosed());
|
||||
|
||||
|
@ -150,8 +149,8 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
|
|||
assertEquals(100, refreshTask.getInterval().millis());
|
||||
|
||||
// set it to 200ms
|
||||
metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).build();
|
||||
indexService.updateMetaData(metaData);
|
||||
client().admin().indices().prepareUpdateSettings("test")
|
||||
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).get();
|
||||
assertNotSame(refreshTask, indexService.getRefreshTask());
|
||||
assertTrue(refreshTask.isClosed());
|
||||
|
||||
|
@ -161,8 +160,8 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
|
|||
assertEquals(200, refreshTask.getInterval().millis());
|
||||
|
||||
// set it to 200ms again
|
||||
metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).build();
|
||||
indexService.updateMetaData(metaData);
|
||||
client().admin().indices().prepareUpdateSettings("test")
|
||||
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).get();
|
||||
assertSame(refreshTask, indexService.getRefreshTask());
|
||||
assertTrue(indexService.getRefreshTask().mustReschedule());
|
||||
assertTrue(refreshTask.isScheduled());
|
||||
|
@ -174,7 +173,9 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
|
||||
public void testFsyncTaskIsRunning() throws IOException {
|
||||
IndexService indexService = createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC).build());
|
||||
Settings settings = Settings.builder()
|
||||
.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC).build();
|
||||
IndexService indexService = createIndex("test", settings);
|
||||
IndexService.AsyncTranslogFSync fsyncTask = indexService.getFsyncTask();
|
||||
assertNotNull(fsyncTask);
|
||||
assertEquals(5000, fsyncTask.getInterval().millis());
|
||||
|
@ -198,12 +199,10 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
|
|||
IndexShard shard = indexService.getShard(0);
|
||||
client().prepareIndex("test", "test", "0").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
|
||||
// now disable the refresh
|
||||
IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData())
|
||||
.settings(Settings.builder().put(indexService.getMetaData().getSettings())
|
||||
.put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).build();
|
||||
client().admin().indices().prepareUpdateSettings("test")
|
||||
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).get();
|
||||
// when we update we reschedule the existing task AND fire off an async refresh to make sure we make everything visible
|
||||
// before that this is why we need to wait for the refresh task to be unscheduled and the first doc to be visible
|
||||
indexService.updateMetaData(metaData);
|
||||
assertTrue(refreshTask.isClosed());
|
||||
refreshTask = indexService.getRefreshTask();
|
||||
assertBusy(() -> {
|
||||
|
@ -217,10 +216,8 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
|
|||
assertFalse(refreshTask.isClosed());
|
||||
// refresh every millisecond
|
||||
client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
|
||||
metaData = IndexMetaData.builder(indexService.getMetaData())
|
||||
.settings(Settings.builder().put(indexService.getMetaData().getSettings())
|
||||
.put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")).build();
|
||||
indexService.updateMetaData(metaData);
|
||||
client().admin().indices().prepareUpdateSettings("test")
|
||||
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")).get();
|
||||
assertTrue(refreshTask.isClosed());
|
||||
assertBusy(() -> {
|
||||
// this one becomes visible due to the force refresh we are running on updateMetaData if the interval changes
|
||||
|
@ -303,13 +300,11 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
|
|||
assertTrue(indexService.getRefreshTask().mustReschedule());
|
||||
client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
|
||||
client().admin().indices().prepareFlush("test").get();
|
||||
IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder()
|
||||
.put(indexService.getMetaData().getSettings())
|
||||
.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), -1)
|
||||
.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1))
|
||||
.build();
|
||||
indexService.updateMetaData(metaData);
|
||||
|
||||
client().admin().indices().prepareUpdateSettings("test")
|
||||
.setSettings(Settings.builder()
|
||||
.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), -1)
|
||||
.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1))
|
||||
.get();
|
||||
IndexShard shard = indexService.getShard(0);
|
||||
assertBusy(() -> assertThat(shard.estimateTranslogOperationsFromMinSeq(0L), equalTo(0)));
|
||||
}
|
||||
|
|
|
@@ -122,6 +122,16 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
                return null;
            }

            @Override
            public String[] indexRoutings() {
                return null;
            }

            @Override
            public String preference() {
                return null;
            }

            @Override
            public void setProfile(boolean profile) {

@ -3724,15 +3724,13 @@ public class InternalEngineTests extends EngineTestCase {
|
|||
noOpEngine.recoverFromTranslog();
|
||||
final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get());
|
||||
final String reason = randomAlphaOfLength(16);
|
||||
noOpEngine.noOp(
|
||||
new Engine.NoOp(
|
||||
maxSeqNo + 1,
|
||||
primaryTerm.get(),
|
||||
randomFrom(PRIMARY, REPLICA, PEER_RECOVERY, LOCAL_TRANSLOG_RECOVERY),
|
||||
System.nanoTime(),
|
||||
reason));
|
||||
noOpEngine.noOp(new Engine.NoOp(maxSeqNo + 1, primaryTerm.get(), LOCAL_TRANSLOG_RECOVERY, System.nanoTime(), reason));
|
||||
assertThat(noOpEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo((long) (maxSeqNo + 1)));
|
||||
assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(1 + gapsFilled));
|
||||
assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled));
|
||||
noOpEngine.noOp(
|
||||
new Engine.NoOp(maxSeqNo + 2, primaryTerm.get(), randomFrom(PRIMARY, REPLICA, PEER_RECOVERY), System.nanoTime(), reason));
|
||||
assertThat(noOpEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo((long) (maxSeqNo + 2)));
|
||||
assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled + 1));
|
||||
// skip to the op that we added to the translog
|
||||
Translog.Operation op;
|
||||
Translog.Operation last = null;
|
||||
|
@ -3744,7 +3742,7 @@ public class InternalEngineTests extends EngineTestCase {
|
|||
assertNotNull(last);
|
||||
assertThat(last, instanceOf(Translog.NoOp.class));
|
||||
final Translog.NoOp noOp = (Translog.NoOp) last;
|
||||
assertThat(noOp.seqNo(), equalTo((long) (maxSeqNo + 1)));
|
||||
assertThat(noOp.seqNo(), equalTo((long) (maxSeqNo + 2)));
|
||||
assertThat(noOp.primaryTerm(), equalTo(primaryTerm.get()));
|
||||
assertThat(noOp.reason(), equalTo(reason));
|
||||
} finally {
|
||||
|
|
|
@ -59,7 +59,9 @@ import java.util.Collections;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
|
@ -85,17 +87,13 @@ public class TermsSetQueryBuilderTests extends AbstractQueryTestCase<TermsSetQue
|
|||
do {
|
||||
fieldName = randomFrom(MAPPED_FIELD_NAMES);
|
||||
} while (fieldName.equals(GEO_POINT_FIELD_NAME) || fieldName.equals(GEO_SHAPE_FIELD_NAME));
|
||||
int numValues = randomIntBetween(0, 10);
|
||||
List<Object> randomTerms = new ArrayList<>(numValues);
|
||||
for (int i = 0; i < numValues; i++) {
|
||||
randomTerms.add(getRandomValueForFieldName(fieldName));
|
||||
}
|
||||
List<?> randomTerms = randomValues(fieldName);
|
||||
TermsSetQueryBuilder queryBuilder = new TermsSetQueryBuilder(STRING_FIELD_NAME, randomTerms);
|
||||
if (randomBoolean()) {
|
||||
queryBuilder.setMinimumShouldMatchField("m_s_m");
|
||||
} else {
|
||||
queryBuilder.setMinimumShouldMatchScript(
|
||||
new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", Collections.emptyMap()));
|
||||
new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", emptyMap()));
|
||||
}
|
||||
return queryBuilder;
|
||||
}
|
||||
|
@ -122,6 +120,41 @@ public class TermsSetQueryBuilderTests extends AbstractQueryTestCase<TermsSetQue
|
|||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TermsSetQueryBuilder mutateInstance(final TermsSetQueryBuilder instance) throws IOException {
|
||||
String fieldName = instance.getFieldName();
|
||||
List<?> values = instance.getValues();
|
||||
String minimumShouldMatchField = null;
|
||||
Script minimumShouldMatchScript = null;
|
||||
|
||||
switch (randomIntBetween(0, 3)) {
|
||||
case 0:
|
||||
Predicate<String> predicate = s -> s.equals(instance.getFieldName()) == false && s.equals(GEO_POINT_FIELD_NAME) == false
|
||||
&& s.equals(GEO_SHAPE_FIELD_NAME) == false;
|
||||
fieldName = randomValueOtherThanMany(predicate, () -> randomFrom(MAPPED_FIELD_NAMES));
|
||||
values = randomValues(fieldName);
|
||||
break;
|
||||
case 1:
|
||||
values = randomValues(fieldName);
|
||||
break;
|
||||
case 2:
|
||||
minimumShouldMatchField = randomAlphaOfLengthBetween(1, 10);
|
||||
break;
|
||||
case 3:
|
||||
minimumShouldMatchScript = new Script(ScriptType.INLINE, MockScriptEngine.NAME, randomAlphaOfLength(10), emptyMap());
|
||||
break;
|
||||
}
|
||||
|
||||
TermsSetQueryBuilder newInstance = new TermsSetQueryBuilder(fieldName, values);
|
||||
if (minimumShouldMatchField != null) {
|
||||
newInstance.setMinimumShouldMatchField(minimumShouldMatchField);
|
||||
}
|
||||
if (minimumShouldMatchScript != null) {
|
||||
newInstance.setMinimumShouldMatchScript(minimumShouldMatchScript);
|
||||
}
|
||||
return newInstance;
|
||||
}
|
||||
|
||||
public void testBothFieldAndScriptSpecified() {
|
||||
TermsSetQueryBuilder queryBuilder = new TermsSetQueryBuilder("_field", Collections.emptyList());
|
||||
queryBuilder.setMinimumShouldMatchScript(new Script(""));
|
||||
|
@ -215,7 +248,7 @@ public class TermsSetQueryBuilderTests extends AbstractQueryTestCase<TermsSetQue
|
|||
|
||||
try (IndexReader ir = DirectoryReader.open(directory)) {
|
||||
QueryShardContext context = createShardContext();
|
||||
Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", Collections.emptyMap());
|
||||
Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", emptyMap());
|
||||
Query query = new TermsSetQueryBuilder("message", Arrays.asList("a", "b", "c", "d"))
|
||||
.setMinimumShouldMatchScript(script).doToQuery(context);
|
||||
IndexSearcher searcher = new IndexSearcher(ir);
|
||||
|
@ -228,6 +261,16 @@ public class TermsSetQueryBuilderTests extends AbstractQueryTestCase<TermsSetQue
|
|||
}
|
||||
}
|
||||
|
||||
private static List<?> randomValues(final String fieldName) {
|
||||
final int numValues = randomIntBetween(0, 10);
|
||||
final List<Object> values = new ArrayList<>(numValues);
|
||||
|
||||
for (int i = 0; i < numValues; i++) {
|
||||
values.add(getRandomValueForFieldName(fieldName));
|
||||
}
|
||||
return values;
|
||||
}
|
||||
|
||||
public static class CustomScriptPlugin extends MockScriptPlugin {
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -1661,6 +1661,16 @@ public class IndexShardTests extends IndexShardTestCase {
        IndexShardTestCase.updateRoutingEntry(newShard, newShard.routingEntry().moveToStarted());
        assertDocCount(newShard, 1);
        assertDocCount(shard, 2);

        for (int i = 0; i < 2; i++) {
            newShard = reinitShard(newShard, ShardRoutingHelper.initWithSameId(primaryShardRouting,
                RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE));
            newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
            assertTrue(newShard.recoverFromStore());
            try (Translog.Snapshot snapshot = getTranslog(newShard).newSnapshot()) {
                assertThat(snapshot.totalOperations(), equalTo(2));
            }
        }
        closeShards(newShard, shard);
    }

@@ -0,0 +1,59 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.rest.action;

import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestFieldCapabilitiesAction;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;
import org.elasticsearch.usage.UsageService;
import org.junit.Before;

import java.io.IOException;
import java.util.Collections;

import static org.mockito.Mockito.mock;

public class RestFieldCapabilitiesActionTests extends ESTestCase {

    private RestFieldCapabilitiesAction action;

    @Before
    public void setUpAction() {
        action = new RestFieldCapabilitiesAction(Settings.EMPTY, mock(RestController.class));
    }

    public void testRequestBodyIsDeprecated() throws IOException {
        String content = "{ \"fields\": [\"title\"] }";
        RestRequest request = new FakeRestRequest.Builder(xContentRegistry())
            .withPath("/_field_caps")
            .withContent(new BytesArray(content), XContentType.JSON)
            .build();
        action.prepareRequest(request, mock(NodeClient.class));

        assertWarnings("Specifying a request body is deprecated -- the" +
            " [fields] request parameter should be used instead.");
    }
}

@ -170,7 +170,7 @@ public class AliasRoutingIT extends ESIntegTestCase {
|
|||
assertThat(client().prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
|
||||
}
|
||||
|
||||
logger.info("--> search with 0,1 routings , should find two");
|
||||
logger.info("--> search with 0,1 indexRoutings , should find two");
|
||||
for (int i = 0; i < 5; i++) {
|
||||
assertThat(client().prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
|
||||
assertThat(client().prepareSearch().setSize(0).setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
|
||||
|
|
|
@ -173,13 +173,13 @@ public class SimpleRoutingIT extends ESIntegTestCase {
|
|||
assertThat(client().prepareSearch().setSize(0).setRouting(secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
|
||||
}
|
||||
|
||||
logger.info("--> search with {},{} routings , should find two", routingValue, "1");
|
||||
logger.info("--> search with {},{} indexRoutings , should find two", routingValue, "1");
|
||||
for (int i = 0; i < 5; i++) {
|
||||
assertThat(client().prepareSearch().setRouting(routingValue, secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
|
||||
assertThat(client().prepareSearch().setSize(0).setRouting(routingValue, secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
|
||||
}
|
||||
|
||||
logger.info("--> search with {},{},{} routings , should find two", routingValue, secondRoutingValue, routingValue);
|
||||
logger.info("--> search with {},{},{} indexRoutings , should find two", routingValue, secondRoutingValue, routingValue);
|
||||
for (int i = 0; i < 5; i++) {
|
||||
assertThat(client().prepareSearch().setRouting(routingValue, secondRoutingValue, routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
|
||||
assertThat(client().prepareSearch().setSize(0).setRouting(routingValue, secondRoutingValue,routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
|
||||
|
|
|
@ -112,8 +112,8 @@ public class DefaultSearchContextTests extends ESTestCase {
|
|||
IndexReader reader = w.getReader();
|
||||
Engine.Searcher searcher = new Engine.Searcher("test", new IndexSearcher(reader))) {
|
||||
|
||||
DefaultSearchContext context1 = new DefaultSearchContext(1L, shardSearchRequest, null, searcher, indexService,
|
||||
indexShard, bigArrays, null, timeout, null, null);
|
||||
DefaultSearchContext context1 = new DefaultSearchContext(1L, shardSearchRequest, null, searcher, null, indexService,
|
||||
indexShard, bigArrays, null, timeout, null, null, Version.CURRENT);
|
||||
context1.from(300);
|
||||
|
||||
// resultWindow greater than maxResultWindow and scrollContext is null
|
||||
|
@ -153,8 +153,8 @@ public class DefaultSearchContextTests extends ESTestCase {
|
|||
+ "] index level setting."));
|
||||
|
||||
// rescore is null but sliceBuilder is not null
|
||||
DefaultSearchContext context2 = new DefaultSearchContext(2L, shardSearchRequest, null, searcher, indexService,
|
||||
indexShard, bigArrays, null, timeout, null, null);
|
||||
DefaultSearchContext context2 = new DefaultSearchContext(2L, shardSearchRequest, null, searcher,
|
||||
null, indexService, indexShard, bigArrays, null, timeout, null, null, Version.CURRENT);
|
||||
|
||||
SliceBuilder sliceBuilder = mock(SliceBuilder.class);
|
||||
int numSlices = maxSlicesPerScroll + randomIntBetween(1, 100);
|
||||
|
@ -170,8 +170,8 @@ public class DefaultSearchContextTests extends ESTestCase {
|
|||
when(shardSearchRequest.getAliasFilter()).thenReturn(AliasFilter.EMPTY);
|
||||
when(shardSearchRequest.indexBoost()).thenReturn(AbstractQueryBuilder.DEFAULT_BOOST);
|
||||
|
||||
DefaultSearchContext context3 = new DefaultSearchContext(3L, shardSearchRequest, null, searcher, indexService,
|
||||
indexShard, bigArrays, null, timeout, null, null);
|
||||
DefaultSearchContext context3 = new DefaultSearchContext(3L, shardSearchRequest, null, searcher, null,
|
||||
indexService, indexShard, bigArrays, null, timeout, null, null, Version.CURRENT);
|
||||
ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery();
|
||||
context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(false);
|
||||
assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query()));
|
||||
|
|
|
@ -213,7 +213,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
|
|||
SearchPhaseResult searchPhaseResult = service.executeQueryPhase(
|
||||
new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
|
||||
new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f,
|
||||
true),
|
||||
true, null, null),
|
||||
new SearchTask(123L, "", "", "", null, Collections.emptyMap()));
|
||||
IntArrayList intCursors = new IntArrayList(1);
|
||||
intCursors.add(0);
|
||||
|
@ -249,7 +249,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
|
|||
new String[0],
|
||||
false,
|
||||
new AliasFilter(null, Strings.EMPTY_ARRAY),
|
||||
1.0f, true)
|
||||
1.0f, true, null, null)
|
||||
);
|
||||
try {
|
||||
// the search context should inherit the default timeout
|
||||
|
@ -269,7 +269,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
|
|||
new String[0],
|
||||
false,
|
||||
new AliasFilter(null, Strings.EMPTY_ARRAY),
|
||||
1.0f, true)
|
||||
1.0f, true, null, null)
|
||||
);
|
||||
try {
|
||||
// the search context should inherit the query timeout
|
||||
|
@ -297,12 +297,13 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
|
|||
searchSourceBuilder.docValueField("field" + i);
|
||||
}
|
||||
try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
|
||||
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true))) {
|
||||
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true, null, null))) {
|
||||
assertNotNull(context);
|
||||
searchSourceBuilder.docValueField("one_field_too_much");
|
||||
IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
|
||||
() -> service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
|
||||
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true)));
|
||||
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f,
|
||||
true, null, null)));
|
||||
assertEquals(
|
||||
"Trying to retrieve too many docvalue_fields. Must be less than or equal to: [100] but was [101]. "
|
||||
+ "This limit can be set by changing the [index.max_docvalue_fields_search] index level setting.",
|
||||
|
@ -328,13 +329,14 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
|
|||
new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap()));
|
||||
}
|
||||
try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
|
||||
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true))) {
|
||||
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true, null, null))) {
|
||||
assertNotNull(context);
|
||||
searchSourceBuilder.scriptField("anotherScriptField",
|
||||
new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap()));
|
||||
IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
|
||||
() -> service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
|
||||
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true)));
|
||||
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY),
|
||||
1.0f, true, null, null)));
|
||||
assertEquals(
|
||||
"Trying to retrieve too many script_fields. Must be less than or equal to: [" + maxScriptFields + "] but was ["
|
||||
+ (maxScriptFields + 1)
|
||||
|
@ -406,28 +408,28 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
|
|||
final IndexShard indexShard = indexService.getShard(0);
|
||||
final boolean allowPartialSearchResults = true;
|
||||
assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, null,
|
||||
Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults)));
|
||||
Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null)));
|
||||
|
||||
assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH,
|
||||
new SearchSourceBuilder(), Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f,
|
||||
allowPartialSearchResults)));
|
||||
new SearchSourceBuilder(), Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f,
|
||||
allowPartialSearchResults, null, null)));
|
||||
|
||||
assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH,
|
||||
new SearchSourceBuilder().query(new MatchAllQueryBuilder()), Strings.EMPTY_ARRAY, false,
|
||||
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults)));
|
||||
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null)));
|
||||
|
||||
assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH,
|
||||
new SearchSourceBuilder().query(new MatchNoneQueryBuilder())
|
||||
.aggregation(new TermsAggregationBuilder("test", ValueType.STRING).minDocCount(0)), Strings.EMPTY_ARRAY, false,
|
||||
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults)));
|
||||
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null)));
|
||||
assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH,
|
||||
new SearchSourceBuilder().query(new MatchNoneQueryBuilder())
|
||||
.aggregation(new GlobalAggregationBuilder("test")), Strings.EMPTY_ARRAY, false,
|
||||
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults)));
|
||||
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null)));
|
||||
|
||||
assertFalse(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH,
|
||||
new SearchSourceBuilder().query(new MatchNoneQueryBuilder()), Strings.EMPTY_ARRAY, false,
|
||||
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults)));
|
||||
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null)));
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -74,6 +74,8 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {
|
|||
assertEquals(deserializedRequest.searchType(), shardSearchTransportRequest.searchType());
|
||||
assertEquals(deserializedRequest.shardId(), shardSearchTransportRequest.shardId());
|
||||
assertEquals(deserializedRequest.numberOfShards(), shardSearchTransportRequest.numberOfShards());
|
||||
assertEquals(deserializedRequest.indexRoutings(), shardSearchTransportRequest.indexRoutings());
|
||||
assertEquals(deserializedRequest.preference(), shardSearchTransportRequest.preference());
|
||||
assertEquals(deserializedRequest.cacheKey(), shardSearchTransportRequest.cacheKey());
|
||||
assertNotSame(deserializedRequest, shardSearchTransportRequest);
|
||||
assertEquals(deserializedRequest.getAliasFilter(), shardSearchTransportRequest.getAliasFilter());
|
||||
|
@ -92,8 +94,10 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {
|
|||
} else {
|
||||
filteringAliases = new AliasFilter(null, Strings.EMPTY_ARRAY);
|
||||
}
|
||||
final String[] routings = generateRandomStringArray(5, 10, false, true);
|
||||
return new ShardSearchTransportRequest(new OriginalIndices(searchRequest), searchRequest, shardId,
|
||||
randomIntBetween(1, 100), filteringAliases, randomBoolean() ? 1.0f : randomFloat(), Math.abs(randomLong()), null);
|
||||
randomIntBetween(1, 100), filteringAliases, randomBoolean() ? 1.0f : randomFloat(),
|
||||
Math.abs(randomLong()), null, routings);
|
||||
}
|
||||
|
||||
public void testFilteringAliases() throws Exception {
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.elasticsearch.search.slice;
|
||||
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.action.search.SearchPhaseExecutionException;
|
||||
import org.elasticsearch.action.search.SearchRequestBuilder;
|
||||
|
@ -48,9 +49,7 @@ import static org.hamcrest.Matchers.equalTo;
|
|||
import static org.hamcrest.Matchers.startsWith;
|
||||
|
||||
public class SearchSliceIT extends ESIntegTestCase {
|
||||
private static final int NUM_DOCS = 1000;
|
||||
|
||||
private int setupIndex(boolean withDocs) throws IOException, ExecutionException, InterruptedException {
|
||||
private void setupIndex(int numDocs, int numberOfShards) throws IOException, ExecutionException, InterruptedException {
|
||||
String mapping = Strings.toString(XContentFactory.jsonBuilder().
|
||||
startObject()
|
||||
.startObject("type")
|
||||
|
@ -70,74 +69,112 @@ public class SearchSliceIT extends ESIntegTestCase {
|
|||
.endObject()
|
||||
.endObject()
|
||||
.endObject());
|
||||
int numberOfShards = randomIntBetween(1, 7);
|
||||
assertAcked(client().admin().indices().prepareCreate("test")
|
||||
.setSettings(Settings.builder().put("number_of_shards", numberOfShards).put("index.max_slices_per_scroll", 10000))
|
||||
.addMapping("type", mapping, XContentType.JSON));
|
||||
ensureGreen();
|
||||
|
||||
if (withDocs == false) {
|
||||
return numberOfShards;
|
||||
}
|
||||
|
||||
List<IndexRequestBuilder> requests = new ArrayList<>();
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
XContentBuilder builder = jsonBuilder();
|
||||
builder.startObject();
|
||||
builder.field("invalid_random_kw", randomAlphaOfLengthBetween(5, 20));
|
||||
builder.field("random_int", randomInt());
|
||||
builder.field("static_int", 0);
|
||||
builder.field("invalid_random_int", randomInt());
|
||||
builder.endObject();
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
XContentBuilder builder = jsonBuilder()
|
||||
.startObject()
|
||||
.field("invalid_random_kw", randomAlphaOfLengthBetween(5, 20))
|
||||
.field("random_int", randomInt())
|
||||
.field("static_int", 0)
|
||||
.field("invalid_random_int", randomInt())
|
||||
.endObject();
|
||||
requests.add(client().prepareIndex("test", "type").setSource(builder));
|
||||
}
|
||||
indexRandom(true, requests);
|
||||
return numberOfShards;
|
||||
}
|
||||
|
||||
public void testDocIdSort() throws Exception {
|
||||
int numShards = setupIndex(true);
|
||||
SearchResponse sr = client().prepareSearch("test")
|
||||
.setQuery(matchAllQuery())
|
||||
.setSize(0)
|
||||
.get();
|
||||
int numDocs = (int) sr.getHits().getTotalHits();
|
||||
assertThat(numDocs, equalTo(NUM_DOCS));
|
||||
int max = randomIntBetween(2, numShards*3);
|
||||
public void testSearchSort() throws Exception {
|
||||
int numShards = randomIntBetween(1, 7);
|
||||
int numDocs = randomIntBetween(100, 1000);
|
||||
setupIndex(numDocs, numShards);
|
||||
int max = randomIntBetween(2, numShards * 3);
|
||||
for (String field : new String[]{"_id", "random_int", "static_int"}) {
|
||||
int fetchSize = randomIntBetween(10, 100);
|
||||
// test _doc sort
|
||||
SearchRequestBuilder request = client().prepareSearch("test")
|
||||
.setQuery(matchAllQuery())
|
||||
.setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
|
||||
.setSize(fetchSize)
|
||||
.addSort(SortBuilders.fieldSort("_doc"));
|
||||
assertSearchSlicesWithScroll(request, field, max);
|
||||
}
|
||||
}
|
||||
assertSearchSlicesWithScroll(request, field, max, numDocs);
|
||||
|
||||
public void testNumericSort() throws Exception {
|
||||
int numShards = setupIndex(true);
|
||||
SearchResponse sr = client().prepareSearch("test")
|
||||
.setQuery(matchAllQuery())
|
||||
.setSize(0)
|
||||
.get();
|
||||
int numDocs = (int) sr.getHits().getTotalHits();
|
||||
assertThat(numDocs, equalTo(NUM_DOCS));
|
||||
|
||||
int max = randomIntBetween(2, numShards*3);
|
||||
for (String field : new String[]{"_id", "random_int", "static_int"}) {
|
||||
int fetchSize = randomIntBetween(10, 100);
|
||||
SearchRequestBuilder request = client().prepareSearch("test")
|
||||
// test numeric sort
|
||||
request = client().prepareSearch("test")
|
||||
.setQuery(matchAllQuery())
|
||||
.setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
|
||||
.addSort(SortBuilders.fieldSort("random_int"))
|
||||
.setSize(fetchSize);
|
||||
assertSearchSlicesWithScroll(request, field, max);
|
||||
assertSearchSlicesWithScroll(request, field, max, numDocs);
|
||||
}
|
||||
}
|
||||
|
||||
public void testWithPreferenceAndRoutings() throws Exception {
|
||||
int numShards = 10;
|
||||
int totalDocs = randomIntBetween(100, 1000);
|
||||
setupIndex(totalDocs, numShards);
|
||||
{
|
||||
SearchResponse sr = client().prepareSearch("test")
|
||||
.setQuery(matchAllQuery())
|
||||
.setPreference("_shards:1,4")
|
||||
.setSize(0)
|
||||
.get();
|
||||
int numDocs = (int) sr.getHits().getTotalHits();
|
||||
int max = randomIntBetween(2, numShards * 3);
|
||||
int fetchSize = randomIntBetween(10, 100);
|
||||
SearchRequestBuilder request = client().prepareSearch("test")
|
||||
.setQuery(matchAllQuery())
|
||||
.setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
|
||||
.setSize(fetchSize)
|
||||
.setPreference("_shards:1,4")
|
||||
.addSort(SortBuilders.fieldSort("_doc"));
|
||||
assertSearchSlicesWithScroll(request, "_id", max, numDocs);
|
||||
}
|
||||
{
|
||||
SearchResponse sr = client().prepareSearch("test")
|
||||
.setQuery(matchAllQuery())
|
||||
.setRouting("foo", "bar")
|
||||
.setSize(0)
|
||||
.get();
|
||||
int numDocs = (int) sr.getHits().getTotalHits();
|
||||
int max = randomIntBetween(2, numShards * 3);
|
||||
int fetchSize = randomIntBetween(10, 100);
|
||||
SearchRequestBuilder request = client().prepareSearch("test")
|
||||
.setQuery(matchAllQuery())
|
||||
.setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
|
||||
.setSize(fetchSize)
|
||||
.setRouting("foo", "bar")
|
||||
.addSort(SortBuilders.fieldSort("_doc"));
|
||||
assertSearchSlicesWithScroll(request, "_id", max, numDocs);
|
||||
}
|
||||
{
|
||||
assertAcked(client().admin().indices().prepareAliases()
|
||||
.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test").alias("alias1").routing("foo"))
|
||||
.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test").alias("alias2").routing("bar"))
|
||||
.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test").alias("alias3").routing("baz"))
|
||||
.get());
|
||||
SearchResponse sr = client().prepareSearch("alias1", "alias3")
|
||||
.setQuery(matchAllQuery())
|
||||
.setSize(0)
|
||||
.get();
|
||||
int numDocs = (int) sr.getHits().getTotalHits();
|
||||
int max = randomIntBetween(2, numShards * 3);
|
||||
int fetchSize = randomIntBetween(10, 100);
|
||||
SearchRequestBuilder request = client().prepareSearch("alias1", "alias3")
|
||||
.setQuery(matchAllQuery())
|
||||
.setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
|
||||
.setSize(fetchSize)
|
||||
.addSort(SortBuilders.fieldSort("_doc"));
|
||||
assertSearchSlicesWithScroll(request, "_id", max, numDocs);
|
||||
}
|
||||
}
|
||||
|
||||
public void testInvalidFields() throws Exception {
|
||||
setupIndex(false);
|
||||
setupIndex(0, 1);
|
||||
SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class,
|
||||
() -> client().prepareSearch("test")
|
||||
.setQuery(matchAllQuery())
|
||||
|
@ -161,7 +198,7 @@ public class SearchSliceIT extends ESIntegTestCase {
|
|||
}
|
||||
|
||||
public void testInvalidQuery() throws Exception {
|
||||
setupIndex(false);
|
||||
setupIndex(0, 1);
|
||||
SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class,
|
||||
() -> client().prepareSearch()
|
||||
.setQuery(matchAllQuery())
|
||||
|
@ -173,7 +210,7 @@ public class SearchSliceIT extends ESIntegTestCase {
|
|||
equalTo("`slice` cannot be used outside of a scroll context"));
|
||||
}
|
||||
|
||||
private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice) {
|
||||
private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice, int numDocs) {
|
||||
int totalResults = 0;
|
||||
List<String> keys = new ArrayList<>();
|
||||
for (int id = 0; id < numSlice; id++) {
|
||||
|
@ -184,7 +221,7 @@ public class SearchSliceIT extends ESIntegTestCase {
|
|||
int numSliceResults = searchResponse.getHits().getHits().length;
|
||||
String scrollId = searchResponse.getScrollId();
|
||||
for (SearchHit hit : searchResponse.getHits().getHits()) {
|
||||
keys.add(hit.getId());
|
||||
assertTrue(keys.add(hit.getId()));
|
||||
}
|
||||
while (searchResponse.getHits().getHits().length > 0) {
|
||||
searchResponse = client().prepareSearchScroll("test")
|
||||
|
@ -195,15 +232,15 @@ public class SearchSliceIT extends ESIntegTestCase {
|
|||
totalResults += searchResponse.getHits().getHits().length;
|
||||
numSliceResults += searchResponse.getHits().getHits().length;
|
||||
for (SearchHit hit : searchResponse.getHits().getHits()) {
|
||||
keys.add(hit.getId());
|
||||
assertTrue(keys.add(hit.getId()));
|
||||
}
|
||||
}
|
||||
assertThat(numSliceResults, equalTo(expectedSliceResults));
|
||||
clearScroll(scrollId);
|
||||
}
|
||||
assertThat(totalResults, equalTo(NUM_DOCS));
|
||||
assertThat(keys.size(), equalTo(NUM_DOCS));
|
||||
assertThat(new HashSet(keys).size(), equalTo(NUM_DOCS));
|
||||
assertThat(totalResults, equalTo(numDocs));
|
||||
assertThat(keys.size(), equalTo(numDocs));
|
||||
assertThat(new HashSet(keys).size(), equalTo(numDocs));
|
||||
}
|
||||
|
||||
private Throwable findRootCause(Exception e) {
|
||||
|
|
|
@ -30,19 +30,38 @@ import org.apache.lucene.search.Query;
|
|||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.search.SearchShardIterator;
|
||||
import org.elasticsearch.action.search.SearchType;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
||||
import org.elasticsearch.cluster.routing.OperationRouting;
|
||||
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
|
||||
import org.elasticsearch.index.mapper.IdFieldMapper;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
import org.elasticsearch.index.query.Rewriteable;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.search.Scroll;
|
||||
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||
import org.elasticsearch.search.internal.AliasFilter;
|
||||
import org.elasticsearch.search.internal.ShardSearchRequest;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -58,13 +77,138 @@ import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashC
|
|||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class SliceBuilderTests extends ESTestCase {
|
||||
private static final int MAX_SLICE = 20;
|
||||
|
||||
private static SliceBuilder randomSliceBuilder() throws IOException {
|
||||
static class ShardSearchRequestTest implements IndicesRequest, ShardSearchRequest {
|
||||
private final String[] indices;
|
||||
private final int shardId;
|
||||
private final String[] indexRoutings;
|
||||
private final String preference;
|
||||
|
||||
ShardSearchRequestTest(String index, int shardId, String[] indexRoutings, String preference) {
|
||||
this.indices = new String[] { index };
|
||||
this.shardId = shardId;
|
||||
this.indexRoutings = indexRoutings;
|
||||
this.preference = preference;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String[] indices() {
|
||||
return indices;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndicesOptions indicesOptions() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ShardId shardId() {
|
||||
return new ShardId(new Index(indices[0], indices[0]), shardId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String[] types() {
|
||||
return new String[0];
|
||||
}
|
||||
|
||||
@Override
|
||||
public SearchSourceBuilder source() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public AliasFilter getAliasFilter() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setAliasFilter(AliasFilter filter) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void source(SearchSourceBuilder source) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public int numberOfShards() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SearchType searchType() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public float indexBoost() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nowInMillis() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Boolean requestCache() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Boolean allowPartialSearchResults() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Scroll scroll() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String[] indexRoutings() {
|
||||
return indexRoutings;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String preference() {
|
||||
return preference;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setProfile(boolean profile) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isProfile() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesReference cacheKey() throws IOException {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getClusterAlias() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Rewriteable<Rewriteable> getRewriteable() {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private static SliceBuilder randomSliceBuilder() {
|
||||
int max = randomIntBetween(2, MAX_SLICE);
|
||||
int id = randomIntBetween(1, max - 1);
|
||||
String field = randomAlphaOfLengthBetween(5, 20);
|
||||
|
@ -75,7 +219,7 @@ public class SliceBuilderTests extends ESTestCase {
|
|||
return copyWriteable(original, new NamedWriteableRegistry(Collections.emptyList()), SliceBuilder::new);
|
||||
}
|
||||
|
||||
private static SliceBuilder mutate(SliceBuilder original) throws IOException {
|
||||
private static SliceBuilder mutate(SliceBuilder original) {
|
||||
switch (randomIntBetween(0, 2)) {
|
||||
case 0: return new SliceBuilder(original.getField() + "_xyz", original.getId(), original.getMax());
|
||||
case 1: return new SliceBuilder(original.getField(), original.getId() - 1, original.getMax());
|
||||
|
@ -84,6 +228,63 @@ public class SliceBuilderTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
private IndexSettings createIndexSettings(Version indexVersionCreated, int numShards) {
|
||||
Settings settings = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, indexVersionCreated)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.build();
|
||||
IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build();
|
||||
return new IndexSettings(indexState, Settings.EMPTY);
|
||||
}
|
||||
|
||||
private ShardSearchRequest createRequest(int shardId) {
|
||||
return createRequest(shardId, Strings.EMPTY_ARRAY, null);
|
||||
}
|
||||
|
||||
private ShardSearchRequest createRequest(int shardId, String[] routings, String preference) {
|
||||
return new ShardSearchRequestTest("index", shardId, routings, preference);
|
||||
}
|
||||
|
||||
private QueryShardContext createShardContext(Version indexVersionCreated, IndexReader reader,
|
||||
String fieldName, DocValuesType dvType, int numShards, int shardId) {
|
||||
MappedFieldType fieldType = new MappedFieldType() {
|
||||
@Override
|
||||
public MappedFieldType clone() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String typeName() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query termQuery(Object value, @Nullable QueryShardContext context) {
|
||||
return null;
|
||||
}
|
||||
|
||||
public Query existsQuery(QueryShardContext context) {
|
||||
return null;
|
||||
}
|
||||
};
|
||||
fieldType.setName(fieldName);
|
||||
QueryShardContext context = mock(QueryShardContext.class);
|
||||
when(context.fieldMapper(fieldName)).thenReturn(fieldType);
|
||||
when(context.getIndexReader()).thenReturn(reader);
|
||||
when(context.getShardId()).thenReturn(shardId);
|
||||
IndexSettings indexSettings = createIndexSettings(indexVersionCreated, numShards);
|
||||
when(context.getIndexSettings()).thenReturn(indexSettings);
|
||||
if (dvType != null) {
|
||||
fieldType.setHasDocValues(true);
|
||||
fieldType.setDocValuesType(dvType);
|
||||
IndexNumericFieldData fd = mock(IndexNumericFieldData.class);
|
||||
when(context.getForField(fieldType)).thenReturn(fd);
|
||||
}
|
||||
return context;
|
||||
|
||||
}
|
||||
|
||||
public void testSerialization() throws Exception {
|
||||
SliceBuilder original = randomSliceBuilder();
|
||||
SliceBuilder deserialized = serializedCopy(original);
|
||||
|
@ -131,92 +332,41 @@ public class SliceBuilderTests extends ESTestCase {
|
|||
assertEquals("max must be greater than id", e.getMessage());
|
||||
}
|
||||
|
||||
public void testToFilter() throws IOException {
|
||||
public void testToFilterSimple() throws IOException {
|
||||
Directory dir = new RAMDirectory();
|
||||
try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
|
||||
writer.commit();
|
||||
}
|
||||
QueryShardContext context = mock(QueryShardContext.class);
|
||||
try (IndexReader reader = DirectoryReader.open(dir)) {
|
||||
MappedFieldType fieldType = new MappedFieldType() {
|
||||
@Override
|
||||
public MappedFieldType clone() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String typeName() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query termQuery(Object value, @Nullable QueryShardContext context) {
|
||||
return null;
|
||||
}
|
||||
|
||||
public Query existsQuery(QueryShardContext context) {
|
||||
return null;
|
||||
}
|
||||
};
|
||||
fieldType.setName(IdFieldMapper.NAME);
|
||||
fieldType.setHasDocValues(false);
|
||||
when(context.fieldMapper(IdFieldMapper.NAME)).thenReturn(fieldType);
|
||||
when(context.getIndexReader()).thenReturn(reader);
|
||||
Settings settings = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.build();
|
||||
IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build();
|
||||
IndexSettings indexSettings = new IndexSettings(indexState, Settings.EMPTY);
|
||||
when(context.getIndexSettings()).thenReturn(indexSettings);
|
||||
QueryShardContext context =
|
||||
createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED_NUMERIC, 1,0);
|
||||
SliceBuilder builder = new SliceBuilder(5, 10);
|
||||
Query query = builder.toFilter(context, 0, 1);
|
||||
Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT);
|
||||
assertThat(query, instanceOf(TermsSliceQuery.class));
|
||||
|
||||
assertThat(builder.toFilter(context, 0, 1), equalTo(query));
|
||||
assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query));
|
||||
try (IndexReader newReader = DirectoryReader.open(dir)) {
|
||||
when(context.getIndexReader()).thenReturn(newReader);
|
||||
assertThat(builder.toFilter(context, 0, 1), equalTo(query));
|
||||
assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testToFilterRandom() throws IOException {
|
||||
Directory dir = new RAMDirectory();
|
||||
try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
|
||||
writer.commit();
|
||||
}
|
||||
try (IndexReader reader = DirectoryReader.open(dir)) {
|
||||
MappedFieldType fieldType = new MappedFieldType() {
|
||||
@Override
|
||||
public MappedFieldType clone() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String typeName() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query termQuery(Object value, @Nullable QueryShardContext context) {
|
||||
return null;
|
||||
}
|
||||
|
||||
public Query existsQuery(QueryShardContext context) {
|
||||
return null;
|
||||
}
|
||||
};
|
||||
fieldType.setName("field_doc_values");
|
||||
fieldType.setHasDocValues(true);
|
||||
fieldType.setDocValuesType(DocValuesType.SORTED_NUMERIC);
|
||||
when(context.fieldMapper("field_doc_values")).thenReturn(fieldType);
|
||||
when(context.getIndexReader()).thenReturn(reader);
|
||||
IndexNumericFieldData fd = mock(IndexNumericFieldData.class);
|
||||
when(context.getForField(fieldType)).thenReturn(fd);
|
||||
SliceBuilder builder = new SliceBuilder("field_doc_values", 5, 10);
|
||||
Query query = builder.toFilter(context, 0, 1);
|
||||
QueryShardContext context =
|
||||
createShardContext(Version.CURRENT, reader, "field", DocValuesType.SORTED_NUMERIC, 1,0);
|
||||
SliceBuilder builder = new SliceBuilder("field", 5, 10);
|
||||
Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT);
|
||||
assertThat(query, instanceOf(DocValuesSliceQuery.class));
|
||||
|
||||
assertThat(builder.toFilter(context, 0, 1), equalTo(query));
|
||||
assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query));
|
||||
try (IndexReader newReader = DirectoryReader.open(dir)) {
|
||||
when(context.getIndexReader()).thenReturn(newReader);
|
||||
assertThat(builder.toFilter(context, 0, 1), equalTo(query));
|
||||
assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query));
|
||||
}
|
||||
|
||||
// numSlices > numShards
|
||||
|
@ -226,7 +376,8 @@ public class SliceBuilderTests extends ESTestCase {
|
|||
for (int i = 0; i < numSlices; i++) {
|
||||
for (int j = 0; j < numShards; j++) {
|
||||
SliceBuilder slice = new SliceBuilder("_id", i, numSlices);
|
||||
Query q = slice.toFilter(context, j, numShards);
|
||||
context = createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED, numShards, j);
|
||||
Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT);
|
||||
if (q instanceof TermsSliceQuery || q instanceof MatchAllDocsQuery) {
|
||||
AtomicInteger count = numSliceMap.get(j);
|
||||
if (count == null) {
|
||||
|
@ -250,12 +401,13 @@ public class SliceBuilderTests extends ESTestCase {
|
|||
|
||||
// numShards > numSlices
|
||||
numShards = randomIntBetween(4, 100);
|
||||
numSlices = randomIntBetween(2, numShards-1);
|
||||
numSlices = randomIntBetween(2, numShards - 1);
|
||||
List<Integer> targetShards = new ArrayList<>();
|
||||
for (int i = 0; i < numSlices; i++) {
|
||||
for (int j = 0; j < numShards; j++) {
|
||||
SliceBuilder slice = new SliceBuilder("_id", i, numSlices);
|
||||
Query q = slice.toFilter(context, j, numShards);
|
||||
context = createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED, numShards, j);
|
||||
Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT);
|
||||
if (q instanceof MatchNoDocsQuery == false) {
|
||||
assertThat(q, instanceOf(MatchAllDocsQuery.class));
|
||||
targetShards.add(j);
|
||||
|
@ -271,7 +423,8 @@ public class SliceBuilderTests extends ESTestCase {
|
|||
for (int i = 0; i < numSlices; i++) {
|
||||
for (int j = 0; j < numShards; j++) {
|
||||
SliceBuilder slice = new SliceBuilder("_id", i, numSlices);
|
||||
Query q = slice.toFilter(context, j, numShards);
|
||||
context = createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED, numShards, j);
|
||||
Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT);
|
||||
if (i == j) {
|
||||
assertThat(q, instanceOf(MatchAllDocsQuery.class));
|
||||
} else {
|
||||
|
@ -280,85 +433,35 @@ public class SliceBuilderTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testInvalidField() throws IOException {
|
||||
Directory dir = new RAMDirectory();
|
||||
try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
|
||||
writer.commit();
|
||||
}
|
||||
try (IndexReader reader = DirectoryReader.open(dir)) {
|
||||
MappedFieldType fieldType = new MappedFieldType() {
|
||||
@Override
|
||||
public MappedFieldType clone() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String typeName() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query termQuery(Object value, @Nullable QueryShardContext context) {
|
||||
return null;
|
||||
}
|
||||
|
||||
public Query existsQuery(QueryShardContext context) {
|
||||
return null;
|
||||
}
|
||||
};
|
||||
fieldType.setName("field_without_doc_values");
|
||||
when(context.fieldMapper("field_without_doc_values")).thenReturn(fieldType);
|
||||
when(context.getIndexReader()).thenReturn(reader);
|
||||
SliceBuilder builder = new SliceBuilder("field_without_doc_values", 5, 10);
|
||||
IllegalArgumentException exc =
|
||||
expectThrows(IllegalArgumentException.class, () -> builder.toFilter(context, 0, 1));
|
||||
QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", null, 1,0);
|
||||
SliceBuilder builder = new SliceBuilder("field", 5, 10);
|
||||
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
|
||||
() -> builder.toFilter(null, createRequest(0), context, Version.CURRENT));
|
||||
assertThat(exc.getMessage(), containsString("cannot load numeric doc values"));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public void testToFilterDeprecationMessage() throws IOException {
|
||||
Directory dir = new RAMDirectory();
|
||||
try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
|
||||
writer.commit();
|
||||
}
|
||||
QueryShardContext context = mock(QueryShardContext.class);
|
||||
try (IndexReader reader = DirectoryReader.open(dir)) {
|
||||
MappedFieldType fieldType = new MappedFieldType() {
|
||||
@Override
|
||||
public MappedFieldType clone() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String typeName() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query termQuery(Object value, @Nullable QueryShardContext context) {
|
||||
return null;
|
||||
}
|
||||
|
||||
public Query existsQuery(QueryShardContext context) {
|
||||
return null;
|
||||
}
|
||||
};
|
||||
fieldType.setName("_uid");
|
||||
fieldType.setHasDocValues(false);
|
||||
when(context.fieldMapper("_uid")).thenReturn(fieldType);
|
||||
when(context.getIndexReader()).thenReturn(reader);
|
||||
Settings settings = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.build();
|
||||
IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build();
|
||||
IndexSettings indexSettings = new IndexSettings(indexState, Settings.EMPTY);
|
||||
when(context.getIndexSettings()).thenReturn(indexSettings);
|
||||
QueryShardContext context = createShardContext(Version.V_6_3_0, reader, "_uid", null, 1,0);
|
||||
SliceBuilder builder = new SliceBuilder("_uid", 5, 10);
|
||||
Query query = builder.toFilter(context, 0, 1);
|
||||
Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT);
|
||||
assertThat(query, instanceOf(TermsSliceQuery.class));
|
||||
assertThat(builder.toFilter(context, 0, 1), equalTo(query));
|
||||
assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query));
|
||||
assertWarnings("Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public void testSerializationBackcompat() throws IOException {
|
||||
|
@ -375,4 +478,35 @@ public class SliceBuilderTests extends ESTestCase {
|
|||
SliceBuilder::new, Version.V_6_3_0);
|
||||
assertEquals(sliceBuilder, copy63);
|
||||
}
|
||||
|
||||
public void testToFilterWithRouting() throws IOException {
|
||||
Directory dir = new RAMDirectory();
|
||||
try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
|
||||
writer.commit();
|
||||
}
|
||||
ClusterService clusterService = mock(ClusterService.class);
|
||||
ClusterState state = mock(ClusterState.class);
|
||||
when(state.metaData()).thenReturn(MetaData.EMPTY_META_DATA);
|
||||
when(clusterService.state()).thenReturn(state);
|
||||
OperationRouting routing = mock(OperationRouting.class);
|
||||
GroupShardsIterator<ShardIterator> it = new GroupShardsIterator<>(
|
||||
Collections.singletonList(
|
||||
new SearchShardIterator(null, new ShardId("index", "index", 1), null, null)
|
||||
)
|
||||
);
|
||||
when(routing.searchShards(any(), any(), any(), any())).thenReturn(it);
|
||||
when(clusterService.operationRouting()).thenReturn(routing);
|
||||
when(clusterService.getSettings()).thenReturn(Settings.EMPTY);
|
||||
try (IndexReader reader = DirectoryReader.open(dir)) {
|
||||
QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", DocValuesType.SORTED, 5, 0);
|
||||
SliceBuilder builder = new SliceBuilder("field", 6, 10);
|
||||
String[] routings = new String[] { "foo" };
|
||||
Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, Version.CURRENT);
|
||||
assertEquals(new DocValuesSliceQuery("field", 6, 10), query);
|
||||
query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.CURRENT);
|
||||
assertEquals(new DocValuesSliceQuery("field", 6, 10), query);
|
||||
query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.V_6_2_0);
|
||||
assertEquals(new DocValuesSliceQuery("field", 1, 2), query);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -23,10 +23,8 @@ subprojects {
    ext.licenseName = 'Elastic License'
    ext.licenseUrl = "https://raw.githubusercontent.com/elastic/elasticsearch/${licenseCommit}/licenses/ELASTIC-LICENSE.txt"

    plugins.withType(BuildPlugin).whenPluginAdded {
        project.licenseFile = rootProject.file('licenses/ELASTIC-LICENSE.txt')
        project.noticeFile = xpackRootProject.file('NOTICE.txt')
    }
    project.ext.licenseFile = rootProject.file('licenses/ELASTIC-LICENSE.txt')
    project.ext.noticeFile = xpackRootProject.file('NOTICE.txt')

    plugins.withType(PluginBuildPlugin).whenPluginAdded {
        project.esplugin.licenseFile = rootProject.file('licenses/ELASTIC-LICENSE.txt')
@@ -3,8 +3,7 @@
=== Install {es} with Docker

{es} is also available as Docker images.
The images use https://hub.docker.com/_/centos/[centos:7] as the base image and
are available with {xpack-ref}/xpack-introduction.html[X-Pack].
The images use https://hub.docker.com/_/centos/[centos:7] as the base image.

A list of all published Docker images and tags can be found in
https://www.docker.elastic.co[www.docker.elastic.co]. The source code can be found
@@ -12,28 +11,19 @@ on https://github.com/elastic/elasticsearch-docker/tree/{branch}[GitHub].

==== Image types

The images are available in three different configurations or "flavors". The
`basic` flavor, which is the default, ships with {xpack} Basic features
pre-installed and automatically activated with a free licence. The `platinum`
flavor features all {xpack} functionally under a 30-day trial licence. The `oss`
flavor does not include {xpack}, and contains only open-source {es}.
These images are free to use under the Elastic license. They contain open source
and free commercial features and access to paid commercial features.
{xpack-ref}/license-management.html[Start a 30-day trial] to try out all of the
paid commercial features. See the
https://www.elastic.co/subscriptions[Subscriptions] page for information about
Elastic license levels.

NOTE: {xpack-ref}/xpack-security.html[X-Pack Security] is enabled in the `platinum`
image. To access your cluster, it's necessary to set an initial password for the
`elastic` user. The initial password can be set at start up time via the
`ELASTIC_PASSWORD` environment variable:
Alternatively, you can download `-oss` images, which contain only features that
are available under the Apache 2.0 license.

["source","txt",subs="attributes"]
--------------------------------------------
docker run -e ELASTIC_PASSWORD=MagicWord {docker-repo}-platinum:{version}
--------------------------------------------
==== Pulling the image

NOTE: The `platinum` image includes a trial license for 30 days. After that, you
can obtain one of the https://www.elastic.co/subscriptions[available
subscriptions] or revert to a Basic licence. The Basic license is free and
includes a selection of {xpack} features.

Obtaining {Es} for Docker is as simple as issuing a +docker pull+ command
Obtaining {es} for Docker is as simple as issuing a +docker pull+ command
against the Elastic Docker registry.
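
For illustration only (not part of this change), pulling the default or `-oss` image is a single
command. This sketch reuses the `{docker-repo}` and `{version}` attributes from the example above;
the exact image path and tag for your version should be taken from
https://www.docker.elastic.co[www.docker.elastic.co]:

["source","sh",subs="attributes"]
--------------------------------------------
docker pull {docker-repo}:{version}
docker pull {docker-repo}-oss:{version}
--------------------------------------------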

ifeval::["{release-state}"=="unreleased"]
|
|
@ -31,6 +31,20 @@ configurations {
|
|||
task testJar(type: Jar) {
|
||||
appendix 'test'
|
||||
from sourceSets.test.output
|
||||
/*
|
||||
* Stick the license and notice file in the jar. This isn't strictly
|
||||
* needed because we don't publish it but it makes our super-paranoid
|
||||
* tests happy.
|
||||
*/
|
||||
metaInf {
|
||||
from(project.licenseFile.parent) {
|
||||
include project.licenseFile.name
|
||||
rename { 'LICENSE.txt' }
|
||||
}
|
||||
from(project.noticeFile.parent) {
|
||||
include project.noticeFile.name
|
||||
}
|
||||
}
|
||||
}
|
||||
artifacts {
|
||||
testArtifacts testJar
|
||||
|
|
|
@ -3,23 +3,6 @@
|
|||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
/*
|
||||
* ELASTICSEARCH CONFIDENTIAL
|
||||
* __________________
|
||||
*
|
||||
* [2017] Elasticsearch Incorporated. All Rights Reserved.
|
||||
*
|
||||
* NOTICE: All information contained herein is, and remains
|
||||
* the property of Elasticsearch Incorporated and its suppliers,
|
||||
* if any. The intellectual and technical concepts contained
|
||||
* herein are proprietary to Elasticsearch Incorporated
|
||||
* and its suppliers and may be covered by U.S. and Foreign Patents,
|
||||
* patents in process, and are protected by trade secret or copyright law.
|
||||
* Dissemination of this information or reproduction of this material
|
||||
* is strictly forbidden unless prior written permission is obtained
|
||||
* from Elasticsearch Incorporated.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.xpack.security.authc.support.mapper;
|
||||
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
|
|
|
@@ -45,9 +45,12 @@ dependencyLicenses {
 * can be easilly shipped around and used.
 */
jar {
    from {
    from({
        configurations.compile.collect { it.isDirectory() ? it : zipTree(it) }
        configurations.runtime.collect { it.isDirectory() ? it : zipTree(it) }
    }) {
        // We don't need the META-INF from the things we bundle. For now.
        exclude 'META-INF/*'
    }
    manifest {
        attributes 'Main-Class': 'org.elasticsearch.xpack.sql.cli.Cli'
@@ -15,6 +15,7 @@ import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

import java.io.IOException;
import java.util.TimeZone;

import static java.util.Arrays.asList;
import static java.util.Collections.emptyList;
@@ -24,7 +25,7 @@ import static java.util.Collections.singletonMap;
public class CompositeKeyExtractorTests extends AbstractWireSerializingTestCase<CompositeKeyExtractor> {

    public static CompositeKeyExtractor randomCompositeKeyExtractor() {
        return new CompositeKeyExtractor(randomAlphaOfLength(16), randomFrom(asList(Property.values())), randomTimeZone());
        return new CompositeKeyExtractor(randomAlphaOfLength(16), randomFrom(asList(Property.values())), randomSafeTimeZone());
    }

    @Override
@@ -58,7 +59,7 @@ public class CompositeKeyExtractorTests extends AbstractWireSerializingTestCase<
    }

    public void testExtractDate() {
        CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, randomTimeZone());
        CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, randomSafeTimeZone());

        long millis = System.currentTimeMillis();
        Bucket bucket = new TestBucket(singletonMap(extractor.key(), millis), randomLong(), new Aggregations(emptyList()));
@@ -73,4 +74,13 @@ public class CompositeKeyExtractorTests extends AbstractWireSerializingTestCase<
        SqlIllegalArgumentException exception = expectThrows(SqlIllegalArgumentException.class, () -> extractor.extract(bucket));
        assertEquals("Invalid date key returned: " + value, exception.getMessage());
    }

    /**
     * We need to exclude SystemV/* time zones because they cannot be converted
     * back to DateTimeZone which we currently still need to do internally,
     * e.g. in bwc serialization and in the extract() method
     */
    private static TimeZone randomSafeTimeZone() {
        return randomValueOtherThanMany(tz -> tz.getID().startsWith("SystemV"), () -> randomTimeZone());
    }
}
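
For readers unfamiliar with the failure that the comment above describes, a minimal standalone
sketch follows. It is not part of the change: it assumes Joda-Time on the classpath, and the
`SystemV/AST4` ID and class name are purely illustrative. Per the comment above, such JDK time
zones cannot be converted back to a Joda `DateTimeZone`, which is why `randomSafeTimeZone()`
filters them out.

import java.util.TimeZone;

import org.joda.time.DateTimeZone;

public class SystemVTimeZoneSketch {
    public static void main(String[] args) {
        // A legacy SystemV/* zone ID as returned by the JDK (example ID, assumption).
        TimeZone systemV = TimeZone.getTimeZone("SystemV/AST4");
        try {
            // Per the test comment, Joda-Time cannot resolve SystemV/* IDs, so this
            // conversion is expected to fail rather than return a DateTimeZone.
            DateTimeZone converted = DateTimeZone.forTimeZone(systemV);
            System.out.println("converted: " + converted);
        } catch (IllegalArgumentException e) {
            System.out.println("not convertible: " + e.getMessage());
        }
    }
}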
|
|
@ -3,23 +3,6 @@
|
|||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
/*
|
||||
* ELASTICSEARCH CONFIDENTIAL
|
||||
* __________________
|
||||
*
|
||||
* [2017] Elasticsearch Incorporated. All Rights Reserved.
|
||||
*
|
||||
* NOTICE: All information contained herein is, and remains
|
||||
* the property of Elasticsearch Incorporated and its suppliers,
|
||||
* if any. The intellectual and technical concepts contained
|
||||
* herein are proprietary to Elasticsearch Incorporated
|
||||
* and its suppliers and may be covered by U.S. and Foreign Patents,
|
||||
* patents in process, and are protected by trade secret or copyright law.
|
||||
* Dissemination of this information or reproduction of this material
|
||||
* is strictly forbidden unless prior written permission is obtained
|
||||
* from Elasticsearch Incorporated.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.xpack.sql.expression;
|
||||
|
||||
import org.elasticsearch.xpack.sql.tree.AbstractNodeTestCase;
|
||||
|
|
|
@ -3,23 +3,6 @@
|
|||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
/*
|
||||
* ELASTICSEARCH CONFIDENTIAL
|
||||
* __________________
|
||||
*
|
||||
* [2017] Elasticsearch Incorporated. All Rights Reserved.
|
||||
*
|
||||
* NOTICE: All information contained herein is, and remains
|
||||
* the property of Elasticsearch Incorporated and its suppliers,
|
||||
* if any. The intellectual and technical concepts contained
|
||||
* herein are proprietary to Elasticsearch Incorporated
|
||||
* and its suppliers and may be covered by U.S. and Foreign Patents,
|
||||
* patents in process, and are protected by trade secret or copyright law.
|
||||
* Dissemination of this information or reproduction of this material
|
||||
* is strictly forbidden unless prior written permission is obtained
|
||||
* from Elasticsearch Incorporated.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.xpack.sql.expression.function;
|
||||
|
||||
import org.elasticsearch.xpack.sql.expression.Expression;
|
||||
|
|
|
@ -39,34 +39,6 @@ public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa
|
|||
XPackRestTestHelper.waitForMlTemplates(client());
|
||||
}
|
||||
|
||||
/**
|
||||
* Enables an HTTP exporter for monitoring so that we can test the production-level exporter (not the local exporter).
|
||||
*
|
||||
* The build.gradle file disables data collection, so the expectation is that any monitoring rest tests will use the
|
||||
* "_xpack/monitoring/_bulk" endpoint to lazily setup the templates on-demand and fill in data without worrying about
|
||||
* timing.
|
||||
*/
|
||||
@Before
|
||||
public void waitForMonitoring() throws Exception {
|
||||
final String[] nodes = System.getProperty("tests.rest.cluster").split(",");
|
||||
final Map<String, Object> settings = new HashMap<>();
|
||||
|
||||
settings.put("xpack.monitoring.exporters._http.enabled", true);
|
||||
// only select the last node to avoid getting the "old" node in a mixed cluster
|
||||
// if we ever randomize the order that the nodes are restarted (or add more nodes), then we need to verify which node we select
|
||||
settings.put("xpack.monitoring.exporters._http.host", nodes[nodes.length - 1]);
|
||||
|
||||
assertBusy(() -> {
|
||||
final ClientYamlTestResponse response =
|
||||
getAdminExecutionContext().callApi("cluster.put_settings",
|
||||
emptyMap(),
|
||||
singletonList(singletonMap("transient", settings)),
|
||||
emptyMap());
|
||||
|
||||
assertThat(response.evaluate("acknowledged"), is(true));
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean preserveIndicesUponCompletion() {
|
||||
return true;
|
||||
|
|
|
@ -1,48 +0,0 @@
|
|||
---
|
||||
setup:
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: yellow
|
||||
|
||||
---
|
||||
"Index monitoring data and search on the mixed cluster":
|
||||
- skip:
|
||||
version: "all"
|
||||
reason: "AwaitsFix'ing, see x-pack-elasticsearch #2948"
|
||||
|
||||
- do:
|
||||
search:
|
||||
index: .monitoring-kibana-*
|
||||
body: { "query": { "term" : { "type": "old_cluster" } } }
|
||||
- match: { hits.total: 2 }
|
||||
|
||||
- do:
|
||||
xpack.monitoring.bulk:
|
||||
system_id: "kibana"
|
||||
system_api_version: "6"
|
||||
interval: "123456ms"
|
||||
type: "mixed_cluster"
|
||||
body:
|
||||
- '{"index": {}}'
|
||||
- '{"field": "value_3"}'
|
||||
- '{"index": {}}'
|
||||
- '{"field": "value_4"}'
|
||||
- '{"index": {}}'
|
||||
- '{"field": "value_5"}'
|
||||
|
||||
- is_false: errors
|
||||
|
||||
- do:
|
||||
indices.refresh: {}
|
||||
|
||||
- do:
|
||||
search:
|
||||
index: .monitoring-kibana-*
|
||||
body: { "query": { "term" : { "type": "old_cluster" } } }
|
||||
- match: { hits.total: 2 }
|
||||
|
||||
- do:
|
||||
search:
|
||||
index: .monitoring-kibana-*
|
||||
body: { "query": { "term" : { "type": "mixed_cluster" } } }
|
||||
- match: { hits.total: 3 }
|
|
@ -1,33 +0,0 @@
|
|||
---
|
||||
setup:
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: yellow
|
||||
|
||||
---
|
||||
"Index monitoring data and search on the old cluster":
|
||||
- skip:
|
||||
version: "all"
|
||||
reason: "AwaitsFix'ing, see x-pack-elasticsearch #2948"
|
||||
- do:
|
||||
xpack.monitoring.bulk:
|
||||
system_id: "kibana"
|
||||
system_api_version: "6"
|
||||
interval: "123456ms"
|
||||
type: "old_cluster"
|
||||
body:
|
||||
- '{"index": {}}'
|
||||
- '{"field": "value_1"}'
|
||||
- '{"index": {}}'
|
||||
- '{"field": "value_2"}'
|
||||
|
||||
- is_false: errors
|
||||
|
||||
- do:
|
||||
indices.refresh: {}
|
||||
|
||||
- do:
|
||||
search:
|
||||
index: .monitoring-kibana-*
|
||||
body: { "query": { "term" : { "type": "old_cluster" } } }
|
||||
- match: { hits.total: 2 }
|
|
@ -1,54 +0,0 @@
|
|||
---
|
||||
setup:
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: yellow
|
||||
|
||||
---
|
||||
"Index monitoring data and search on the upgraded cluster":
|
||||
- skip:
|
||||
version: "all"
|
||||
reason: "AwaitsFix'ing, see x-pack-elasticsearch #2948"
|
||||
|
||||
- do:
|
||||
search:
|
||||
index: .monitoring-kibana-*
|
||||
body: { "query": { "term" : { "type": "old_cluster" } } }
|
||||
- match: { hits.total: 2 }
|
||||
|
||||
- do:
|
||||
search:
|
||||
index: .monitoring-kibana-*
|
||||
body: { "query": { "term" : { "type": "mixed_cluster" } } }
|
||||
- match: { hits.total: 3 }
|
||||
|
||||
- do:
|
||||
xpack.monitoring.bulk:
|
||||
system_id: "kibana"
|
||||
system_api_version: "6"
|
||||
interval: "123456ms"
|
||||
type: "upgraded_cluster"
|
||||
body:
|
||||
- '{"index": {}}'
|
||||
- '{"field": "value_6"}'
|
||||
- '{"index": {}}'
|
||||
- '{"field": "value_7"}'
|
||||
- '{"index": {}}'
|
||||
- '{"field": "value_8"}'
|
||||
|
||||
- is_false: errors
|
||||
|
||||
- do:
|
||||
indices.refresh: {}
|
||||
|
||||
- do:
|
||||
search:
|
||||
index: .monitoring-kibana-*
|
||||
body: { "query": { "terms" : { "type": [ "old_cluster", "mixed_cluster" ] } } }
|
||||
- match: { hits.total: 5 }
|
||||
|
||||
- do:
|
||||
search:
|
||||
index: .monitoring-kibana-*
|
||||
body: { "query": { "term" : { "type": "upgraded_cluster" } } }
|
||||
- match: { hits.total: 3 }
|
|
@ -1,14 +0,0 @@
|
|||
apply plugin: 'elasticsearch.standalone-rest-test'
|
||||
apply plugin: 'elasticsearch.rest-test'
|
||||
|
||||
dependencies {
|
||||
testCompile project(path: xpackModule('core'), configuration: 'runtime')
|
||||
testCompile project(path: xpackModule('watcher'), configuration: 'runtime')
|
||||
testCompile project(path: ':modules:lang-mustache', configuration: 'runtime')
|
||||
}
|
||||
|
||||
integTestCluster {
|
||||
setting 'xpack.security.enabled', 'false'
|
||||
setting 'xpack.monitoring.enabled', 'false'
|
||||
setting 'xpack.license.self_generated.type', 'trial'
|
||||
}
|
|
@ -1,75 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.smoketest;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.annotations.Name;
|
||||
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
|
||||
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
|
||||
import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse;
|
||||
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
|
||||
import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
/** Runs rest tests against external cluster */
|
||||
public class WatcherWithMustacheIT extends ESClientYamlSuiteTestCase {
|
||||
|
||||
public WatcherWithMustacheIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
|
||||
super(testCandidate);
|
||||
}
|
||||
|
||||
@ParametersFactory
|
||||
public static Iterable<Object[]> parameters() throws Exception {
|
||||
return ESClientYamlSuiteTestCase.createParameters();
|
||||
}
|
||||
|
||||
@Before
|
||||
public void startWatcher() throws Exception {
|
||||
final List<String> watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES);
|
||||
assertBusy(() -> {
|
||||
try {
|
||||
getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap());
|
||||
|
||||
for (String template : watcherTemplates) {
|
||||
ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template",
|
||||
singletonMap("name", template), emptyList(), emptyMap());
|
||||
assertThat(templateExistsResponse.getStatusCode(), is(200));
|
||||
}
|
||||
|
||||
ClientYamlTestResponse response =
|
||||
getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap());
|
||||
String state = (String) response.evaluate("stats.0.watcher_state");
|
||||
assertThat(state, is("started"));
|
||||
} catch (IOException e) {
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@After
|
||||
public void stopWatcher() throws Exception {
|
||||
assertBusy(() -> {
|
||||
try {
|
||||
getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap());
|
||||
ClientYamlTestResponse response =
|
||||
getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap());
|
||||
String state = (String) response.evaluate("stats.0.watcher_state");
|
||||
assertThat(state, is("stopped"));
|
||||
} catch (IOException e) {
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
|
@ -1,13 +0,0 @@
|
|||
apply plugin: 'elasticsearch.standalone-rest-test'
|
||||
apply plugin: 'elasticsearch.rest-test'
|
||||
|
||||
dependencies {
|
||||
testCompile project(path: xpackModule('core'), configuration: 'runtime')
|
||||
testCompile project(path: ':modules:lang-painless', configuration: 'runtime')
|
||||
}
|
||||
|
||||
integTestCluster {
|
||||
setting 'xpack.security.enabled', 'false'
|
||||
setting 'xpack.monitoring.enabled', 'false'
|
||||
setting 'xpack.license.self_generated.type', 'trial'
|
||||
}
|
|
@ -1,74 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.smoketest;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
|
||||
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
|
||||
import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse;
|
||||
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
|
||||
import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
/** Runs rest tests against external cluster */
|
||||
public class WatcherWithPainlessIT extends ESClientYamlSuiteTestCase {
|
||||
|
||||
public WatcherWithPainlessIT(ClientYamlTestCandidate testCandidate) {
|
||||
super(testCandidate);
|
||||
}
|
||||
|
||||
@ParametersFactory
|
||||
public static Iterable<Object[]> parameters() throws Exception {
|
||||
return ESClientYamlSuiteTestCase.createParameters();
|
||||
}
|
||||
|
||||
@Before
|
||||
public void startWatcher() throws Exception {
|
||||
final List<String> watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES);
|
||||
assertBusy(() -> {
|
||||
try {
|
||||
getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap());
|
||||
|
||||
for (String template : watcherTemplates) {
|
||||
ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template",
|
||||
singletonMap("name", template), emptyList(), emptyMap());
|
||||
assertThat(templateExistsResponse.getStatusCode(), is(200));
|
||||
}
|
||||
|
||||
ClientYamlTestResponse response =
|
||||
getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap());
|
||||
String state = (String) response.evaluate("stats.0.watcher_state");
|
||||
assertThat(state, is("started"));
|
||||
} catch (IOException e) {
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@After
|
||||
public void stopWatcher() throws Exception {
|
||||
assertBusy(() -> {
|
||||
try {
|
||||
getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap());
|
||||
ClientYamlTestResponse response =
|
||||
getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap());
|
||||
String state = (String) response.evaluate("stats.0.watcher_state");
|
||||
assertThat(state, is("stopped"));
|
||||
} catch (IOException e) {
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
|
@@ -9,6 +9,8 @@ apply plugin: 'elasticsearch.rest-test'
dependencies {
    testCompile project(path: xpackModule('core'), configuration: 'runtime')
    testCompile project(path: xpackModule('watcher'), configuration: 'runtime')
    testCompile project(path: ':modules:lang-mustache', configuration: 'runtime')
    testCompile project(path: ':modules:lang-painless', configuration: 'runtime')
}

integTestCluster {
|
|
|
@ -0,0 +1,95 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.smoketest;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.annotations.Name;
|
||||
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
|
||||
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
|
||||
import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse;
|
||||
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
|
||||
import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
/** Runs rest tests against external cluster */
|
||||
public class WatcherRestIT extends ESClientYamlSuiteTestCase {
|
||||
|
||||
public WatcherRestIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
|
||||
super(testCandidate);
|
||||
}
|
||||
|
||||
@ParametersFactory
|
||||
public static Iterable<Object[]> parameters() throws Exception {
|
||||
return ESClientYamlSuiteTestCase.createParameters();
|
||||
}
|
||||
|
||||
@Before
|
||||
public void startWatcher() throws Exception {
|
||||
assertBusy(() -> {
|
||||
ClientYamlTestResponse response =
|
||||
getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap());
|
||||
String state = (String) response.evaluate("stats.0.watcher_state");
|
||||
|
||||
switch (state) {
|
||||
case "stopped":
|
||||
ClientYamlTestResponse startResponse =
|
||||
getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap());
|
||||
boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged");
|
||||
assertThat(isAcknowledged, is(true));
|
||||
break;
|
||||
case "stopping":
|
||||
throw new AssertionError("waiting until stopping state reached stopped state to start again");
|
||||
case "starting":
|
||||
throw new AssertionError("waiting until starting state reached started state");
|
||||
case "started":
|
||||
// all good here, we are done
|
||||
break;
|
||||
default:
|
||||
throw new AssertionError("unknown state[" + state + "]");
|
||||
}
|
||||
});
|
||||
|
||||
assertBusy(() -> {
|
||||
for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) {
|
||||
ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template",
|
||||
singletonMap("name", template), emptyList(), emptyMap());
|
||||
assertThat(templateExistsResponse.getStatusCode(), is(200));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@After
|
||||
public void stopWatcher() throws Exception {
|
||||
assertBusy(() -> {
|
||||
ClientYamlTestResponse response =
|
||||
getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap());
|
||||
String state = (String) response.evaluate("stats.0.watcher_state");
|
||||
|
||||
switch (state) {
|
||||
case "stopped":
|
||||
// all good here, we are done
|
||||
break;
|
||||
case "stopping":
|
||||
throw new AssertionError("waiting until stopping state reached stopped state");
|
||||
case "starting":
|
||||
throw new AssertionError("waiting until starting state reached started state to stop");
|
||||
case "started":
|
||||
ClientYamlTestResponse stopResponse =
|
||||
getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap());
|
||||
boolean isAcknowledged = (boolean) stopResponse.evaluate("acknowledged");
|
||||
assertThat(isAcknowledged, is(true));
|
||||
break;
|
||||
default:
|
||||
throw new AssertionError("unknown state[" + state + "]");
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
|
@@ -30,7 +30,7 @@ import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;

public class WatcherTemplateTests extends ESTestCase {
public class WatcherTemplateIT extends ESTestCase {

    private TextTemplateEngine textTemplateEngine;