Merge branch 'master' into ccr
commit 6d55d075ab

@@ -110,18 +110,22 @@ IntelliJ users can automatically configure their IDE: `gradle idea`
then `File->New Project From Existing Sources`. Point to the root of
the source directory, select
`Import project from external model->Gradle`, enable
`Use auto-import`. Additionally, in order to run tests directly from
IDEA 2017.2 and above it is required to disable IDEA run launcher to avoid
finding yourself in "jar hell", which can be achieved by adding the
`Use auto-import`. In order to run tests directly from
IDEA 2017.2 and above, it is required to disable the IDEA run launcher in order to avoid
`idea_rt.jar` causing "jar hell". This can be achieved by adding the
`-Didea.no.launcher=true` [JVM
option](https://intellij-support.jetbrains.com/hc/en-us/articles/206544869-Configuring-JVM-options-and-platform-properties)
or by adding `idea.no.launcher=true` to the
`idea.properties`[https://www.jetbrains.com/help/idea/file-idea-properties.html]
file which can be accessed under Help > Edit Custom Properties within IDEA. You
may also need to [remove `ant-javafx.jar` from your
classpath][https://github.com/elastic/elasticsearch/issues/14348] if that is
option](https://intellij-support.jetbrains.com/hc/en-us/articles/206544869-Configuring-JVM-options-and-platform-properties).
Alternatively, `idea.no.launcher=true` can be set in the
[`idea.properties`](https://www.jetbrains.com/help/idea/file-idea-properties.html)
file which can be accessed under Help > Edit Custom Properties (this will require a
restart of IDEA). For IDEA 2017.3 and above, in addition to the JVM option, you will need to go to
`Run->Edit Configurations->...->Defaults->JUnit` and change the value for the `Shorten command line` setting from
`user-local default: none` to `classpath file`. You may also need to [remove `ant-javafx.jar` from your
classpath](https://github.com/elastic/elasticsearch/issues/14348) if that is
reported as a source of jar hell.
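
For reference, the property described above is a single line; a minimal sketch of the resulting `idea.properties` entry (the comment is mine, not from the file):

    # Opened via Help > Edit Custom Properties in IDEA
    idea.no.launcher=true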

To run an instance of elasticsearch from the source code run `gradle run`

The Elasticsearch codebase makes heavy use of Java `assert`s and the
test runner requires that assertions be enabled within the JVM. This
can be accomplished by passing the flag `-ea` to the JVM on startup.
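
To make the effect of `-ea` concrete, here is a minimal, self-contained sketch; the class name and message are hypothetical, not part of the codebase:

    // AssertDemo.java -- hypothetical illustration only
    public class AssertDemo {
        public static void main(String[] args) {
            // With `java -ea AssertDemo` this throws AssertionError;
            // with plain `java AssertDemo` the JVM skips the check entirely.
            assert args.length > 0 : "expected at least one argument";
            System.out.println("assertions did not fire");
        }
    }

This is why the test runner insists on the flag: without `-ea`, every `assert` in the codebase silently becomes a no-op.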

@@ -307,4 +307,9 @@ Defaults env_keep += "BATS_ARCHIVES"
    SUDOERS_VARS
    chmod 0440 /etc/sudoers.d/elasticsearch_vars
  SHELL
  # This prevents leftovers from previous tests using the
  # same VM from messing up the current test
  config.vm.provision "clean_tmp", run: "always", type: "shell", inline: <<-SHELL
    rm -rf /tmp/elasticsearch*
  SHELL
end

@@ -196,6 +196,7 @@ subprojects {
    "org.elasticsearch.plugin:parent-join-client:${version}": ':modules:parent-join',
    "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}": ':modules:aggs-matrix-stats',
    "org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator',
    "org.elasticsearch.plugin:rank-eval-client:${version}": ':modules:rank-eval',
  ]

  for (final Version version : versionCollection.versionsIndexCompatibleWithCurrent) {

@@ -131,12 +131,6 @@ class BuildPlugin implements Plugin<Project> {
            throw new GradleException("${minGradle} or above is required to build elasticsearch")
        }

        final GradleVersion gradle42 = GradleVersion.version('4.2')
        final GradleVersion gradle43 = GradleVersion.version('4.3')
        if (currentGradleVersion >= gradle42 && currentGradleVersion < gradle43) {
            throw new GradleException("${currentGradleVersion} is not compatible with the elasticsearch build")
        }

        // enforce Java version
        if (javaVersionEnum < minimumJava) {
            throw new GradleException("Java ${minimumJava} or above is required to build Elasticsearch")

@@ -29,6 +29,7 @@ import java.util.regex.Matcher
class VersionCollection {

    private final List<Version> versions
    private final boolean buildSnapshot = System.getProperty("build.snapshot", "true") == "true"

    /**
     * Construct a VersionCollection from the lines of the Version.java file.

@@ -63,7 +64,10 @@ class VersionCollection {
            throw new GradleException("Unexpectedly found no version constants in Versions.java");
        }

        // The tip of each minor series (>= 5.6) is unreleased, so set their 'snapshot' flags
        /*
         * The tip of each minor series (>= 5.6) is unreleased, so they must be built from source (we set branch to non-null), and we set
         * the snapshot flag if and only if build.snapshot is true.
         */
        Version prevConsideredVersion = null
        boolean found6xSnapshot = false
        for (final int versionIndex = versions.size() - 1; versionIndex >= 0; versionIndex--) {

@@ -85,7 +89,7 @@ class VersionCollection {

                versions[versionIndex] = new Version(
                    currConsideredVersion.major, currConsideredVersion.minor,
                    currConsideredVersion.revision, currConsideredVersion.suffix, true, branch)
                    currConsideredVersion.revision, currConsideredVersion.suffix, buildSnapshot, branch)
            }

            if (currConsideredVersion.onOrBefore("5.6.0")) {

@@ -95,12 +99,6 @@ class VersionCollection {
            prevConsideredVersion = currConsideredVersion
        }

        // If we're making a release build then the current should not be a snapshot after all.
        final boolean currentIsSnapshot = "true" == System.getProperty("build.snapshot", "true")
        if (false == currentIsSnapshot) {
            versions[-1] = new Version(versions[-1].major, versions[-1].minor, versions[-1].revision, versions[-1].suffix, false, null)
        }

        this.versions = Collections.unmodifiableList(versions)
    }

@@ -137,7 +135,7 @@ class VersionCollection {
    private Version getLastSnapshotWithMajor(int targetMajor) {
        final String currentVersion = currentVersion.toString()
        final int snapshotIndex = versions.findLastIndexOf {
            it.major == targetMajor && it.before(currentVersion) && it.snapshot
            it.major == targetMajor && it.before(currentVersion) && it.snapshot == buildSnapshot
        }
        return snapshotIndex == -1 ? null : versions[snapshotIndex]
    }

@@ -69,7 +69,6 @@ public class AntFixture extends AntTask implements Fixture {
     * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
     * condition is for http on the http port.
     */
    @Input
    Closure waitCondition = { AntFixture fixture, AntBuilder ant ->
        File tmpFile = new File(fixture.cwd, 'wait.success')
        ant.get(src: "http://${fixture.addressAndPort}",

@@ -166,11 +166,13 @@ class ClusterFormationTasks {
        Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) {
            delete node.homeDir
            delete node.cwd
        }
        setup = project.tasks.create(name: taskName(prefix, node, 'createCwd'), type: DefaultTask, dependsOn: setup) {
            doLast {
                node.cwd.mkdirs()
            }
            outputs.dir node.cwd
        }

        setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node)
        setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node)
        setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, distribution)

@@ -267,33 +269,6 @@ class ClusterFormationTasks {
                    into node.baseDir
                }
                break;
            case 'rpm':
                File rpmDatabase = new File(node.baseDir, 'rpm-database')
                File rpmExtracted = new File(node.baseDir, 'rpm-extracted')
                /* Delay reading the location of the rpm file until task execution */
                Object rpm = "${ -> configuration.singleFile}"
                extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
                    commandLine 'rpm', '--badreloc', '--nodeps', '--noscripts', '--notriggers',
                        '--dbpath', rpmDatabase,
                        '--relocate', "/=${rpmExtracted}",
                        '-i', rpm
                    doFirst {
                        rpmDatabase.deleteDir()
                        rpmExtracted.deleteDir()
                    }
                }
                break;
            case 'deb':
                /* Delay reading the location of the deb file until task execution */
                File debExtracted = new File(node.baseDir, 'deb-extracted')
                Object deb = "${ -> configuration.singleFile}"
                extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
                    commandLine 'dpkg-deb', '-x', deb, debExtracted
                    doFirst {
                        debExtracted.deleteDir()
                    }
                }
                break;
            default:
                throw new InvalidUserDataException("Unknown distribution: ${node.config.distribution}")
        }

@@ -658,7 +658,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]RelocationIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]TruncatedRecoveryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]BytesRestResponseTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]AliasResolveRoutingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]AliasRoutingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]SimpleRoutingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]FileScriptTests.java" checks="LineLength" />

@@ -1,5 +1,5 @@
elasticsearch = 7.0.0-alpha1
lucene = 7.2.0-snapshot-8c94404
lucene = 7.2.0-snapshot-7deca62

# optional dependencies
spatial4j = 0.6

@@ -39,6 +39,7 @@ dependencies {
  compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
  compile "org.elasticsearch.plugin:parent-join-client:${version}"
  compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}"
  compile "org.elasticsearch.plugin:rank-eval-client:${version}"

  testCompile "org.elasticsearch.client:test:${version}"
  testCompile "org.elasticsearch.test:framework:${version}"

@@ -21,21 +21,25 @@ package org.elasticsearch.client;

import org.apache.http.Header;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;

import java.io.IOException;
import java.util.Collections;

/**
 * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Indices API.
 *
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices.html">Indices API on elastic.co</a>
 */
public final class IndicesClient {
    private final RestHighLevelClient restHighLevelClient;

    public IndicesClient(RestHighLevelClient restHighLevelClient) {
    IndicesClient(RestHighLevelClient restHighLevelClient) {
        this.restHighLevelClient = restHighLevelClient;
    }

@@ -56,8 +60,55 @@ public final class IndicesClient {
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html">
     * Delete Index API on elastic.co</a>
     */
    public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener, Header... headers) {
    public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener,
                                 Header... headers) {
        restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
            listener, Collections.emptySet(), headers);
    }

    /**
     * Creates an index using the Create Index API
     * <p>
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html">
     * Create Index API on elastic.co</a>
     */
    public CreateIndexResponse createIndex(CreateIndexRequest createIndexRequest, Header... headers) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
            Collections.emptySet(), headers);
    }

    /**
     * Asynchronously creates an index using the Create Index API
     * <p>
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html">
     * Create Index API on elastic.co</a>
     */
    public void createIndexAsync(CreateIndexRequest createIndexRequest, ActionListener<CreateIndexResponse> listener,
                                 Header... headers) {
        restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
            listener, Collections.emptySet(), headers);
    }

    /**
     * Opens an index using the Open Index API
     * <p>
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
     * Open Index API on elastic.co</a>
     */
    public OpenIndexResponse openIndex(OpenIndexRequest openIndexRequest, Header... headers) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent,
            Collections.emptySet(), headers);
    }

    /**
     * Asynchronously opens an index using the Open Index API
     * <p>
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
     * Open Index API on elastic.co</a>
     */
    public void openIndexAsync(OpenIndexRequest openIndexRequest, ActionListener<OpenIndexResponse> listener, Header... headers) {
        restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent,
            listener, Collections.emptySet(), headers);
    }

}
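
For orientation, a hedged usage sketch of the methods added above; the caller class, client setup, and index name are assumptions, while `indices()`, `createIndex`, and `openIndex` are the accessors exercised by the integration tests later in this commit:

    // Hypothetical caller, not part of this commit.
    import java.io.IOException;
    import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
    import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
    import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
    import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
    import org.elasticsearch.client.RestHighLevelClient;

    public class IndicesClientExample {
        static void createThenOpen(RestHighLevelClient client) throws IOException {
            // Synchronous create; the response only reports cluster acknowledgement.
            CreateIndexResponse created = client.indices().createIndex(new CreateIndexRequest("my_index"));
            if (created.isAcknowledged()) {
                // The open call follows the same request/response pattern.
                OpenIndexResponse opened = client.indices().openIndex(new OpenIndexRequest("my_index"));
                assert opened.isAcknowledged();
            }
        }
    }

The async variants take the same request plus an `ActionListener` and return `void`, as the signatures above show.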

@@ -29,12 +29,15 @@ import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.support.ActiveShardCount;

@@ -49,6 +52,7 @@ import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -135,6 +139,32 @@ public final class Request {
        return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
    }

    static Request openIndex(OpenIndexRequest openIndexRequest) {
        String endpoint = endpoint(openIndexRequest.indices(), Strings.EMPTY_ARRAY, "_open");

        Params parameters = Params.builder();

        parameters.withTimeout(openIndexRequest.timeout());
        parameters.withMasterTimeout(openIndexRequest.masterNodeTimeout());
        parameters.withWaitForActiveShards(openIndexRequest.waitForActiveShards());
        parameters.withIndicesOptions(openIndexRequest.indicesOptions());

        return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
    }

    static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException {
        String endpoint = endpoint(createIndexRequest.indices(), Strings.EMPTY_ARRAY, "");

        Params parameters = Params.builder();
        parameters.withTimeout(createIndexRequest.timeout());
        parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout());
        parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards());
        parameters.withUpdateAllTypes(createIndexRequest.updateAllTypes());

        HttpEntity entity = createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE);
        return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity);
    }

    static Request info() {
        return new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
    }

@@ -381,6 +411,18 @@ public final class Request {
        return new Request("DELETE", "/_search/scroll", Collections.emptyMap(), entity);
    }

    static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException {
        Params params = Params.builder();
        params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true");
        if (multiSearchRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) {
            params.putParam("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests()));
        }
        XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent();
        byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent);
        HttpEntity entity = new ByteArrayEntity(source, createContentType(xContent.type()));
        return new Request("GET", "/_msearch", params.getParams(), entity);
    }

    private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
        BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef();
        return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType));

@@ -520,6 +562,13 @@ public final class Request {
            return putParam("timeout", timeout);
        }

        Params withUpdateAllTypes(boolean updateAllTypes) {
            if (updateAllTypes) {
                return putParam("update_all_types", Boolean.TRUE.toString());
            }
            return this;
        }

        Params withVersion(long version) {
            if (version != Versions.MATCH_ANY) {
                return putParam("version", Long.toString(version));

@@ -26,6 +26,8 @@ import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;

@@ -38,6 +40,8 @@ import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;

@@ -281,7 +285,7 @@ public class RestHighLevelClient implements Closeable {
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
     */
    public void getAsync(GetRequest getRequest, ActionListener<GetResponse> listener, Header... headers) {
    public final void getAsync(GetRequest getRequest, ActionListener<GetResponse> listener, Header... headers) {
        performRequestAsyncAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, listener, singleton(404), headers);
    }

@@ -377,6 +381,28 @@ public class RestHighLevelClient implements Closeable {
        performRequestAsyncAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, listener, emptySet(), headers);
    }

    /**
     * Executes a multi search using the msearch API
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html">Multi search API on
     * elastic.co</a>
     */
    public final MultiSearchResponse multiSearch(MultiSearchRequest multiSearchRequest, Header... headers) throws IOException {
        return performRequestAndParseEntity(multiSearchRequest, Request::multiSearch, MultiSearchResponse::fromXContext,
            emptySet(), headers);
    }

    /**
     * Asynchronously executes a multi search using the msearch API
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html">Multi search API on
     * elastic.co</a>
     */
    public final void multiSearchAsync(MultiSearchRequest searchRequest, ActionListener<MultiSearchResponse> listener, Header... headers) {
        performRequestAsyncAndParseEntity(searchRequest, Request::multiSearch, MultiSearchResponse::fromXContext, listener,
            emptySet(), headers);
    }
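
A brief usage sketch of these new multi search entry points, assuming a `RestHighLevelClient` named `client` is already in scope (the index names are made up):

    // Hypothetical caller, not part of this commit.
    MultiSearchRequest msearch = new MultiSearchRequest();
    msearch.add(new SearchRequest("index-a"));
    msearch.add(new SearchRequest("index-b"));
    MultiSearchResponse response = client.multiSearch(msearch);
    // Each item carries either a SearchResponse or a per-request failure.
    for (MultiSearchResponse.Item item : response.getResponses()) {
        if (item.isFailure() == false) {
            System.out.println(item.getResponse().getHits().getTotalHits());
        }
    }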

    /**
     * Executes a search using the Search Scroll API
     *

@@ -20,14 +20,95 @@
package org.elasticsearch.client;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.Locale;

import static org.hamcrest.Matchers.equalTo;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;

import java.util.Map;

import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;

public class IndicesClientIT extends ESRestHighLevelClientTestCase {

    @SuppressWarnings("unchecked")
    public void testCreateIndex() throws IOException {
        {
            // Create index
            String indexName = "plain_index";
            assertFalse(indexExists(indexName));

            CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);

            CreateIndexResponse createIndexResponse =
                execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync);
            assertTrue(createIndexResponse.isAcknowledged());

            assertTrue(indexExists(indexName));
        }
        {
            // Create index with mappings, aliases and settings
            String indexName = "rich_index";
            assertFalse(indexExists(indexName));

            CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);

            Alias alias = new Alias("alias_name");
            alias.filter("{\"term\":{\"year\":2016}}");
            alias.routing("1");
            createIndexRequest.alias(alias);

            Settings.Builder settings = Settings.builder();
            settings.put(SETTING_NUMBER_OF_REPLICAS, 2);
            createIndexRequest.settings(settings);

            XContentBuilder mappingBuilder = JsonXContent.contentBuilder();
            mappingBuilder.startObject().startObject("properties").startObject("field");
            mappingBuilder.field("type", "text");
            mappingBuilder.endObject().endObject().endObject();
            createIndexRequest.mapping("type_name", mappingBuilder);

            CreateIndexResponse createIndexResponse =
                execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync);
            assertTrue(createIndexResponse.isAcknowledged());

            Map<String, Object> indexMetaData = getIndexMetadata(indexName);

            Map<String, Object> settingsData = (Map) indexMetaData.get("settings");
            Map<String, Object> indexSettings = (Map) settingsData.get("index");
            assertEquals("2", indexSettings.get("number_of_replicas"));

            Map<String, Object> aliasesData = (Map) indexMetaData.get("aliases");
            Map<String, Object> aliasData = (Map) aliasesData.get("alias_name");
            assertEquals("1", aliasData.get("index_routing"));
            Map<String, Object> filter = (Map) aliasData.get("filter");
            Map<String, Object> term = (Map) filter.get("term");
            assertEquals(2016, term.get("year"));

            Map<String, Object> mappingsData = (Map) indexMetaData.get("mappings");
            Map<String, Object> typeData = (Map) mappingsData.get("type_name");
            Map<String, Object> properties = (Map) typeData.get("properties");
            Map<String, Object> field = (Map) properties.get("field");

            assertEquals("text", field.get("type"));
        }
    }

    public void testDeleteIndex() throws IOException {
        {
            // Delete index if exists

@@ -54,15 +135,86 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
        }
    }

    public void testOpenExistingIndex() throws IOException {
        String[] indices = randomIndices(1, 5);
        for (String index : indices) {
            createIndex(index);
            closeIndex(index);
            ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search"));
            assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus()));
            assertThat(exception.getMessage().contains(index), equalTo(true));
        }

        OpenIndexRequest openIndexRequest = new OpenIndexRequest(indices);
        OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::openIndex,
            highLevelClient().indices()::openIndexAsync);
        assertTrue(openIndexResponse.isAcknowledged());

        for (String index : indices) {
            Response response = client().performRequest("GET", index + "/_search");
            assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
        }
    }

    public void testOpenNonExistentIndex() throws IOException {
        String[] nonExistentIndices = randomIndices(1, 5);
        for (String nonExistentIndex : nonExistentIndices) {
            assertFalse(indexExists(nonExistentIndex));
        }

        OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndices);
        ElasticsearchException exception = expectThrows(ElasticsearchException.class,
            () -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync));
        assertEquals(RestStatus.NOT_FOUND, exception.status());

        OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndices);
        lenientOpenIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
        OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::openIndex,
            highLevelClient().indices()::openIndexAsync);
        assertThat(lenientOpenIndexResponse.isAcknowledged(), equalTo(true));

        OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndices);
        strictOpenIndexRequest.indicesOptions(IndicesOptions.strictExpandOpen());
        ElasticsearchException strictException = expectThrows(ElasticsearchException.class,
            () -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync));
        assertEquals(RestStatus.NOT_FOUND, strictException.status());
    }

    private static String[] randomIndices(int minIndicesNum, int maxIndicesNum) {
        int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum);
        String[] indices = new String[numIndices];
        for (int i = 0; i < numIndices; i++) {
            indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT);
        }
        return indices;
    }

    private static void createIndex(String index) throws IOException {
        Response response = client().performRequest("PUT", index);

        assertEquals(200, response.getStatusLine().getStatusCode());
        assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
    }

    private static boolean indexExists(String index) throws IOException {
        Response response = client().performRequest("HEAD", index);
        return response.getStatusLine().getStatusCode() == 200;
        return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode();
    }

    private static void closeIndex(String index) throws IOException {
        Response response = client().performRequest("POST", index + "/_close");
        assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
    }

    @SuppressWarnings("unchecked")
    private Map<String, Object> getIndexMetadata(String index) throws IOException {
        Response response = client().performRequest("GET", index);

        XContentType entityContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue());
        Map<String, Object> responseEntity = XContentHelper.convertToMap(entityContentType.xContent(), response.getEntity().getContent(),
            false);

        Map<String, Object> indexMetaData = (Map) responseEntity.get(index);
        assertNotNull(indexMetaData);

        return indexMetaData;
    }
}

@@ -25,23 +25,27 @@ import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;

@@ -56,6 +60,7 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.builder.SearchSourceBuilder;

@@ -72,7 +77,9 @@ import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.StringJoiner;

@@ -81,12 +88,16 @@ import java.util.function.Function;
import java.util.function.Supplier;

import static java.util.Collections.singletonMap;
import static org.elasticsearch.client.Request.REQUEST_BODY_CONTENT_TYPE;
import static org.elasticsearch.client.Request.enforceSameContentType;
import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.nullValue;

public class RequestTests extends ESTestCase {

    public void testConstructor() throws Exception {
    public void testConstructor() {
        final String method = randomFrom("GET", "PUT", "POST", "HEAD", "DELETE");
        final String endpoint = randomAlphaOfLengthBetween(1, 10);
        final Map<String, String> parameters = singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5));

@@ -112,7 +123,7 @@ public class RequestTests extends ESTestCase {
        assertTrue("Request constructor is not public", Modifier.isPublic(constructors[0].getModifiers()));
    }

    public void testClassVisibility() throws Exception {
    public void testClassVisibility() {
        assertTrue("Request class is not public", Modifier.isPublic(Request.class.getModifiers()));
    }

@@ -136,7 +147,7 @@ public class RequestTests extends ESTestCase {
        getAndExistsTest(Request::get, "GET");
    }

    public void testDelete() throws IOException {
    public void testDelete() {
        String index = randomAlphaOfLengthBetween(3, 10);
        String type = randomAlphaOfLengthBetween(3, 10);
        String id = randomAlphaOfLengthBetween(3, 10);

@@ -145,9 +156,9 @@ public class RequestTests extends ESTestCase {
        Map<String, String> expectedParams = new HashMap<>();

        setRandomTimeout(deleteRequest::timeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams);
        setRandomRefreshPolicy(deleteRequest, expectedParams);
        setRandomRefreshPolicy(deleteRequest::setRefreshPolicy, expectedParams);
        setRandomVersion(deleteRequest, expectedParams);
        setRandomVersionType(deleteRequest, expectedParams);
        setRandomVersionType(deleteRequest::versionType, expectedParams);

        if (frequently()) {
            if (randomBoolean()) {

@@ -212,27 +223,13 @@ public class RequestTests extends ESTestCase {
                expectedParams.put("version", Long.toString(version));
            }
        }
        if (randomBoolean()) {
            VersionType versionType = randomFrom(VersionType.values());
            getRequest.versionType(versionType);
            if (versionType != VersionType.INTERNAL) {
                expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT));
            }
        }
        setRandomVersionType(getRequest::versionType, expectedParams);
        if (randomBoolean()) {
            int numStoredFields = randomIntBetween(1, 10);
            String[] storedFields = new String[numStoredFields];
            StringBuilder storedFieldsParam = new StringBuilder();
            for (int i = 0; i < numStoredFields; i++) {
                String storedField = randomAlphaOfLengthBetween(3, 10);
                storedFields[i] = storedField;
                storedFieldsParam.append(storedField);
                if (i < numStoredFields - 1) {
                    storedFieldsParam.append(",");
                }
            }
            String storedFieldsParam = randomFields(storedFields);
            getRequest.storedFields(storedFields);
            expectedParams.put("stored_fields", storedFieldsParam.toString());
            expectedParams.put("stored_fields", storedFieldsParam);
        }
        if (randomBoolean()) {
            randomizeFetchSourceContextParams(getRequest::fetchSourceContext, expectedParams);

@@ -245,7 +242,35 @@ public class RequestTests extends ESTestCase {
        assertEquals(method, request.getMethod());
    }

    public void testDeleteIndex() throws IOException {
    public void testCreateIndex() throws IOException {
        CreateIndexRequest createIndexRequest = new CreateIndexRequest();

        String indexName = "index-" + randomAlphaOfLengthBetween(2, 5);

        createIndexRequest.index(indexName);

        Map<String, String> expectedParams = new HashMap<>();

        setRandomTimeout(createIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
        setRandomMasterTimeout(createIndexRequest, expectedParams);
        setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams);

        if (randomBoolean()) {
            boolean updateAllTypes = randomBoolean();
            createIndexRequest.updateAllTypes(updateAllTypes);
            if (updateAllTypes) {
                expectedParams.put("update_all_types", Boolean.TRUE.toString());
            }
        }

        Request request = Request.createIndex(createIndexRequest);
        assertEquals("/" + indexName, request.getEndpoint());
        assertEquals(expectedParams, request.getParameters());
        assertEquals("PUT", request.getMethod());
        assertToXContentBody(createIndexRequest, request.getEntity());
    }

    public void testDeleteIndex() {
        DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest();

        int numIndices = randomIntBetween(0, 5);

@@ -269,6 +294,29 @@ public class RequestTests extends ESTestCase {
        assertNull(request.getEntity());
    }

    public void testOpenIndex() {
        OpenIndexRequest openIndexRequest = new OpenIndexRequest();
        int numIndices = randomIntBetween(1, 5);
        String[] indices = new String[numIndices];
        for (int i = 0; i < numIndices; i++) {
            indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5);
        }
        openIndexRequest.indices(indices);

        Map<String, String> expectedParams = new HashMap<>();
        setRandomTimeout(openIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
        setRandomMasterTimeout(openIndexRequest, expectedParams);
        setRandomIndicesOptions(openIndexRequest::indicesOptions, openIndexRequest::indicesOptions, expectedParams);
        setRandomWaitForActiveShards(openIndexRequest::waitForActiveShards, expectedParams);

        Request request = Request.openIndex(openIndexRequest);
        StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_open");
        assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
        assertThat(expectedParams, equalTo(request.getParameters()));
        assertThat(request.getMethod(), equalTo("POST"));
        assertThat(request.getEntity(), nullValue());
    }

    public void testIndex() throws IOException {
        String index = randomAlphaOfLengthBetween(3, 10);
        String type = randomAlphaOfLengthBetween(3, 10);

@@ -288,7 +336,7 @@ public class RequestTests extends ESTestCase {
        }

        setRandomTimeout(indexRequest::timeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams);
        setRandomRefreshPolicy(indexRequest, expectedParams);
        setRandomRefreshPolicy(indexRequest::setRefreshPolicy, expectedParams);

        // There is some logic around _create endpoint and version/version type
        if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) {

@@ -296,7 +344,7 @@ public class RequestTests extends ESTestCase {
            expectedParams.put("version", Long.toString(Versions.MATCH_DELETED));
        } else {
            setRandomVersion(indexRequest, expectedParams);
            setRandomVersionType(indexRequest, expectedParams);
            setRandomVersionType(indexRequest::versionType, expectedParams);
        }

        if (frequently()) {

@@ -399,25 +447,9 @@ public class RequestTests extends ESTestCase {
                expectedParams.put("refresh", refreshPolicy.getValue());
            }
        }
        if (randomBoolean()) {
            int waitForActiveShards = randomIntBetween(0, 10);
            updateRequest.waitForActiveShards(waitForActiveShards);
            expectedParams.put("wait_for_active_shards", String.valueOf(waitForActiveShards));
        }
        if (randomBoolean()) {
            long version = randomLong();
            updateRequest.version(version);
            if (version != Versions.MATCH_ANY) {
                expectedParams.put("version", Long.toString(version));
            }
        }
        if (randomBoolean()) {
            VersionType versionType = randomFrom(VersionType.values());
            updateRequest.versionType(versionType);
            if (versionType != VersionType.INTERNAL) {
                expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT));
            }
        }
        setRandomWaitForActiveShards(updateRequest::waitForActiveShards, expectedParams);
        setRandomVersion(updateRequest, expectedParams);
        setRandomVersionType(updateRequest::versionType, expectedParams);
        if (randomBoolean()) {
            int retryOnConflict = randomIntBetween(0, 5);
            updateRequest.retryOnConflict(retryOnConflict);

@@ -461,7 +493,7 @@ public class RequestTests extends ESTestCase {
        }
    }

    public void testUpdateWithDifferentContentTypes() throws IOException {
    public void testUpdateWithDifferentContentTypes() {
        IllegalStateException exception = expectThrows(IllegalStateException.class, () -> {
            UpdateRequest updateRequest = new UpdateRequest();
            updateRequest.doc(new IndexRequest().source(singletonMap("field", "doc"), XContentType.JSON));

@@ -484,13 +516,7 @@ public class RequestTests extends ESTestCase {
            expectedParams.put("timeout", BulkShardRequest.DEFAULT_TIMEOUT.getStringRep());
        }

        if (randomBoolean()) {
            WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
            bulkRequest.setRefreshPolicy(refreshPolicy);
            if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
                expectedParams.put("refresh", refreshPolicy.getValue());
            }
        }
        setRandomRefreshPolicy(bulkRequest::setRefreshPolicy, expectedParams);

        XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);

@@ -503,7 +529,7 @@ public class RequestTests extends ESTestCase {
        BytesReference source = RandomObjects.randomSource(random(), xContentType);
        DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values());

        DocWriteRequest<?> docWriteRequest = null;
        DocWriteRequest<?> docWriteRequest;
        if (opType == DocWriteRequest.OpType.INDEX) {
            IndexRequest indexRequest = new IndexRequest(index, type, id).source(source, xContentType);
            docWriteRequest = indexRequest;

@@ -533,6 +559,8 @@ public class RequestTests extends ESTestCase {
            }
        } else if (opType == DocWriteRequest.OpType.DELETE) {
            docWriteRequest = new DeleteRequest(index, type, id);
        } else {
            throw new UnsupportedOperationException("optype [" + opType + "] not supported");
        }

        if (randomBoolean()) {

@@ -771,6 +799,55 @@ public class RequestTests extends ESTestCase {
        }
    }

    public void testMultiSearch() throws IOException {
        int numberOfSearchRequests = randomIntBetween(0, 32);
        MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
        for (int i = 0; i < numberOfSearchRequests; i++) {
            SearchRequest searchRequest = randomSearchRequest(() -> {
                // No need to return a very complex SearchSourceBuilder here, that is tested elsewhere
                SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
                searchSourceBuilder.from(randomInt(10));
                searchSourceBuilder.size(randomIntBetween(20, 100));
                return searchSourceBuilder;
            });
            // scroll is not supported in the current msearch api, so unset it:
            searchRequest.scroll((Scroll) null);
            // only expand_wildcards, ignore_unavailable and allow_no_indices can be specified from msearch api, so unset other options:
            IndicesOptions randomlyGenerated = searchRequest.indicesOptions();
            IndicesOptions msearchDefault = new MultiSearchRequest().indicesOptions();
            searchRequest.indicesOptions(IndicesOptions.fromOptions(
                randomlyGenerated.ignoreUnavailable(), randomlyGenerated.allowNoIndices(), randomlyGenerated.expandWildcardsOpen(),
                randomlyGenerated.expandWildcardsClosed(), msearchDefault.allowAliasesToMultipleIndices(),
                msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases()
            ));
            multiSearchRequest.add(searchRequest);
        }

        Map<String, String> expectedParams = new HashMap<>();
        expectedParams.put(RestSearchAction.TYPED_KEYS_PARAM, "true");
        if (randomBoolean()) {
            multiSearchRequest.maxConcurrentSearchRequests(randomIntBetween(1, 8));
            expectedParams.put("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests()));
        }

        Request request = Request.multiSearch(multiSearchRequest);
        assertEquals("/_msearch", request.getEndpoint());
        assertEquals(expectedParams, request.getParameters());

        List<SearchRequest> requests = new ArrayList<>();
        CheckedBiConsumer<SearchRequest, XContentParser, IOException> consumer = (searchRequest, p) -> {
            SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(p);
            if (searchSourceBuilder.equals(new SearchSourceBuilder()) == false) {
                searchRequest.source(searchSourceBuilder);
            }
            requests.add(searchRequest);
        };
        MultiSearchRequest.readMultiLineFormat(new BytesArray(EntityUtils.toByteArray(request.getEntity())),
            REQUEST_BODY_CONTENT_TYPE.xContent(), consumer, null, multiSearchRequest.indicesOptions(), null, null,
            null, xContentRegistry(), true);
        assertEquals(requests, multiSearchRequest.requests());
    }

    public void testSearchScroll() throws IOException {
        SearchScrollRequest searchScrollRequest = new SearchScrollRequest();
        searchScrollRequest.scrollId(randomAlphaOfLengthBetween(5, 10));

@@ -782,7 +859,7 @@ public class RequestTests extends ESTestCase {
        assertEquals("/_search/scroll", request.getEndpoint());
        assertEquals(0, request.getParameters().size());
        assertToXContentBody(searchScrollRequest, request.getEntity());
        assertEquals(Request.REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
        assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
    }

    public void testClearScroll() throws IOException {

@@ -796,11 +873,11 @@ public class RequestTests extends ESTestCase {
        assertEquals("/_search/scroll", request.getEndpoint());
        assertEquals(0, request.getParameters().size());
        assertToXContentBody(clearScrollRequest, request.getEntity());
        assertEquals(Request.REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
        assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
    }

    private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException {
        BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, Request.REQUEST_BODY_CONTENT_TYPE, false);
        BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false);
        assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue());
        assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity)));
    }

@@ -888,31 +965,15 @@ public class RequestTests extends ESTestCase {
        } else {
            int numIncludes = randomIntBetween(0, 5);
            String[] includes = new String[numIncludes];
            StringBuilder includesParam = new StringBuilder();
            for (int i = 0; i < numIncludes; i++) {
                String include = randomAlphaOfLengthBetween(3, 10);
                includes[i] = include;
                includesParam.append(include);
                if (i < numIncludes - 1) {
                    includesParam.append(",");
                }
            }
            String includesParam = randomFields(includes);
            if (numIncludes > 0) {
                expectedParams.put("_source_include", includesParam.toString());
                expectedParams.put("_source_include", includesParam);
            }
            int numExcludes = randomIntBetween(0, 5);
            String[] excludes = new String[numExcludes];
            StringBuilder excludesParam = new StringBuilder();
            for (int i = 0; i < numExcludes; i++) {
                String exclude = randomAlphaOfLengthBetween(3, 10);
                excludes[i] = exclude;
                excludesParam.append(exclude);
                if (i < numExcludes - 1) {
                    excludesParam.append(",");
                }
            }
            String excludesParam = randomFields(excludes);
            if (numExcludes > 0) {
                expectedParams.put("_source_exclude", excludesParam.toString());
                expectedParams.put("_source_exclude", excludesParam);
            }
            consumer.accept(new FetchSourceContext(true, includes, excludes));
        }

@@ -959,10 +1020,24 @@ public class RequestTests extends ESTestCase {
        }
    }

    private static void setRandomRefreshPolicy(ReplicatedWriteRequest<?> request, Map<String, String> expectedParams) {
    private static void setRandomWaitForActiveShards(Consumer<ActiveShardCount> setter, Map<String, String> expectedParams) {
        if (randomBoolean()) {
            String waitForActiveShardsString;
            int waitForActiveShards = randomIntBetween(-1, 5);
            if (waitForActiveShards == -1) {
                waitForActiveShardsString = "all";
            } else {
                waitForActiveShardsString = String.valueOf(waitForActiveShards);
            }
            setter.accept(ActiveShardCount.parseString(waitForActiveShardsString));
            expectedParams.put("wait_for_active_shards", waitForActiveShardsString);
        }
    }

    private static void setRandomRefreshPolicy(Consumer<WriteRequest.RefreshPolicy> setter, Map<String, String> expectedParams) {
        if (randomBoolean()) {
            WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
            request.setRefreshPolicy(refreshPolicy);
            setter.accept(refreshPolicy);
            if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
                expectedParams.put("refresh", refreshPolicy.getValue());
            }

@@ -979,13 +1054,26 @@ public class RequestTests extends ESTestCase {
        }
    }

    private static void setRandomVersionType(DocWriteRequest<?> request, Map<String, String> expectedParams) {
    private static void setRandomVersionType(Consumer<VersionType> setter, Map<String, String> expectedParams) {
        if (randomBoolean()) {
            VersionType versionType = randomFrom(VersionType.values());
            request.versionType(versionType);
            setter.accept(versionType);
            if (versionType != VersionType.INTERNAL) {
                expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT));
            }
        }
    }

    private static String randomFields(String[] fields) {
        StringBuilder excludesParam = new StringBuilder();
        for (int i = 0; i < fields.length; i++) {
            String exclude = randomAlphaOfLengthBetween(3, 10);
            fields[i] = exclude;
            excludesParam.append(exclude);
            if (i < fields.length - 1) {
                excludesParam.append(",");
            }
        }
        return excludesParam.toString();
    }
}

@@ -21,20 +21,6 @@ package org.elasticsearch.client;

import com.fasterxml.jackson.core.JsonParseException;

import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchResponseSections;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;

@@ -49,6 +35,20 @@ import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicRequestLine;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchResponseSections;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@ -57,6 +57,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
|
|||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.cbor.CborXContent;
|
||||
import org.elasticsearch.common.xcontent.smile.SmileXContent;
|
||||
import org.elasticsearch.index.rankeval.DiscountedCumulativeGain;
|
||||
import org.elasticsearch.index.rankeval.EvaluationMetric;
|
||||
import org.elasticsearch.index.rankeval.MeanReciprocalRank;
|
||||
import org.elasticsearch.index.rankeval.PrecisionAtK;
|
||||
import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.search.SearchHits;
|
||||
|
@ -648,7 +652,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
|
||||
public void testProvidedNamedXContents() {
|
||||
List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getProvidedNamedXContents();
|
||||
assertEquals(2, namedXContents.size());
|
||||
assertEquals(5, namedXContents.size());
|
||||
Map<Class<?>, Integer> categories = new HashMap<>();
|
||||
List<String> names = new ArrayList<>();
|
||||
for (NamedXContentRegistry.Entry namedXContent : namedXContents) {
|
||||
|
@ -658,10 +662,14 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
categories.put(namedXContent.categoryClass, counter + 1);
|
||||
}
|
||||
}
|
||||
assertEquals(1, categories.size());
|
||||
assertEquals(2, categories.size());
|
||||
assertEquals(Integer.valueOf(2), categories.get(Aggregation.class));
|
||||
assertTrue(names.contains(ChildrenAggregationBuilder.NAME));
|
||||
assertTrue(names.contains(MatrixStatsAggregationBuilder.NAME));
|
||||
assertEquals(Integer.valueOf(3), categories.get(EvaluationMetric.class));
|
||||
assertTrue(names.contains(PrecisionAtK.NAME));
|
||||
assertTrue(names.contains(DiscountedCumulativeGain.NAME));
|
||||
assertTrue(names.contains(MeanReciprocalRank.NAME));
|
||||
}
|
||||
|
||||
private static class TrackingActionListener implements ActionListener<Integer> {
|
||||
|
|
|
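For readers wondering where the three new names come from: the high level client now registers one parser entry per rank-eval metric next to the two aggregation entries it already provided, which is exactly the 5 entries / 2 categories the test asserts. A sketch of such a registration; the lambda shape follows the usual `fromXContent` convention and is an assumption here, not a quote of the client code:

```java
import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.index.rankeval.DiscountedCumulativeGain;
import org.elasticsearch.index.rankeval.EvaluationMetric;
import org.elasticsearch.index.rankeval.MeanReciprocalRank;
import org.elasticsearch.index.rankeval.PrecisionAtK;

public class RankEvalXContentSketch {
    // Three EvaluationMetric entries plus the two existing Aggregation entries
    // give the 5 total / 2 categories asserted in the test above.
    static List<NamedXContentRegistry.Entry> rankEvalEntries() {
        List<NamedXContentRegistry.Entry> entries = new ArrayList<>();
        entries.add(new NamedXContentRegistry.Entry(EvaluationMetric.class,
                new ParseField(PrecisionAtK.NAME), (parser, context) -> PrecisionAtK.fromXContent(parser)));
        entries.add(new NamedXContentRegistry.Entry(EvaluationMetric.class,
                new ParseField(DiscountedCumulativeGain.NAME), (parser, context) -> DiscountedCumulativeGain.fromXContent(parser)));
        entries.add(new NamedXContentRegistry.Entry(EvaluationMetric.class,
                new ParseField(MeanReciprocalRank.NAME), (parser, context) -> MeanReciprocalRank.fromXContent(parser)));
        return entries;
    }
}
```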
@@ -23,20 +23,30 @@ import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.nio.entity.NStringEntity;
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.NestedQueryBuilder;
import org.elasticsearch.index.query.ScriptQueryBuilder;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.join.aggregations.Children;
import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.range.Range;
import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;

@@ -45,10 +55,12 @@ import org.elasticsearch.search.aggregations.matrix.stats.MatrixStats;
import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
import org.hamcrest.Matchers;
import org.junit.Before;

import java.io.IOException;

@@ -64,6 +76,7 @@ import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.nullValue;

public class SearchIT extends ESRestHighLevelClientTestCase {

@@ -80,10 +93,24 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
        StringEntity doc5 = new StringEntity("{\"type\":\"type2\", \"num\":100, \"num2\":10}", ContentType.APPLICATION_JSON);
        client().performRequest("PUT", "/index/type/5", Collections.emptyMap(), doc5);
        client().performRequest("POST", "/index/_refresh");
+
+       StringEntity doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
+       client().performRequest("PUT", "/index1/doc/1", Collections.emptyMap(), doc);
+       doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
+       client().performRequest("PUT", "/index1/doc/2", Collections.emptyMap(), doc);
+       doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
+       client().performRequest("PUT", "/index2/doc/3", Collections.emptyMap(), doc);
+       doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
+       client().performRequest("PUT", "/index2/doc/4", Collections.emptyMap(), doc);
+       doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
+       client().performRequest("PUT", "/index3/doc/5", Collections.emptyMap(), doc);
+       doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
+       client().performRequest("PUT", "/index3/doc/6", Collections.emptyMap(), doc);
+       client().performRequest("POST", "/index1,index2,index3/_refresh");
    }

    public void testSearchNoQuery() throws IOException {
-       SearchRequest searchRequest = new SearchRequest();
+       SearchRequest searchRequest = new SearchRequest("index");
        SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
        assertSearchHeader(searchResponse);
        assertNull(searchResponse.getAggregations());

@@ -106,7 +133,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
    }

    public void testSearchMatchQuery() throws IOException {
-       SearchRequest searchRequest = new SearchRequest();
+       SearchRequest searchRequest = new SearchRequest("index");
        searchRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10)));
        SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
        assertSearchHeader(searchResponse);

@@ -164,7 +191,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
            assertEquals(RestStatus.BAD_REQUEST, exception.status());
        }

-       SearchRequest searchRequest = new SearchRequest();
+       SearchRequest searchRequest = new SearchRequest("index");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.aggregation(new RangeAggregationBuilder("agg1").field("num")
                .addRange("first", 0, 30).addRange("second", 31, 200));

@@ -193,7 +220,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
    }

    public void testSearchWithTermsAndRangeAgg() throws IOException {
-       SearchRequest searchRequest = new SearchRequest();
+       SearchRequest searchRequest = new SearchRequest("index");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        TermsAggregationBuilder agg = new TermsAggregationBuilder("agg1", ValueType.STRING).field("type.keyword");
        agg.subAggregation(new RangeAggregationBuilder("subagg").field("num")

@@ -247,7 +274,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
    }

    public void testSearchWithMatrixStats() throws IOException {
-       SearchRequest searchRequest = new SearchRequest();
+       SearchRequest searchRequest = new SearchRequest("index");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.aggregation(new MatrixStatsAggregationBuilder("agg1").fields(Arrays.asList("num", "num2")));
        searchSourceBuilder.size(0);

@@ -374,7 +401,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
    }

    public void testSearchWithSuggest() throws IOException {
-       SearchRequest searchRequest = new SearchRequest();
+       SearchRequest searchRequest = new SearchRequest("index");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion("sugg1", new PhraseSuggestionBuilder("type"))
                .setGlobalText("type"));

@@ -464,6 +491,185 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
        }
    }

+   public void testMultiSearch() throws Exception {
+       MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
+       SearchRequest searchRequest1 = new SearchRequest("index1");
+       searchRequest1.source().sort("_id", SortOrder.ASC);
+       multiSearchRequest.add(searchRequest1);
+       SearchRequest searchRequest2 = new SearchRequest("index2");
+       searchRequest2.source().sort("_id", SortOrder.ASC);
+       multiSearchRequest.add(searchRequest2);
+       SearchRequest searchRequest3 = new SearchRequest("index3");
+       searchRequest3.source().sort("_id", SortOrder.ASC);
+       multiSearchRequest.add(searchRequest3);
+
+       MultiSearchResponse multiSearchResponse =
+               execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
+       assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
+       assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
+
+       assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
+       assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(false));
+       SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
+       assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
+       assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("1"));
+       assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getAt(1).getId(), Matchers.equalTo("2"));
+
+       assertThat(multiSearchResponse.getResponses()[1].getFailure(), Matchers.nullValue());
+       assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(false));
+       SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[1].getResponse());
+       assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
+       assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("3"));
+       assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(1).getId(), Matchers.equalTo("4"));
+
+       assertThat(multiSearchResponse.getResponses()[2].getFailure(), Matchers.nullValue());
+       assertThat(multiSearchResponse.getResponses()[2].isFailure(), Matchers.is(false));
+       SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[2].getResponse());
+       assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
+       assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("5"));
+       assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(1).getId(), Matchers.equalTo("6"));
+   }
+
+   public void testMultiSearch_withAgg() throws Exception {
+       MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
+       SearchRequest searchRequest1 = new SearchRequest("index1");
+       searchRequest1.source().size(0).aggregation(new TermsAggregationBuilder("name", ValueType.STRING).field("field.keyword")
+               .order(BucketOrder.key(true)));
+       multiSearchRequest.add(searchRequest1);
+       SearchRequest searchRequest2 = new SearchRequest("index2");
+       searchRequest2.source().size(0).aggregation(new TermsAggregationBuilder("name", ValueType.STRING).field("field.keyword")
+               .order(BucketOrder.key(true)));
+       multiSearchRequest.add(searchRequest2);
+       SearchRequest searchRequest3 = new SearchRequest("index3");
+       searchRequest3.source().size(0).aggregation(new TermsAggregationBuilder("name", ValueType.STRING).field("field.keyword")
+               .order(BucketOrder.key(true)));
+       multiSearchRequest.add(searchRequest3);
+
+       MultiSearchResponse multiSearchResponse =
+               execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
+       assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
+       assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
+
+       assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
+       assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(false));
+       SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
+       assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
+       assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getHits().length, Matchers.equalTo(0));
+       Terms terms = multiSearchResponse.getResponses()[0].getResponse().getAggregations().get("name");
+       assertThat(terms.getBuckets().size(), Matchers.equalTo(2));
+       assertThat(terms.getBuckets().get(0).getKeyAsString(), Matchers.equalTo("value1"));
+       assertThat(terms.getBuckets().get(1).getKeyAsString(), Matchers.equalTo("value2"));
+
+       assertThat(multiSearchResponse.getResponses()[1].getFailure(), Matchers.nullValue());
+       assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(false));
+       SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
+       assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
+       assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getHits().length, Matchers.equalTo(0));
+       terms = multiSearchResponse.getResponses()[1].getResponse().getAggregations().get("name");
+       assertThat(terms.getBuckets().size(), Matchers.equalTo(2));
+       assertThat(terms.getBuckets().get(0).getKeyAsString(), Matchers.equalTo("value1"));
+       assertThat(terms.getBuckets().get(1).getKeyAsString(), Matchers.equalTo("value2"));
+
+       assertThat(multiSearchResponse.getResponses()[2].getFailure(), Matchers.nullValue());
+       assertThat(multiSearchResponse.getResponses()[2].isFailure(), Matchers.is(false));
+       SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
+       assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
+       assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getHits().length, Matchers.equalTo(0));
+       terms = multiSearchResponse.getResponses()[2].getResponse().getAggregations().get("name");
+       assertThat(terms.getBuckets().size(), Matchers.equalTo(2));
+       assertThat(terms.getBuckets().get(0).getKeyAsString(), Matchers.equalTo("value1"));
+       assertThat(terms.getBuckets().get(1).getKeyAsString(), Matchers.equalTo("value2"));
+   }
+
+   public void testMultiSearch_withQuery() throws Exception {
+       MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
+       SearchRequest searchRequest1 = new SearchRequest("index1");
+       searchRequest1.source().query(new TermsQueryBuilder("field", "value2"));
+       multiSearchRequest.add(searchRequest1);
+       SearchRequest searchRequest2 = new SearchRequest("index2");
+       searchRequest2.source().query(new TermsQueryBuilder("field", "value2"));
+       multiSearchRequest.add(searchRequest2);
+       SearchRequest searchRequest3 = new SearchRequest("index3");
+       searchRequest3.source().query(new TermsQueryBuilder("field", "value2"));
+       multiSearchRequest.add(searchRequest3);
+
+       MultiSearchResponse multiSearchResponse =
+               execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
+       assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
+       assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
+
+       assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
+       assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(false));
+       SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
+       assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
+       assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("2"));
+
+       assertThat(multiSearchResponse.getResponses()[1].getFailure(), Matchers.nullValue());
+       assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(false));
+       SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[1].getResponse());
+       assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
+       assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("4"));
+
+       assertThat(multiSearchResponse.getResponses()[2].getFailure(), Matchers.nullValue());
+       assertThat(multiSearchResponse.getResponses()[2].isFailure(), Matchers.is(false));
+       SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[2].getResponse());
+       assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
+       assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("6"));
+
+       searchRequest1.source().highlighter(new HighlightBuilder().field("field"));
+       searchRequest2.source().highlighter(new HighlightBuilder().field("field"));
+       searchRequest3.source().highlighter(new HighlightBuilder().field("field"));
+       multiSearchResponse = execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
+       assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
+       assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
+
+       assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
+       assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(false));
+       SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
+       assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
+       assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getAt(0).getHighlightFields()
+               .get("field").fragments()[0].string(), Matchers.equalTo("<em>value2</em>"));
+
+       assertThat(multiSearchResponse.getResponses()[1].getFailure(), Matchers.nullValue());
+       assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(false));
+       SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[1].getResponse());
+       assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
+       assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("4"));
+       assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(0).getHighlightFields()
+               .get("field").fragments()[0].string(), Matchers.equalTo("<em>value2</em>"));
+
+       assertThat(multiSearchResponse.getResponses()[2].getFailure(), Matchers.nullValue());
+       assertThat(multiSearchResponse.getResponses()[2].isFailure(), Matchers.is(false));
+       SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[2].getResponse());
+       assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
+       assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("6"));
+       assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(0).getHighlightFields()
+               .get("field").fragments()[0].string(), Matchers.equalTo("<em>value2</em>"));
+   }
+
+   public void testMultiSearch_failure() throws Exception {
+       MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
+       SearchRequest searchRequest1 = new SearchRequest("index1");
+       searchRequest1.source().query(new ScriptQueryBuilder(new Script(ScriptType.INLINE, "invalid", "code", Collections.emptyMap())));
+       multiSearchRequest.add(searchRequest1);
+       SearchRequest searchRequest2 = new SearchRequest("index2");
+       searchRequest2.source().query(new ScriptQueryBuilder(new Script(ScriptType.INLINE, "invalid", "code", Collections.emptyMap())));
+       multiSearchRequest.add(searchRequest2);
+
+       MultiSearchResponse multiSearchResponse =
+               execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
+       assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
+       assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(2));
+
+       assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(true));
+       assertThat(multiSearchResponse.getResponses()[0].getFailure().getMessage(), containsString("search_phase_execution_exception"));
+       assertThat(multiSearchResponse.getResponses()[0].getResponse(), nullValue());
+
+       assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(true));
+       assertThat(multiSearchResponse.getResponses()[1].getFailure().getMessage(), containsString("search_phase_execution_exception"));
+       assertThat(multiSearchResponse.getResponses()[1].getResponse(), nullValue());
+   }
+
    private static void assertSearchHeader(SearchResponse searchResponse) {
        assertThat(searchResponse.getTook().nanos(), greaterThanOrEqualTo(0L));
        assertEquals(0, searchResponse.getFailedShards());
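The new tests above exercise the multi search support added to the high level client in this merge: several independent `SearchRequest`s travel in one `MultiSearchRequest`, and each slot of the response carries either a `SearchResponse` or a failure. A hedged usage sketch (index names and queries are placeholders):

```java
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class MultiSearchSketch {
    static void run(RestHighLevelClient client) throws Exception {
        MultiSearchRequest request = new MultiSearchRequest();
        request.add(new SearchRequest("index1")
                .source(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value2"))));
        request.add(new SearchRequest("index2")
                .source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery())));

        MultiSearchResponse response = client.multiSearch(request);
        for (MultiSearchResponse.Item item : response.getResponses()) {
            if (item.isFailure()) {
                // failed slots carry an exception instead of a SearchResponse
                System.err.println(item.getFailure().getMessage());
            } else {
                System.out.println(item.getResponse().getHits().getTotalHits());
            }
        }
    }
}
```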
@@ -21,13 +21,18 @@ package org.elasticsearch.client.documentation;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
-import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

@@ -52,8 +57,8 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
        RestHighLevelClient client = highLevelClient();

        {
-           Response createIndexResponse = client().performRequest("PUT", "/posts");
-           assertEquals(200, createIndexResponse.getStatusLine().getStatusCode());
+           CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("posts"));
+           assertTrue(createIndexResponse.isAcknowledged());
        }

        {

@@ -61,14 +66,26 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
            DeleteIndexRequest request = new DeleteIndexRequest("posts"); // <1>
            // end::delete-index-request

            // tag::delete-index-request-timeout
            request.timeout(TimeValue.timeValueMinutes(2)); // <1>
            request.timeout("2m"); // <2>
            // end::delete-index-request-timeout
            // tag::delete-index-request-masterTimeout
            request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
            request.masterNodeTimeout("1m"); // <2>
            // end::delete-index-request-masterTimeout
            // tag::delete-index-request-indicesOptions
            request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
            // end::delete-index-request-indicesOptions

            // tag::delete-index-execute
            DeleteIndexResponse deleteIndexResponse = client.indices().deleteIndex(request);
            // end::delete-index-execute
            assertTrue(deleteIndexResponse.isAcknowledged());

            // tag::delete-index-response
            boolean acknowledged = deleteIndexResponse.isAcknowledged(); // <1>
            // end::delete-index-response
            assertTrue(acknowledged);

            // tag::delete-index-execute-async
            client.indices().deleteIndexAsync(request, new ActionListener<DeleteIndexResponse>() {

@@ -85,26 +102,11 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
            // end::delete-index-execute-async
        }

-       {
-           DeleteIndexRequest request = new DeleteIndexRequest("posts");
-           // tag::delete-index-request-timeout
-           request.timeout(TimeValue.timeValueMinutes(2)); // <1>
-           request.timeout("2m"); // <2>
-           // end::delete-index-request-timeout
-           // tag::delete-index-request-masterTimeout
-           request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
-           request.timeout("1m"); // <2>
-           // end::delete-index-request-masterTimeout
-           // tag::delete-index-request-indicesOptions
-           request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
-           // end::delete-index-request-indicesOptions
-       }
-
        {
            // tag::delete-index-notfound
            try {
                DeleteIndexRequest request = new DeleteIndexRequest("does_not_exist");
-               DeleteIndexResponse deleteIndexResponse = client.indices().deleteIndex(request);
+               client.indices().deleteIndex(request);
            } catch (ElasticsearchException exception) {
                if (exception.status() == RestStatus.NOT_FOUND) {
                    // <1>

@@ -113,4 +115,79 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
            // end::delete-index-notfound
        }
    }

+   public void testCreateIndex() throws IOException {
+       RestHighLevelClient client = highLevelClient();
+
+       {
+           // tag::create-index-request
+           CreateIndexRequest request = new CreateIndexRequest("twitter"); // <1>
+           // end::create-index-request
+
+           // tag::create-index-request-settings
+           request.settings(Settings.builder() // <1>
+               .put("index.number_of_shards", 3)
+               .put("index.number_of_replicas", 2)
+           );
+           // end::create-index-request-settings
+
+           // tag::create-index-request-mappings
+           request.mapping("tweet", // <1>
+               "  {\n" +
+               "    \"tweet\": {\n" +
+               "      \"properties\": {\n" +
+               "        \"message\": {\n" +
+               "          \"type\": \"text\"\n" +
+               "        }\n" +
+               "      }\n" +
+               "    }\n" +
+               "  }", // <2>
+               XContentType.JSON);
+           // end::create-index-request-mappings
+
+           // tag::create-index-request-aliases
+           request.alias(
+               new Alias("twitter_alias") // <1>
+           );
+           // end::create-index-request-aliases
+
+           // tag::create-index-request-timeout
+           request.timeout(TimeValue.timeValueMinutes(2)); // <1>
+           request.timeout("2m"); // <2>
+           // end::create-index-request-timeout
+           // tag::create-index-request-masterTimeout
+           request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
+           request.masterNodeTimeout("1m"); // <2>
+           // end::create-index-request-masterTimeout
+           // tag::create-index-request-waitForActiveShards
+           request.waitForActiveShards(2); // <1>
+           request.waitForActiveShards(ActiveShardCount.DEFAULT); // <2>
+           // end::create-index-request-waitForActiveShards
+
+           // tag::create-index-execute
+           CreateIndexResponse createIndexResponse = client.indices().createIndex(request);
+           // end::create-index-execute
+
+           // tag::create-index-response
+           boolean acknowledged = createIndexResponse.isAcknowledged(); // <1>
+           boolean shardsAcked = createIndexResponse.isShardsAcked(); // <2>
+           // end::create-index-response
+           assertTrue(acknowledged);
+           assertTrue(shardsAcked);
+
+           // tag::create-index-execute-async
+           client.indices().createIndexAsync(request, new ActionListener<CreateIndexResponse>() {
+               @Override
+               public void onResponse(CreateIndexResponse createIndexResponse) {
+                   // <1>
+               }
+
+               @Override
+               public void onFailure(Exception e) {
+                   // <2>
+               }
+           });
+           // end::create-index-execute-async
+       }
+   }
}
@@ -26,6 +26,7 @@ import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;

@@ -43,6 +44,7 @@ import java.io.InputStream;
import java.util.Map;

import static java.util.Collections.emptyMap;
+import static java.util.Collections.singletonMap;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;

@@ -67,7 +69,7 @@ public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase {
    public void testCreateIndex() throws IOException {
        RestHighLevelClient client = highLevelClient();
        {
-           //tag::migration-create-inded
+           //tag::migration-create-index
            Settings indexSettings = Settings.builder() // <1>
                .put(SETTING_NUMBER_OF_SHARDS, 1)
                .put(SETTING_NUMBER_OF_REPLICAS, 0)

@@ -95,7 +97,7 @@ public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase {
            if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
                // <7>
            }
-           //end::migration-create-inded
+           //end::migration-create-index
            assertEquals(200, response.getStatusLine().getStatusCode());
        }
    }

@@ -104,7 +106,8 @@ public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase {
        RestHighLevelClient client = highLevelClient();
        {
            //tag::migration-cluster-health
-           Response response = client.getLowLevelClient().performRequest("GET", "/_cluster/health"); // <1>
+           Map<String, String> parameters = singletonMap("wait_for_status", "green");
+           Response response = client.getLowLevelClient().performRequest("GET", "/_cluster/health", parameters); // <1>

            ClusterHealthStatus healthStatus;
            try (InputStream is = response.getEntity().getContent()) { // <2>

@@ -120,7 +123,7 @@ public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase {
        }
    }

-   public void testRequests() throws IOException {
+   public void testRequests() throws Exception {
        RestHighLevelClient client = highLevelClient();
        {
            //tag::migration-request-ctor

@@ -133,13 +136,6 @@ public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase {
            //end::migration-request-ctor-execution
            assertEquals(RestStatus.CREATED, response.status());
        }
-       {
-           //tag::migration-request-sync-execution
-           DeleteRequest request = new DeleteRequest("index", "doc", "id");
-           DeleteResponse response = client.delete(request); // <1>
-           //end::migration-request-sync-execution
-           assertEquals(RestStatus.OK, response.status());
-       }
        {
            //tag::migration-request-async-execution
            DeleteRequest request = new DeleteRequest("index", "doc", "id"); // <1>

@@ -155,6 +151,14 @@ public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase {
                }
            });
            //end::migration-request-async-execution
+           assertBusy(() -> assertFalse(client.exists(new GetRequest("index", "doc", "id"))));
        }
+       {
+           //tag::migration-request-sync-execution
+           DeleteRequest request = new DeleteRequest("index", "doc", "id");
+           DeleteResponse response = client.delete(request); // <1>
+           //end::migration-request-sync-execution
+           assertEquals(RestStatus.NOT_FOUND, response.status());
+       }
    }
}
@@ -28,7 +28,9 @@ import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.nio.conn.SchemeIOSessionStrategy;

+import javax.net.ssl.SSLContext;
import java.security.AccessController;
+import java.security.NoSuchAlgorithmException;
import java.security.PrivilegedAction;
import java.util.Objects;

@@ -200,9 +202,11 @@ public final class RestClientBuilder {
            requestConfigBuilder = requestConfigCallback.customizeRequestConfig(requestConfigBuilder);
        }

+       try {
            HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create().setDefaultRequestConfig(requestConfigBuilder.build())
                //default settings for connection pooling may be too constraining
-               .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE).setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL).useSystemProperties();
+               .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE).setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL)
+               .setSSLContext(SSLContext.getDefault());
            if (httpClientConfigCallback != null) {
                httpClientBuilder = httpClientConfigCallback.customizeHttpClient(httpClientBuilder);
            }

@@ -214,6 +218,9 @@ public final class RestClientBuilder {
                return finalBuilder.build();
            }
        });
+       } catch (NoSuchAlgorithmException e) {
+           throw new IllegalStateException("could not create the default ssl context", e);
+       }
    }

    /**
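Because the builder now seeds the async HTTP client with `SSLContext.getDefault()` before the user callback runs, a custom SSL setup still goes through `setHttpClientConfigCallback` and simply overrides the default. A small sketch under that assumption (host and context are placeholders):

```java
import javax.net.ssl.SSLContext;

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;

public class CustomSslSketch {
    static RestClient build(SSLContext customContext) {
        return RestClient.builder(new HttpHost("localhost", 9200, "https"))
            // the callback runs after the default SSLContext is applied,
            // so whatever it sets wins
            .setHttpClientConfigCallback(httpClientBuilder ->
                httpClientBuilder.setSSLContext(customContext))
            .build();
    }
}
```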
@@ -41,6 +41,10 @@ public class FailureTrackingResponseListenerTests extends RestClientTestCase {
        MockResponseListener responseListener = new MockResponseListener();
        RestClient.FailureTrackingResponseListener listener = new RestClient.FailureTrackingResponseListener(responseListener);

+       final Response response = mockResponse();
+       listener.onSuccess(response);
+       assertSame(response, responseListener.response.get());
+       assertNull(responseListener.exception.get());
    }

    public void testOnFailure() {
@@ -32,6 +32,7 @@ dependencies {
  compile "org.elasticsearch.plugin:lang-mustache-client:${version}"
  compile "org.elasticsearch.plugin:percolator-client:${version}"
  compile "org.elasticsearch.plugin:parent-join-client:${version}"
+ compile "org.elasticsearch.plugin:rank-eval-client:${version}"
  testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
  testCompile "junit:junit:${versions.junit}"
  testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
@@ -45,8 +45,12 @@ import java.util.concurrent.TimeUnit;
 * {@link MustachePlugin},
 * {@link ParentJoinPlugin}
 * plugins for the client. These plugins are all the required modules for Elasticsearch.
+ *
+ * @deprecated {@link TransportClient} is deprecated in favour of the High Level REST client and will
+ * be removed in Elasticsearch 8.0.
 */
@SuppressWarnings({"unchecked","varargs"})
+@Deprecated
public class PreBuiltTransportClient extends TransportClient {

    static {
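Given the deprecation notice above, a before/after construction sketch may help readers planning the migration; addresses and ports are placeholders, and the REST client form is the documented replacement rather than code from this commit:

```java
import java.net.InetAddress;

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

public class ClientMigrationSketch {
    public static void main(String[] args) throws Exception {
        // Deprecated: transport protocol client, to be removed in 8.0.
        try (PreBuiltTransportClient transportClient = new PreBuiltTransportClient(Settings.EMPTY)) {
            transportClient.addTransportAddress(
                new TransportAddress(InetAddress.getByName("localhost"), 9300));
        }

        // Replacement: HTTP-based High Level REST client.
        try (RestHighLevelClient restClient = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // use restClient.search(...), restClient.index(...), ...
        }
    }
}
```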
@@ -20,6 +20,17 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks

apply plugin: 'elasticsearch.build'
+apply plugin: 'nebula.optional-base'
+apply plugin: 'nebula.maven-base-publish'
+apply plugin: 'nebula.maven-scm'
+
+publishing {
+  publications {
+    nebula {
+      artifactId 'elasticsearch-cli'
+    }
+  }
+}

archivesBaseName = 'elasticsearch-cli'
@@ -0,0 +1 @@
+0078019336ebd3ee475019c88a749ce8b7b261fd
@@ -1 +0,0 @@
-4c515e5152e6938129a5e97c5afb5b3b360faed3
@@ -0,0 +1 @@
+e66dfb2c0fb938a41d27059c923c3163187584c9
@@ -1 +0,0 @@
-406c6cc0f8c2a47d42a1e343eaf2ad939fee905c
@@ -0,0 +1 @@
+b79166ddbca0fddcb794026a22c363be1caecf5d
@@ -1 +0,0 @@
-4c93f7fbc6e0caf87f7948b8481d80e0167133bf
@@ -0,0 +1 @@
+dacc60bd0f55f75b24c434e2d715a10a91127fd0
@@ -1 +0,0 @@
-b078ca50c6d579085c7755b4fd8de60711964dcc
@@ -0,0 +1 @@
+9debc384c54d36b69aa8cd990fd047fc9ef00eea
@@ -1 +0,0 @@
-fc5e61c8879f22b65ee053f1665bc9f13af79c1d
@@ -0,0 +1 @@
+e048857ea7c66579d172295fc7394aa6163273d7
@@ -1 +0,0 @@
-9a10839d3dfe7b369f0af8a78a630ee4d82e678e
@@ -0,0 +1 @@
+9e8da627bcd3934e8b921810e71f225b1d2312ec
@@ -1 +0,0 @@
-d45f2f51cf6f47a66ecafddecb83c1e08eb4061f
@@ -0,0 +1 @@
+dfb4feab274c56d8a45098a398ea623e0d9d2a41
@@ -1 +0,0 @@
-19cb7362be57104ad891259060af80fb4679e92c
@@ -0,0 +1 @@
+e7962cf1c6a9654d428e0dceead0278083429044
@@ -1 +0,0 @@
-ae24737048d95f56d0099fea77498324412eef50
@@ -0,0 +1 @@
+6cc9de8bd74ded29572e0ca7ce14dc0ef11cefc4
@@ -1 +0,0 @@
-a9d3422c9a72106026c19a8f76a4f4e62159ff5c
@@ -0,0 +1 @@
+69f59c8ee5b0cad345af52f7ef1901b5828cf41a
@@ -1 +0,0 @@
-66433006587ede3e01899fd6f5e55c8378032c2f
@@ -0,0 +1 @@
+de04b5de5f08ef4699cbf3415753661ac69a1fda
@@ -1 +0,0 @@
-b6b3082ba845f7bd41641b015624f46d4f20afb6
@@ -0,0 +1 @@
+92c74817922c9ed6ec484453e21ffc5ba802b56c
@@ -1 +0,0 @@
-7757cac49cb3e9f1a219346ce95fb80f61f7090e
@@ -0,0 +1 @@
+99eff7d10daf8d53bba5a173a833dd2ac83fca55
@@ -1 +0,0 @@
-92991fdcd185883050d9530ccc0d863b7a08e99c
@@ -0,0 +1 @@
+1d44a9ed8ecefd3c6dceffa99f896ba957b7c31e
@@ -1 +0,0 @@
-fb6a94b833a23a17e3721ea2f9679ad770dec48b
@@ -16,6 +16,7 @@
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.lucene.queries;

import org.apache.lucene.index.BinaryDocValues;

@@ -75,9 +75,6 @@ public class CustomFieldQuery extends FieldQuery {
        } else if (sourceQuery instanceof BlendedTermQuery) {
            final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;
            flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);
-       } else if (sourceQuery instanceof ESToParentBlockJoinQuery) {
-           ESToParentBlockJoinQuery blockJoinQuery = (ESToParentBlockJoinQuery) sourceQuery;
-           flatten(blockJoinQuery.getChildQuery(), reader, flatQueries, boost);
        } else if (sourceQuery instanceof BoostingQuery) {
            BoostingQuery boostingQuery = (BoostingQuery) sourceQuery;
            //flatten positive query with query boost
@@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
import org.elasticsearch.transport.TcpTransport;

import java.io.IOException;

@@ -986,7 +987,10 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
    SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class,
            org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2),
    UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException.class,
-           org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0);
+           org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0),
+   TOO_MANY_BUCKETS_EXCEPTION(MultiBucketConsumerService.TooManyBucketsException.class,
+           MultiBucketConsumerService.TooManyBucketsException::new, 149,
+           Version.V_7_0_0_alpha1);

    final Class<? extends ElasticsearchException> exceptionClass;
    final CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException> constructor;
@@ -107,6 +107,8 @@ public class Version implements Comparable<Version> {
    public static final Version V_5_6_4 = new Version(V_5_6_4_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
    public static final int V_5_6_5_ID = 5060599;
    public static final Version V_5_6_5 = new Version(V_5_6_5_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
+   public static final int V_5_6_6_ID = 5060699;
+   public static final Version V_5_6_6 = new Version(V_5_6_6_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
    public static final int V_6_0_0_alpha1_ID = 6000001;
    public static final Version V_6_0_0_alpha1 =
        new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);

@@ -131,10 +133,15 @@ public class Version implements Comparable<Version> {
    public static final int V_6_0_1_ID = 6000199;
    public static final Version V_6_0_1 =
        new Version(V_6_0_1_ID, org.apache.lucene.util.Version.LUCENE_7_0_1);
+   public static final int V_6_0_2_ID = 6000299;
+   public static final Version V_6_0_2 =
+       new Version(V_6_0_2_ID, org.apache.lucene.util.Version.LUCENE_7_0_1);
    public static final int V_6_1_0_ID = 6010099;
    public static final Version V_6_1_0 = new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
+   public static final int V_6_1_1_ID = 6010199;
+   public static final Version V_6_1_1 = new Version(V_6_1_1_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
    public static final int V_6_2_0_ID = 6020099;
-   public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
+   public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_0);
    public static final int V_7_0_0_alpha1_ID = 7000001;
    public static final Version V_7_0_0_alpha1 =
        new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_0);

@@ -153,10 +160,14 @@ public class Version implements Comparable<Version> {
        switch (id) {
            case V_7_0_0_alpha1_ID:
                return V_7_0_0_alpha1;
-           case V_6_1_0_ID:
-               return V_6_1_0;
            case V_6_2_0_ID:
                return V_6_2_0;
+           case V_6_1_1_ID:
+               return V_6_1_1;
+           case V_6_1_0_ID:
+               return V_6_1_0;
+           case V_6_0_2_ID:
+               return V_6_0_2;
            case V_6_0_1_ID:
                return V_6_0_1;
            case V_6_0_0_ID:

@@ -173,6 +184,8 @@ public class Version implements Comparable<Version> {
                return V_6_0_0_alpha2;
            case V_6_0_0_alpha1_ID:
                return V_6_0_0_alpha1;
+           case V_5_6_6_ID:
+               return V_5_6_6;
            case V_5_6_5_ID:
                return V_5_6_5;
            case V_5_6_4_ID:
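The numeric IDs in this file follow a fixed scheme: major times 1,000,000 plus minor times 10,000 plus revision times 100 plus a trailing build number (99 for releases, low values for alpha/beta builds), so V_5_6_6_ID = 5060699 decodes to 5.6.6. A tiny decoder sketch:

```java
public class VersionIdSketch {
    // Decodes an Elasticsearch version id such as 5060699 into "5.6.6".
    static String decode(int id) {
        int major = id / 1_000_000;
        int minor = (id / 10_000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100; // 99 = release, lower values mark alpha/beta builds
        String version = major + "." + minor + "." + revision;
        return build == 99 ? version : version + "-build" + build;
    }

    public static void main(String[] args) {
        System.out.println(decode(5060699)); // 5.6.6
        System.out.println(decode(6020099)); // 6.2.0
        System.out.println(decode(7000001)); // 7.0.0-build1 (alpha1)
    }
}
```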
@@ -19,8 +19,10 @@

package org.elasticsearch.action.admin.cluster.snapshots.status;

+import org.elasticsearch.Version;
import org.elasticsearch.cluster.SnapshotsInProgress.State;
import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

@@ -57,10 +59,15 @@ public class SnapshotStatus implements ToXContentObject, Streamable {

    private SnapshotStats stats;

-   SnapshotStatus(final Snapshot snapshot, final State state, final List<SnapshotIndexShardStatus> shards) {
+   @Nullable
+   private Boolean includeGlobalState;
+
+   SnapshotStatus(final Snapshot snapshot, final State state, final List<SnapshotIndexShardStatus> shards,
+                  final Boolean includeGlobalState) {
        this.snapshot = Objects.requireNonNull(snapshot);
        this.state = Objects.requireNonNull(state);
        this.shards = Objects.requireNonNull(shards);
+       this.includeGlobalState = includeGlobalState;
        shardsStats = new SnapshotShardsStats(shards);
        updateShardStats();
    }

@@ -82,6 +89,14 @@ public class SnapshotStatus implements ToXContentObject, Streamable {
        return state;
    }

+   /**
+    * Returns true if global state is included in the snapshot, false otherwise.
+    * Can be null if this information is unknown.
+    */
+   public Boolean includeGlobalState() {
+       return includeGlobalState;
+   }
+
    /**
     * Returns list of snapshot shards
     */

@@ -132,6 +147,9 @@ public class SnapshotStatus implements ToXContentObject, Streamable {
            builder.add(SnapshotIndexShardStatus.readShardSnapshotStatus(in));
        }
        shards = Collections.unmodifiableList(builder);
+       if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
+           includeGlobalState = in.readOptionalBoolean();
+       }
        updateShardStats();
    }

@@ -143,6 +161,9 @@ public class SnapshotStatus implements ToXContentObject, Streamable {
        for (SnapshotIndexShardStatus shard : shards) {
            shard.writeTo(out);
        }
+       if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
+           out.writeOptionalBoolean(includeGlobalState);
+       }
    }

    /**

@@ -174,6 +195,7 @@ public class SnapshotStatus implements ToXContentObject, Streamable {
    private static final String UUID = "uuid";
    private static final String STATE = "state";
    private static final String INDICES = "indices";
+   private static final String INCLUDE_GLOBAL_STATE = "include_global_state";

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {

@@ -182,6 +204,9 @@ public class SnapshotStatus implements ToXContentObject, Streamable {
        builder.field(REPOSITORY, snapshot.getRepository());
        builder.field(UUID, snapshot.getSnapshotId().getUUID());
        builder.field(STATE, state.name());
+       if (includeGlobalState != null) {
+           builder.field(INCLUDE_GLOBAL_STATE, includeGlobalState);
+       }
        shardsStats.toXContent(builder, params);
        stats.toXContent(builder, params);
        builder.startObject(INDICES);
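The paired read/write guards above are the standard wire-compatibility pattern: the new optional field is only serialized when the other node's version is at least V_6_2_0, so older peers neither send nor expect the extra byte. A stripped-down illustration of the idea (a StringBuilder stands in for StreamInput/StreamOutput):

```java
import java.util.Optional;

public class VersionGatedFieldSketch {
    static final int V_6_2_0 = 6020099;

    // Only emit the optional flag when the receiver understands it.
    static void write(int receiverVersion, Optional<Boolean> includeGlobalState, StringBuilder wire) {
        wire.append("base-fields;");
        if (receiverVersion >= V_6_2_0) {
            wire.append("includeGlobalState=")
                .append(includeGlobalState.map(String::valueOf).orElse("null"))
                .append(';');
        }
    }

    public static void main(String[] args) {
        StringBuilder toOldNode = new StringBuilder();
        StringBuilder toNewNode = new StringBuilder();
        write(6010199, Optional.of(true), toOldNode); // 6.1.1 peer: field omitted
        write(6020099, Optional.of(true), toNewNode); // 6.2.0 peer: field sent
        System.out.println(toOldNode + " | " + toNewNode);
    }
}
```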
@@ -196,7 +196,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                SnapshotIndexShardStatus shardStatus = new SnapshotIndexShardStatus(shardEntry.key, stage);
                shardStatusBuilder.add(shardStatus);
            }
-           builder.add(new SnapshotStatus(entry.snapshot(), entry.state(), Collections.unmodifiableList(shardStatusBuilder)));
+           builder.add(new SnapshotStatus(entry.snapshot(), entry.state(),
+               Collections.unmodifiableList(shardStatusBuilder), entry.includeGlobalState()));
        }
    }
    // Now add snapshots on disk that are not currently running

@@ -248,7 +249,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                default:
                    throw new IllegalArgumentException("Unknown snapshot state " + snapshotInfo.state());
            }
-           builder.add(new SnapshotStatus(new Snapshot(repositoryName, snapshotId), state, Collections.unmodifiableList(shardStatusBuilder)));
+           builder.add(new SnapshotStatus(new Snapshot(repositoryName, snapshotId), state,
+               Collections.unmodifiableList(shardStatusBuilder), snapshotInfo.includeGlobalState()));
        }
    }
}
@@ -21,10 +21,13 @@ package org.elasticsearch.action.admin.indices.alias;

import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -33,11 +36,17 @@ import org.elasticsearch.index.query.QueryBuilder;

import java.io.IOException;
import java.util.Map;
+import java.util.Objects;

/**
 * Represents an alias, to be associated with an index
 */
-public class Alias implements Streamable {
+public class Alias implements Streamable, ToXContentObject {
+
+   private static final ParseField FILTER = new ParseField("filter");
+   private static final ParseField ROUTING = new ParseField("routing");
+   private static final ParseField INDEX_ROUTING = new ParseField("index_routing", "indexRouting", "index-routing");
+   private static final ParseField SEARCH_ROUTING = new ParseField("search_routing", "searchRouting", "search-routing");

    private String name;

@@ -196,16 +205,16 @@ public class Alias implements Streamable, ToXContentObject {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token == XContentParser.Token.START_OBJECT) {
-               if ("filter".equals(currentFieldName)) {
+               if (FILTER.match(currentFieldName)) {
                    Map<String, Object> filter = parser.mapOrdered();
                    alias.filter(filter);
                }
            } else if (token == XContentParser.Token.VALUE_STRING) {
-               if ("routing".equals(currentFieldName)) {
+               if (ROUTING.match(currentFieldName)) {
                    alias.routing(parser.text());
-               } else if ("index_routing".equals(currentFieldName) || "indexRouting".equals(currentFieldName) || "index-routing".equals(currentFieldName)) {
+               } else if (INDEX_ROUTING.match(currentFieldName)) {
                    alias.indexRouting(parser.text());
-               } else if ("search_routing".equals(currentFieldName) || "searchRouting".equals(currentFieldName) || "search-routing".equals(currentFieldName)) {
+               } else if (SEARCH_ROUTING.match(currentFieldName)) {
                    alias.searchRouting(parser.text());
                }
            }

@@ -213,6 +222,29 @@ public class Alias implements Streamable, ToXContentObject {
        return alias;
    }

+   @Override
+   public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+       builder.startObject(name);
+
+       if (filter != null) {
+           builder.rawField(FILTER.getPreferredName(), new BytesArray(filter), XContentType.JSON);
+       }
+
+       if (indexRouting != null && indexRouting.equals(searchRouting)) {
+           builder.field(ROUTING.getPreferredName(), indexRouting);
+       } else {
+           if (indexRouting != null) {
+               builder.field(INDEX_ROUTING.getPreferredName(), indexRouting);
+           }
+           if (searchRouting != null) {
+               builder.field(SEARCH_ROUTING.getPreferredName(), searchRouting);
+           }
+       }
+
+       builder.endObject();
+       return builder;
+   }
+
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
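With `Alias` now implementing `ToXContentObject`, an alias can render its own JSON instead of callers hand-building it. A hedged sketch of the resulting output (filter and routing values are made up, and `Strings.toString` is assumed available as the usual ToXContent helper):

```java
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.common.Strings;

public class AliasXContentSketch {
    public static void main(String[] args) {
        Alias alias = new Alias("twitter_alias")
            .filter("{\"term\":{\"user\":\"kimchy\"}}")
            .routing("1");
        // Strings.toString drives the new toXContent; expected output (roughly):
        // {"twitter_alias":{"filter":{"term":{"user":"kimchy"}},"routing":"1"}}
        System.out.println(Strings.toString(alias));
    }
}
```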
@@ -158,15 +158,18 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
                }
            }
            final AnalysisRegistry analysisRegistry = indicesService.getAnalysis();
            return analyze(request, field, analyzer, indexService != null ? indexService.getIndexAnalyzers() : null, analysisRegistry, environment);
            final int maxTokenCount = indexService == null ?
                IndexSettings.MAX_TOKEN_COUNT_SETTING.get(settings) : indexService.getIndexSettings().getMaxTokenCount();
            return analyze(request, field, analyzer, indexService != null ? indexService.getIndexAnalyzers() : null,
                analysisRegistry, environment, maxTokenCount);
        } catch (IOException e) {
            throw new ElasticsearchException("analysis failed", e);
        }

    }

    public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Analyzer analyzer, IndexAnalyzers indexAnalyzers, AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
    public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Analyzer analyzer, IndexAnalyzers indexAnalyzers,
                                          AnalysisRegistry analysisRegistry, Environment environment, int maxTokenCount) throws IOException {
        boolean closeAnalyzer = false;
        if (analyzer == null && request.analyzer() != null) {
            if (indexAnalyzers == null) {

@@ -235,9 +238,9 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
        DetailAnalyzeResponse detail = null;

        if (request.explain()) {
            detail = detailAnalyze(request, analyzer, field);
            detail = detailAnalyze(request, analyzer, field, maxTokenCount);
        } else {
            tokens = simpleAnalyze(request, analyzer, field);
            tokens = simpleAnalyze(request, analyzer, field, maxTokenCount);
        }

        if (closeAnalyzer) {

@@ -247,7 +250,9 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
        return new AnalyzeResponse(tokens, detail);
    }

    private static List<AnalyzeResponse.AnalyzeToken> simpleAnalyze(AnalyzeRequest request, Analyzer analyzer, String field) {
    private static List<AnalyzeResponse.AnalyzeToken> simpleAnalyze(AnalyzeRequest request,
                                                                    Analyzer analyzer, String field, int maxTokenCount) {
        TokenCounter tc = new TokenCounter(maxTokenCount);
        List<AnalyzeResponse.AnalyzeToken> tokens = new ArrayList<>();
        int lastPosition = -1;
        int lastOffset = 0;

@@ -267,7 +272,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
            }
            tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(),
                lastOffset + offset.endOffset(), posLen.getPositionLength(), type.type(), null));

            tc.increment();
        }
        stream.end();
        lastOffset += offset.endOffset();

@@ -282,7 +287,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
        return tokens;
    }

    private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analyzer analyzer, String field) {
    private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analyzer analyzer, String field, int maxTokenCount) {
        DetailAnalyzeResponse detailResponse;
        final Set<String> includeAttributes = new HashSet<>();
        if (request.attributes() != null) {

@@ -307,7 +312,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
        String[][] charFiltersTexts = new String[charFilterFactories != null ? charFilterFactories.length : 0][request.text().length];
        TokenListCreator[] tokenFiltersTokenListCreator = new TokenListCreator[tokenFilterFactories != null ? tokenFilterFactories.length : 0];

        TokenListCreator tokenizerTokenListCreator = new TokenListCreator();
        TokenListCreator tokenizerTokenListCreator = new TokenListCreator(maxTokenCount);

        for (int textIndex = 0; textIndex < request.text().length; textIndex++) {
            String charFilteredSource = request.text()[textIndex];

@@ -333,7 +338,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
            if (tokenFilterFactories != null) {
                for (int tokenFilterIndex = 0; tokenFilterIndex < tokenFilterFactories.length; tokenFilterIndex++) {
                    if (tokenFiltersTokenListCreator[tokenFilterIndex] == null) {
                        tokenFiltersTokenListCreator[tokenFilterIndex] = new TokenListCreator();
                        tokenFiltersTokenListCreator[tokenFilterIndex] = new TokenListCreator(maxTokenCount);
                    }
                    TokenStream stream = createStackedTokenStream(request.text()[textIndex],
                        charFilterFactories, tokenizerFactory, tokenFilterFactories, tokenFilterIndex + 1);

@@ -366,7 +371,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
            name = analyzer.getClass().getName();
        }

        TokenListCreator tokenListCreator = new TokenListCreator();
        TokenListCreator tokenListCreator = new TokenListCreator(maxTokenCount);
        for (String text : request.text()) {
            tokenListCreator.analyze(analyzer.tokenStream(field, text), analyzer, field,
                includeAttributes);

@@ -408,13 +413,32 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
        return sb.toString();
    }

    private static class TokenCounter{
        private int tokenCount = 0;
        private int maxTokenCount;

        private TokenCounter(int maxTokenCount){
            this.maxTokenCount = maxTokenCount;
        }
        private void increment(){
            tokenCount++;
            if (tokenCount > maxTokenCount) {
                throw new IllegalStateException(
                    "The number of tokens produced by calling _analyze has exceeded the allowed maximum of [" + maxTokenCount + "]."
                        + " This limit can be set by changing the [index.analyze.max_token_count] index level setting.");
            }
        }
    }

    private static class TokenListCreator {
        int lastPosition = -1;
        int lastOffset = 0;
        List<AnalyzeResponse.AnalyzeToken> tokens;
        private TokenCounter tc;

        TokenListCreator() {
        TokenListCreator(int maxTokenCount) {
            tokens = new ArrayList<>();
            tc = new TokenCounter(maxTokenCount);
        }

        private void analyze(TokenStream stream, Analyzer analyzer, String field, Set<String> includeAttributes) {

@@ -433,7 +457,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
            }
            tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(),
                lastOffset + offset.endOffset(), posLen.getPositionLength(), type.type(), extractExtendedAttributes(stream, includeAttributes)));

            tc.increment();
        }
        stream.end();
        lastOffset += offset.endOffset();
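The new TokenCounter above is the entire enforcement mechanism: each emitted token bumps a counter, and crossing `index.analyze.max_token_count` aborts the `_analyze` call. A minimal, self-contained sketch of the same guard pattern (the input string and limit below are illustrative, not from the commit):

    public class TokenCountGuardDemo {
        public static void main(String[] args) {
            final int maxTokenCount = 5; // stands in for index.analyze.max_token_count
            int tokenCount = 0;
            for (String token : "one two three four five six".split(" ")) {
                tokenCount++; // one increment per emitted token, as in TokenCounter#increment
                if (tokenCount > maxTokenCount) {
                    throw new IllegalStateException(
                        "token count [" + tokenCount + "] exceeded the allowed maximum of [" + maxTokenCount + "]");
                }
            }
        }
    }

Running it throws on the sixth token, which mirrors how an over-long `_analyze` request now fails fast instead of buffering an unbounded token list.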
@@ -30,6 +30,7 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.MapBuilder;

@@ -37,6 +38,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;

@@ -65,7 +67,11 @@ import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
 * @see org.elasticsearch.client.Requests#createIndexRequest(String)
 * @see CreateIndexResponse
 */
public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest> implements IndicesRequest {
public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest> implements IndicesRequest, ToXContentObject {

    private static final ParseField MAPPINGS = new ParseField("mappings");
    private static final ParseField SETTINGS = new ParseField("settings");
    private static final ParseField ALIASES = new ParseField("aliases");

    private String cause = "";

@@ -376,14 +382,14 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
    public CreateIndexRequest source(Map<String, ?> source) {
        for (Map.Entry<String, ?> entry : source.entrySet()) {
            String name = entry.getKey();
            if (name.equals("settings")) {
            if (SETTINGS.match(name)) {
                settings((Map<String, Object>) entry.getValue());
            } else if (name.equals("mappings")) {
            } else if (MAPPINGS.match(name)) {
                Map<String, Object> mappings = (Map<String, Object>) entry.getValue();
                for (Map.Entry<String, Object> entry1 : mappings.entrySet()) {
                    mapping(entry1.getKey(), (Map<String, Object>) entry1.getValue());
                }
            } else if (name.equals("aliases")) {
            } else if (ALIASES.match(name)) {
                aliases((Map<String, Object>) entry.getValue());
            } else {
                // maybe custom?

@@ -520,4 +526,32 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
        out.writeBoolean(updateAllTypes);
        waitForActiveShards.writeTo(out);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();

        builder.startObject(SETTINGS.getPreferredName());
        settings.toXContent(builder, params);
        builder.endObject();

        builder.startObject(MAPPINGS.getPreferredName());
        for (Map.Entry<String, String> entry : mappings.entrySet()) {
            builder.rawField(entry.getKey(), new BytesArray(entry.getValue()), XContentType.JSON);
        }
        builder.endObject();

        builder.startObject(ALIASES.getPreferredName());
        for (Alias alias : aliases) {
            alias.toXContent(builder, params);
        }
        builder.endObject();

        for (Map.Entry<String, IndexMetaData.Custom> entry : customs.entrySet()) {
            builder.field(entry.getKey(), entry.getValue(), params);
        }

        builder.endObject();
        return builder;
    }
}
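For orientation, the new CreateIndexRequest#toXContent emits the same top-level shape the create-index REST body uses: settings, mappings, and aliases objects plus any custom metadata fields. A hedged sketch of producing that shape with a JSON builder (the setting value is illustrative):

    XContentBuilder builder = XContentFactory.jsonBuilder();
    builder.startObject();
    builder.startObject("settings").field("index.number_of_shards", 1).endObject();
    builder.startObject("mappings").endObject(); // raw per-type mapping JSON is copied in here
    builder.startObject("aliases").endObject();  // one nested object per Alias
    builder.endObject();
    // roughly: {"settings":{"index.number_of_shards":1},"mappings":{},"aliases":{}}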
@@ -39,20 +39,17 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru
 */
public class CreateIndexResponse extends AcknowledgedResponse implements ToXContentObject {

    private static final String SHARDS_ACKNOWLEDGED = "shards_acknowledged";
    private static final String INDEX = "index";

    private static final ParseField SHARDS_ACKNOWLEDGED_PARSER = new ParseField(SHARDS_ACKNOWLEDGED);
    private static final ParseField INDEX_PARSER = new ParseField(INDEX);
    private static final ParseField SHARDS_ACKNOWLEDGED = new ParseField("shards_acknowledged");
    private static final ParseField INDEX = new ParseField("index");

    private static final ConstructingObjectParser<CreateIndexResponse, Void> PARSER = new ConstructingObjectParser<>("create_index",
        true, args -> new CreateIndexResponse((boolean) args[0], (boolean) args[1], (String) args[2]));

    static {
        declareAcknowledgedField(PARSER);
        PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SHARDS_ACKNOWLEDGED_PARSER,
        PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SHARDS_ACKNOWLEDGED,
            ObjectParser.ValueType.BOOLEAN);
        PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), INDEX_PARSER, ObjectParser.ValueType.STRING);
        PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), INDEX, ObjectParser.ValueType.STRING);
    }

    private boolean shardsAcked;

@@ -102,8 +99,8 @@ public class CreateIndexResponse extends AcknowledgedResponse implements ToXCont
    }

    public void addCustomFields(XContentBuilder builder) throws IOException {
        builder.field(SHARDS_ACKNOWLEDGED, isShardsAcked());
        builder.field(INDEX, index());
        builder.field(SHARDS_ACKNOWLEDGED.getPreferredName(), isShardsAcked());
        builder.field(INDEX.getPreferredName(), index());
    }

    @Override
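The refactoring collapses each String constant plus companion ParseField into a single ParseField whose getPreferredName() is used when serializing. A minimal, self-contained sketch of the ConstructingObjectParser pattern these response classes share (the Point type and its fields are illustrative only, not part of the commit):

    import org.elasticsearch.common.ParseField;
    import org.elasticsearch.common.xcontent.ConstructingObjectParser;

    import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

    class Point {
        final int x;
        final int y;
        Point(int x, int y) { this.x = x; this.y = y; }

        // constructor args arrive positionally, in the order declared below
        static final ConstructingObjectParser<Point, Void> PARSER = new ConstructingObjectParser<>(
            "point", true, args -> new Point((int) args[0], (int) args[1]));
        static {
            PARSER.declareInt(constructorArg(), new ParseField("x"));
            PARSER.declareInt(constructorArg(), new ParseField("y"));
        }
    }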
@@ -36,9 +36,11 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.List;

/**

@@ -46,10 +48,15 @@ import java.util.List;
 */
public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndexRequest, GetIndexResponse> {

    private final IndicesService indicesService;

    @Inject
    public TransportGetIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                   ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, GetIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, GetIndexRequest::new, indexNameExpressionResolver);
                                   ThreadPool threadPool, ActionFilters actionFilters,
                                   IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService) {
        super(settings, GetIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, GetIndexRequest::new,
            indexNameExpressionResolver);
        this.indicesService = indicesService;
    }

    @Override

@@ -60,7 +67,8 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex

    @Override
    protected ClusterBlockException checkBlock(GetIndexRequest request, ClusterState state) {
        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ,
            indexNameExpressionResolver.concreteIndexNames(state, request));
    }

    @Override

@@ -82,8 +90,14 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
            switch (feature) {
                case MAPPINGS:
                    if (!doneMappings) {
                        mappingsResult = state.metaData().findMappings(concreteIndices, request.types());
                        try {
                            mappingsResult = state.metaData().findMappings(concreteIndices, request.types(),
                                indicesService.getFieldFilter());
                            doneMappings = true;
                        } catch (IOException e) {
                            listener.onFailure(e);
                            return;
                        }
                    }
                    break;
                case ALIASES:
@@ -29,13 +29,12 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentFieldMappers;

@@ -50,7 +49,10 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import static java.util.Collections.singletonMap;

@@ -69,7 +71,8 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
    public TransportGetFieldMappingsIndexAction(Settings settings, ClusterService clusterService, TransportService transportService,
                                                IndicesService indicesService, ThreadPool threadPool, ActionFilters actionFilters,
                                                IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, GetFieldMappingsIndexRequest::new, ThreadPool.Names.MANAGEMENT);
        super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
            GetFieldMappingsIndexRequest::new, ThreadPool.Names.MANAGEMENT);
        this.clusterService = clusterService;
        this.indicesService = indicesService;
    }

@@ -90,6 +93,9 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
    protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) {
        assert shardId != null;
        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
        Predicate<String> metadataFieldPredicate = indicesService::isMetaDataField;
        Predicate<String> fieldPredicate = metadataFieldPredicate.or(indicesService.getFieldFilter().apply(shardId.getIndexName()));

        Collection<String> typeIntersection;
        if (request.types().length == 0) {
            typeIntersection = indexService.mapperService().types();

@@ -104,16 +110,15 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
            }
        }

        MapBuilder<String, Map<String, FieldMappingMetaData>> typeMappings = new MapBuilder<>();
        Map<String, Map<String, FieldMappingMetaData>> typeMappings = new HashMap<>();
        for (String type : typeIntersection) {
            DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
            Map<String, FieldMappingMetaData> fieldMapping = findFieldMappingsByType(documentMapper, request);
            Map<String, FieldMappingMetaData> fieldMapping = findFieldMappingsByType(fieldPredicate, documentMapper, request);
            if (!fieldMapping.isEmpty()) {
                typeMappings.put(type, fieldMapping);
            }
        }

        return new GetFieldMappingsResponse(singletonMap(shardId.getIndexName(), typeMappings.immutableMap()));
        return new GetFieldMappingsResponse(singletonMap(shardId.getIndexName(), Collections.unmodifiableMap(typeMappings)));
    }

    @Override

@@ -163,47 +168,50 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
        }
    };

    private Map<String, FieldMappingMetaData> findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) {
        MapBuilder<String, FieldMappingMetaData> fieldMappings = new MapBuilder<>();
    private static Map<String, FieldMappingMetaData> findFieldMappingsByType(Predicate<String> fieldPredicate,
                                                                             DocumentMapper documentMapper,
                                                                             GetFieldMappingsIndexRequest request) {
        Map<String, FieldMappingMetaData> fieldMappings = new HashMap<>();
        final DocumentFieldMappers allFieldMappers = documentMapper.mappers();
        for (String field : request.fields()) {
            if (Regex.isMatchAllPattern(field)) {
                for (FieldMapper fieldMapper : allFieldMappers) {
                    addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
                    addFieldMapper(fieldPredicate, fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
                }
            } else if (Regex.isSimpleMatchPattern(field)) {
                for (FieldMapper fieldMapper : allFieldMappers) {
                    if (Regex.simpleMatch(field, fieldMapper.fieldType().name())) {
                        addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings,
                            request.includeDefaults());
                        addFieldMapper(fieldPredicate, fieldMapper.fieldType().name(),
                            fieldMapper, fieldMappings, request.includeDefaults());
                    }
                }
            } else {
                // not a pattern
                FieldMapper fieldMapper = allFieldMappers.smartNameFieldMapper(field);
                if (fieldMapper != null) {
                    addFieldMapper(field, fieldMapper, fieldMappings, request.includeDefaults());
                    addFieldMapper(fieldPredicate, field, fieldMapper, fieldMappings, request.includeDefaults());
                } else if (request.probablySingleFieldRequest()) {
                    fieldMappings.put(field, FieldMappingMetaData.NULL);
                }
            }
        }
        return fieldMappings.immutableMap();
        return Collections.unmodifiableMap(fieldMappings);
    }

    private void addFieldMapper(String field, FieldMapper fieldMapper, MapBuilder<String, FieldMappingMetaData> fieldMappings, boolean includeDefaults) {
    private static void addFieldMapper(Predicate<String> fieldPredicate,
                                       String field, FieldMapper fieldMapper, Map<String, FieldMappingMetaData> fieldMappings,
                                       boolean includeDefaults) {
        if (fieldMappings.containsKey(field)) {
            return;
        }
        if (fieldPredicate.test(field)) {
            try {
                XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
                builder.startObject();
                fieldMapper.toXContent(builder, includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS);
                builder.endObject();
                fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.fieldType().name(), builder.bytes()));
                BytesReference bytes = XContentHelper.toXContent(fieldMapper, XContentType.JSON,
                    includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS, false);
                fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.fieldType().name(), bytes));
            } catch (IOException e) {
                throw new ElasticsearchException("failed to serialize XContent of field [" + field + "]", e);
            }
        }

    }
}
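The heart of the change is the composed predicate: metadata fields always pass, everything else must survive the per-index field filter obtained from IndicesService. A self-contained sketch with stand-in predicates (the field names below are made up):

    import java.util.function.Predicate;

    public class FieldPredicateDemo {
        public static void main(String[] args) {
            // stand-in for indicesService::isMetaDataField
            Predicate<String> metadataFieldPredicate = field -> field.startsWith("_");
            // stand-in for indicesService.getFieldFilter().apply(indexName)
            Predicate<String> fieldFilter = field -> !field.equals("hidden_field");

            Predicate<String> fieldPredicate = metadataFieldPredicate.or(fieldFilter);

            System.out.println(fieldPredicate.test("_id"));          // true: metadata always passes
            System.out.println(fieldPredicate.test("title"));        // true: allowed by the filter
            System.out.println(fieldPredicate.test("hidden_field")); // false: filtered out
        }
    }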
@@ -31,15 +31,23 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;

public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMappingsRequest, GetMappingsResponse> {

    private final IndicesService indicesService;

    @Inject
    public TransportGetMappingsAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                      ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, GetMappingsAction.NAME, transportService, clusterService, threadPool, actionFilters, GetMappingsRequest::new, indexNameExpressionResolver);
                                      ThreadPool threadPool, ActionFilters actionFilters,
                                      IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService) {
        super(settings, GetMappingsAction.NAME, transportService, clusterService, threadPool, actionFilters, GetMappingsRequest::new,
            indexNameExpressionResolver);
        this.indicesService = indicesService;
    }

    @Override

@@ -50,7 +58,8 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMa

    @Override
    protected ClusterBlockException checkBlock(GetMappingsRequest request, ClusterState state) {
        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ,
            indexNameExpressionResolver.concreteIndexNames(state, request));
    }

    @Override

@@ -59,11 +68,15 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMa
    }

    @Override
    protected void doMasterOperation(final GetMappingsRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetMappingsResponse> listener) {
    protected void doMasterOperation(final GetMappingsRequest request, String[] concreteIndices, final ClusterState state,
                                     final ActionListener<GetMappingsResponse> listener) {
        logger.trace("serving getMapping request based on version {}", state.version());
        ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result = state.metaData().findMappings(
            concreteIndices, request.types()
        );
        try {
            ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result =
                state.metaData().findMappings(concreteIndices, request.types(), indicesService.getFieldFilter());
            listener.onResponse(new GetMappingsResponse(result));
        } catch (IOException e) {
            listener.onFailure(e);
        }
    }
}
@@ -21,15 +21,34 @@ package org.elasticsearch.action.admin.indices.open;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

/**
 * A response for a open index action.
 */
public class OpenIndexResponse extends AcknowledgedResponse {
public class OpenIndexResponse extends AcknowledgedResponse implements ToXContentObject {
    private static final String SHARDS_ACKNOWLEDGED = "shards_acknowledged";
    private static final ParseField SHARDS_ACKNOWLEDGED_PARSER = new ParseField(SHARDS_ACKNOWLEDGED);

    private static final ConstructingObjectParser<OpenIndexResponse, Void> PARSER = new ConstructingObjectParser<>("open_index", true,
        args -> new OpenIndexResponse((boolean) args[0], (boolean) args[1]));

    static {
        declareAcknowledgedField(PARSER);
        PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SHARDS_ACKNOWLEDGED_PARSER,
            ObjectParser.ValueType.BOOLEAN);
    }

    private boolean shardsAcknowledged;

@@ -68,4 +87,17 @@ public class OpenIndexResponse extends AcknowledgedResponse {
            out.writeBoolean(shardsAcknowledged);
        }
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        addAcknowledgedField(builder);
        builder.field(SHARDS_ACKNOWLEDGED, isShardsAcknowledged());
        builder.endObject();
        return builder;
    }

    public static OpenIndexResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }
}
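A hedged usage sketch for the new parsing hook, assuming the 6.x-era XContent.createParser(NamedXContentRegistry, String) signature (the JSON literal is illustrative):

    String json = "{\"acknowledged\":true,\"shards_acknowledged\":true}";
    try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, json)) {
        OpenIndexResponse response = OpenIndexResponse.fromXContent(parser);
        assert response.isAcknowledged();
    }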
@@ -185,7 +185,6 @@ public class BulkProcessor implements Closeable {

    private BulkRequest bulkRequest;
    private final BulkRequestHandler bulkRequestHandler;
    private final Scheduler scheduler;
    private final Runnable onClose;

    private volatile boolean closed = false;

@@ -196,7 +195,6 @@ public class BulkProcessor implements Closeable {
        this.bulkActions = bulkActions;
        this.bulkSize = bulkSize.getBytes();
        this.bulkRequest = new BulkRequest();
        this.scheduler = scheduler;
        this.bulkRequestHandler = new BulkRequestHandler(consumer, backoffPolicy, listener, scheduler, concurrentRequests);
        // Start period flushing task after everything is setup
        this.cancellableFlushTask = startFlushTask(flushInterval, scheduler);
@@ -40,6 +40,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

public class TransportFieldCapabilitiesIndexAction extends TransportSingleShardAction<FieldCapabilitiesIndexRequest,
    FieldCapabilitiesIndexResponse> {

@@ -77,14 +78,17 @@ public class TransportFieldCapabilitiesIndexAction extends TransportSingleShardA
        for (String field : request.fields()) {
            fieldNames.addAll(mapperService.simpleMatchToIndexNames(field));
        }
        Predicate<String> fieldPredicate = indicesService.getFieldFilter().apply(shardId.getIndexName());
        Map<String, FieldCapabilities> responseMap = new HashMap<>();
        for (String field : fieldNames) {
            MappedFieldType ft = mapperService.fullName(field);
            if (ft != null) {
                FieldCapabilities fieldCap = new FieldCapabilities(field, ft.typeName(), ft.isSearchable(), ft.isAggregatable());
                if (indicesService.isMetaDataField(field) || fieldPredicate.test(field)) {
                    responseMap.put(field, fieldCap);
                }
            }
        }
        return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), responseMap);
    }

@@ -24,7 +24,6 @@ import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;

@@ -69,7 +68,6 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G

    @Override
    protected void resolveRequest(ClusterState state, InternalRequest request) {
        IndexMetaData indexMeta = state.getMetaData().index(request.concreteIndex());
        // update the routing (request#index here is possibly an alias)
        request.request().routing(state.metaData().resolveIndexRouting(request.request().parent(), request.request().routing(), request.request().index()));
        // Fail fast on the node that received the request.
@@ -165,6 +165,7 @@ final class ExpandSearchPhase extends SearchPhase {
        }
        groupSource.explain(options.isExplain());
        groupSource.trackScores(options.isTrackScores());
        groupSource.version(options.isVersion());
        return groupSource;
    }
}
@@ -23,20 +23,36 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue;

/**
 * A multi search API request.
 */
public class MultiSearchRequest extends ActionRequest implements CompositeIndicesRequest {

    public static final int MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT = 0;

    private int maxConcurrentSearchRequests = 0;
    private List<SearchRequest> requests = new ArrayList<>();

@@ -131,4 +147,171 @@ public class MultiSearchRequest extends ActionRequest implements CompositeIndice
            request.writeTo(out);
        }
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        MultiSearchRequest that = (MultiSearchRequest) o;
        return maxConcurrentSearchRequests == that.maxConcurrentSearchRequests &&
            Objects.equals(requests, that.requests) &&
            Objects.equals(indicesOptions, that.indicesOptions);
    }

    @Override
    public int hashCode() {
        return Objects.hash(maxConcurrentSearchRequests, requests, indicesOptions);
    }

    public static void readMultiLineFormat(BytesReference data,
                                           XContent xContent,
                                           CheckedBiConsumer<SearchRequest, XContentParser, IOException> consumer,
                                           String[] indices,
                                           IndicesOptions indicesOptions,
                                           String[] types,
                                           String routing,
                                           String searchType,
                                           NamedXContentRegistry registry,
                                           boolean allowExplicitIndex) throws IOException {
        int from = 0;
        int length = data.length();
        byte marker = xContent.streamSeparator();
        while (true) {
            int nextMarker = findNextMarker(marker, from, data, length);
            if (nextMarker == -1) {
                break;
            }
            // support first line with \n
            if (nextMarker == 0) {
                from = nextMarker + 1;
                continue;
            }

            SearchRequest searchRequest = new SearchRequest();
            if (indices != null) {
                searchRequest.indices(indices);
            }
            if (indicesOptions != null) {
                searchRequest.indicesOptions(indicesOptions);
            }
            if (types != null && types.length > 0) {
                searchRequest.types(types);
            }
            if (routing != null) {
                searchRequest.routing(routing);
            }
            if (searchType != null) {
                searchRequest.searchType(searchType);
            }
            IndicesOptions defaultOptions = SearchRequest.DEFAULT_INDICES_OPTIONS;
            // now parse the action
            if (nextMarker - from > 0) {
                try (XContentParser parser = xContent.createParser(registry, data.slice(from, nextMarker - from))) {
                    Map<String, Object> source = parser.map();
                    for (Map.Entry<String, Object> entry : source.entrySet()) {
                        Object value = entry.getValue();
                        if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) {
                            if (!allowExplicitIndex) {
                                throw new IllegalArgumentException("explicit index in multi search is not allowed");
                            }
                            searchRequest.indices(nodeStringArrayValue(value));
                        } else if ("type".equals(entry.getKey()) || "types".equals(entry.getKey())) {
                            searchRequest.types(nodeStringArrayValue(value));
                        } else if ("search_type".equals(entry.getKey()) || "searchType".equals(entry.getKey())) {
                            searchRequest.searchType(nodeStringValue(value, null));
                        } else if ("request_cache".equals(entry.getKey()) || "requestCache".equals(entry.getKey())) {
                            searchRequest.requestCache(nodeBooleanValue(value, entry.getKey()));
                        } else if ("preference".equals(entry.getKey())) {
                            searchRequest.preference(nodeStringValue(value, null));
                        } else if ("routing".equals(entry.getKey())) {
                            searchRequest.routing(nodeStringValue(value, null));
                        }
                    }
                    defaultOptions = IndicesOptions.fromMap(source, defaultOptions);
                }
            }
            searchRequest.indicesOptions(defaultOptions);

            // move pointers
            from = nextMarker + 1;
            // now for the body
            nextMarker = findNextMarker(marker, from, data, length);
            if (nextMarker == -1) {
                break;
            }
            BytesReference bytes = data.slice(from, nextMarker - from);
            try (XContentParser parser = xContent.createParser(registry, bytes)) {
                consumer.accept(searchRequest, parser);
            }
            // move pointers
            from = nextMarker + 1;
        }
    }

    private static int findNextMarker(byte marker, int from, BytesReference data, int length) {
        for (int i = from; i < length; i++) {
            if (data.get(i) == marker) {
                return i;
            }
        }
        if (from != length) {
            throw new IllegalArgumentException("The msearch request must be terminated by a newline [\n]");
        }
        return -1;
    }

    public static byte[] writeMultiLineFormat(MultiSearchRequest multiSearchRequest, XContent xContent) throws IOException {
        ByteArrayOutputStream output = new ByteArrayOutputStream();
        for (SearchRequest request : multiSearchRequest.requests()) {
            try (XContentBuilder xContentBuilder = XContentBuilder.builder(xContent)) {
                xContentBuilder.startObject();
                if (request.indices() != null) {
                    xContentBuilder.field("index", request.indices());
                }
                if (request.indicesOptions() != null && request.indicesOptions() != SearchRequest.DEFAULT_INDICES_OPTIONS) {
                    if (request.indicesOptions().expandWildcardsOpen() && request.indicesOptions().expandWildcardsClosed()) {
                        xContentBuilder.field("expand_wildcards", "all");
                    } else if (request.indicesOptions().expandWildcardsOpen()) {
                        xContentBuilder.field("expand_wildcards", "open");
                    } else if (request.indicesOptions().expandWildcardsClosed()) {
                        xContentBuilder.field("expand_wildcards", "closed");
                    } else {
                        xContentBuilder.field("expand_wildcards", "none");
                    }
                    xContentBuilder.field("ignore_unavailable", request.indicesOptions().ignoreUnavailable());
                    xContentBuilder.field("allow_no_indices", request.indicesOptions().allowNoIndices());
                }
                if (request.types() != null) {
                    xContentBuilder.field("types", request.types());
                }
                if (request.searchType() != null) {
                    xContentBuilder.field("search_type", request.searchType().name().toLowerCase(Locale.ROOT));
                }
                if (request.requestCache() != null) {
                    xContentBuilder.field("request_cache", request.requestCache());
                }
                if (request.preference() != null) {
                    xContentBuilder.field("preference", request.preference());
                }
                if (request.routing() != null) {
                    xContentBuilder.field("routing", request.routing());
                }
                xContentBuilder.endObject();
                xContentBuilder.bytes().writeTo(output);
            }
            output.write(xContent.streamSeparator());
            try (XContentBuilder xContentBuilder = XContentBuilder.builder(xContent)) {
                if (request.source() != null) {
                    request.source().toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
                } else {
                    xContentBuilder.startObject();
                    xContentBuilder.endObject();
                }
                xContentBuilder.bytes().writeTo(output);
            }
            output.write(xContent.streamSeparator());
        }
        return output.toByteArray();
    }

}
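Taken together, readMultiLineFormat and writeMultiLineFormat round-trip the newline-delimited msearch body: one header line per request carrying index/types/search_type/request_cache/preference/routing, then one line with the search source, each terminated by the stream separator. A hedged illustration of such a body (index names and queries are made up):

    {"index": "test"}
    {"query": {"match_all": {}}}
    {"index": "test-2", "search_type": "dfs_query_then_fetch", "request_cache": false}
    {"query": {"term": {"user": "kimchy"}}}

Note the terminating newline after the last line: findNextMarker throws if the final request is not newline-terminated.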
@@ -24,23 +24,39 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;

import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

/**
 * A multi search response.
 */
public class MultiSearchResponse extends ActionResponse implements Iterable<MultiSearchResponse.Item>, ToXContentObject {

    private static final ParseField RESPONSES = new ParseField(Fields.RESPONSES);
    private static final ParseField TOOK_IN_MILLIS = new ParseField("took");
    private static final ConstructingObjectParser<MultiSearchResponse, Void> PARSER = new ConstructingObjectParser<>("multi_search",
        true, a -> new MultiSearchResponse(((List<Item>)a[0]).toArray(new Item[0]), (long) a[1]));
    static {
        PARSER.declareObjectArray(constructorArg(), (p, c) -> itemFromXContent(p), RESPONSES);
        PARSER.declareLong(constructorArg(), TOOK_IN_MILLIS);
    }

    /**
     * A search response item, holding the actual search response, or an error message if it failed.
     */

@@ -188,6 +204,45 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<Mult
        return builder;
    }

    public static MultiSearchResponse fromXContext(XContentParser parser) {
        return PARSER.apply(parser, null);
    }

    private static MultiSearchResponse.Item itemFromXContent(XContentParser parser) throws IOException {
        // This parsing logic is a bit tricky here, because the multi search response itself is tricky:
        // 1) The json objects inside the responses array are either a search response or a serialized exception
        // 2) Each response json object gets a status field injected that ElasticsearchException.failureFromXContent(...) does not parse,
        //    but SearchResponse.innerFromXContent(...) parses and then ignores. The status field is not needed to parse
        //    the response item. However in both cases this method does need to parse the 'status' field otherwise the parsing of
        //    the response item in the next json array element will fail due to parsing errors.

        Item item = null;
        String fieldName = null;

        Token token = parser.nextToken();
        assert token == Token.FIELD_NAME;
        outer: for (; token != Token.END_OBJECT; token = parser.nextToken()) {
            switch (token) {
                case FIELD_NAME:
                    fieldName = parser.currentName();
                    if ("error".equals(fieldName)) {
                        item = new Item(null, ElasticsearchException.failureFromXContent(parser));
                    } else if ("status".equals(fieldName) == false) {
                        item = new Item(SearchResponse.innerFromXContent(parser), null);
                        break outer;
                    }
                    break;
                case VALUE_NUMBER:
                    if ("status".equals(fieldName)) {
                        // Ignore the status value
                    }
                    break;
            }
        }
        assert parser.currentToken() == Token.END_OBJECT;
        return item;
    }

    static final class Fields {
        static final String RESPONSES = "responses";
        static final String STATUS = "status";
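itemFromXContent must cope with the two shapes an element of the responses array can take. A hedged illustration of the payload (all values made up): a successful item is a search response with an injected status field, a failed one is an error object plus status:

    {"responses":[
      {"took":3,"hits":{"total":1},"status":200},
      {"error":{"type":"index_not_found_exception","reason":"no such index"},"status":404}
    ],"took":5}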
@@ -65,6 +65,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.function.IntFunction;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

@@ -73,13 +74,16 @@ public final class SearchPhaseController extends AbstractComponent {

    private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];

    private final BigArrays bigArrays;
    private final ScriptService scriptService;
    private final Function<Boolean, ReduceContext> reduceContextFunction;

    public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService) {
    /**
     * Constructor.
     * @param settings Node settings
     * @param reduceContextFunction A function that builds a context for the reduce of an {@link InternalAggregation}
     */
    public SearchPhaseController(Settings settings, Function<Boolean, ReduceContext> reduceContextFunction) {
        super(settings);
        this.bigArrays = bigArrays;
        this.scriptService = scriptService;
        this.reduceContextFunction = reduceContextFunction;
    }

    public AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) {

@@ -496,7 +500,7 @@ public final class SearchPhaseController extends AbstractComponent {
            }
        }
        final Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions));
        ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, true);
        ReduceContext reduceContext = reduceContextFunction.apply(true);
        final InternalAggregations aggregations = aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList,
            firstResult.pipelineAggregators(), reduceContext);
        final SearchProfileShardResults shardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults);

@@ -513,7 +517,7 @@ public final class SearchPhaseController extends AbstractComponent {
     * that relevant for the final reduce step. For final reduce see {@link #reduceAggs(List, List, ReduceContext)}
     */
    private InternalAggregations reduceAggsIncrementally(List<InternalAggregations> aggregationsList) {
        ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, false);
        ReduceContext reduceContext = reduceContextFunction.apply(false);
        return aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList,
            null, reduceContext);
    }
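The controller no longer builds ReduceContext itself; the caller injects a factory keyed on whether this is the final reduce. A hedged wiring sketch, reusing the ReduceContext constructor the removed lines called (bigArrays and scriptService are assumed to be in scope at the call site):

    SearchPhaseController controller = new SearchPhaseController(settings,
        isFinalReduce -> new InternalAggregation.ReduceContext(bigArrays, scriptService, isFinalReduce));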
@@ -161,17 +161,21 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (source != null && source.trackTotalHits() == false && scroll() != null) {
        final Scroll scroll = scroll();
        if (source != null && source.trackTotalHits() == false && scroll != null) {
            validationException =
                addValidationError("disabling [track_total_hits] is not allowed in a scroll context", validationException);
        }
        if (source != null && source.from() > 0 && scroll() != null) {
        if (source != null && source.from() > 0 && scroll != null) {
            validationException =
                addValidationError("using [from] is not allowed in a scroll context", validationException);
        }
        if (requestCache != null && requestCache && scroll() != null) {
        if (requestCache != null && requestCache && scroll != null) {
            validationException =
                addValidationError("[request_cache] cannot be used in a a scroll context", validationException);
                addValidationError("[request_cache] cannot be used in a scroll context", validationException);
        }
        if (source != null && source.size() == 0 && scroll != null) {
            validationException = addValidationError("[size] cannot be [0] in a scroll context", validationException);
        }
        return validationException;
    }
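A hedged sketch of a request the newly added rule rejects, assuming SearchSourceBuilder and TimeValue imports: combining a scroll with size 0 now fails validation instead of silently returning empty pages.

    SearchRequest request = new SearchRequest("my-index")
        .scroll(TimeValue.timeValueMinutes(1))
        .source(new SearchSourceBuilder().size(0));
    // validate() now returns "[size] cannot be [0] in a scroll context"
    assert request.validate() != null;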
@@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestActions;
import org.elasticsearch.search.SearchHits;

@@ -242,9 +243,14 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
    }

    public static SearchResponse fromXContent(XContentParser parser) throws IOException {
        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
        XContentParser.Token token;
        String currentFieldName = null;
        ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
        parser.nextToken();
        return innerFromXContent(parser);
    }

    static SearchResponse innerFromXContent(XContentParser parser) throws IOException {
        ensureExpectedToken(Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation);
        String currentFieldName = parser.currentName();
        SearchHits hits = null;
        Aggregations aggs = null;
        Suggest suggest = null;

@@ -259,8 +265,8 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
        String scrollId = null;
        List<ShardSearchFailure> failures = new ArrayList<>();
        Clusters clusters = Clusters.EMPTY;
        while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
        for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
            if (token == Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token.isValue()) {
                if (SCROLL_ID.match(currentFieldName)) {

@@ -276,7 +282,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
                } else {
                    parser.skipChildren();
                }
            } else if (token == XContentParser.Token.START_OBJECT) {
            } else if (token == Token.START_OBJECT) {
                if (SearchHits.Fields.HITS.equals(currentFieldName)) {
                    hits = SearchHits.fromXContent(parser);
                } else if (Aggregations.AGGREGATIONS_FIELD.equals(currentFieldName)) {

@@ -286,8 +292,8 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
                } else if (SearchProfileShardResults.PROFILE_FIELD.equals(currentFieldName)) {
                    profile = SearchProfileShardResults.fromXContent(parser);
                } else if (RestActions._SHARDS_FIELD.match(currentFieldName)) {
                    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                        if (token == XContentParser.Token.FIELD_NAME) {
                    while ((token = parser.nextToken()) != Token.END_OBJECT) {
                        if (token == Token.FIELD_NAME) {
                            currentFieldName = parser.currentName();
                        } else if (token.isValue()) {
                            if (RestActions.FAILED_FIELD.match(currentFieldName)) {

@@ -301,9 +307,9 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
                } else {
                    parser.skipChildren();
                }
            } else if (token == XContentParser.Token.START_ARRAY) {
            } else if (token == Token.START_ARRAY) {
                if (RestActions.FAILURES_FIELD.match(currentFieldName)) {
                    while((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                    while((token = parser.nextToken()) != Token.END_ARRAY) {
                        failures.add(ShardSearchFailure.fromXContent(parser));
                    }
                } else {
@@ -76,7 +76,7 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
        clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);

        int maxConcurrentSearches = request.maxConcurrentSearchRequests();
        if (maxConcurrentSearches == 0) {
        if (maxConcurrentSearches == MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) {
            maxConcurrentSearches = defaultMaxConcurrentSearches(availableProcessors, clusterState);
        }

@@ -130,7 +130,7 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
                 * of concurrent requests. At first glance, it appears that we should never poll from the queue and not obtain a request given
                 * that we only poll here no more times than the number of requests. However, this is not the only consumer of this queue as
                 * earlier requests that have already completed will poll from the queue too and they could complete before later polls are
                 * invoked here. Thus, it can be the case that we poll here and and the queue was empty.
                 * invoked here. Thus, it can be the case that we poll here and the queue was empty.
                 */
                return;
            }
@@ -37,11 +37,10 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru
 */
public abstract class AcknowledgedResponse extends ActionResponse {

    private static final String ACKNOWLEDGED = "acknowledged";
    private static final ParseField ACKNOWLEDGED_PARSER = new ParseField(ACKNOWLEDGED);
    private static final ParseField ACKNOWLEDGED = new ParseField("acknowledged");

    protected static <T extends AcknowledgedResponse> void declareAcknowledgedField(ConstructingObjectParser<T, Void> PARSER) {
        PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), ACKNOWLEDGED_PARSER,
        PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), ACKNOWLEDGED,
            ObjectParser.ValueType.BOOLEAN);
    }

@@ -78,6 +77,6 @@ public abstract class AcknowledgedResponse extends ActionResponse {
    }

    protected void addAcknowledgedField(XContentBuilder builder) throws IOException {
        builder.field(ACKNOWLEDGED, isAcknowledged());
        builder.field(ACKNOWLEDGED.getPreferredName(), isAcknowledged());
    }
}
@@ -39,13 +39,4 @@ public abstract class SingleShardOperationRequestBuilder<Request extends SingleS
        request.index(index);
        return (RequestBuilder) this;
    }

    /**
     * Controls if the operation will be executed on a separate thread when executed locally.
     */
    @SuppressWarnings("unchecked")
    public final RequestBuilder setOperationThreaded(boolean threadedOperation) {
        request.operationThreaded(threadedOperation);
        return (RequestBuilder) this;
    }
}
@@ -94,22 +94,6 @@ public abstract class SingleShardRequest<Request extends SingleShardRequest<Requ
        return INDICES_OPTIONS;
    }

    /**
     * Controls if the operation will be executed on a separate thread when executed locally.
     */
    public boolean operationThreaded() {
        return threadedOperation;
    }

    /**
     * Controls if the operation will be executed on a separate thread when executed locally.
     */
    @SuppressWarnings("unchecked")
    public final Request operationThreaded(boolean threadedOperation) {
        this.threadedOperation = threadedOperation;
        return (Request) this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
@@ -277,7 +277,6 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
        @Override
        public void messageReceived(Request request, final TransportChannel channel) throws Exception {
            // if we have a local operation, execute it on a thread since we don't spawn
            request.operationThreaded(true);
            execute(request, new ActionListener<Response>() {
                @Override
                public void onResponse(Response result) {
@@ -46,9 +46,9 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.os.OsProbe;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeValidationException;
import org.elasticsearch.node.InternalSettingsPreparer;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

@@ -57,8 +57,8 @@ import java.io.UnsupportedEncodingException;
import java.net.URISyntaxException;
import java.nio.file.Path;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;

/**

@@ -239,7 +239,6 @@ final class Bootstrap {
    }

    private static Environment createEnvironment(
            final boolean foreground,
            final Path pidFile,
            final SecureSettings secureSettings,
            final Settings initialSettings,

@@ -283,7 +282,7 @@ final class Bootstrap {
        INSTANCE = new Bootstrap();

        final SecureSettings keystore = loadSecureSettings(initialEnv);
        final Environment environment = createEnvironment(foreground, pidFile, keystore, initialEnv.settings(), initialEnv.configFile());
        final Environment environment = createEnvironment(pidFile, keystore, initialEnv.settings(), initialEnv.configFile());
        try {
            LogConfigurator.configure(environment);
        } catch (IOException e) {
@@ -43,6 +43,7 @@ import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.node.InternalSettingsPreparer;

@@ -79,7 +80,11 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
 * <p>
 * The transport client important modules used is the {@link org.elasticsearch.common.network.NetworkModule} which is
 * started in client mode (only connects, no bind).
 *
 * @deprecated {@link TransportClient} is deprecated in favour of the High Level REST client and will
 * be removed in Elasticsearch 8.0.
 */
@Deprecated
public abstract class TransportClient extends AbstractClient {

    public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL =

@@ -165,11 +170,12 @@ public abstract class TransportClient extends AbstractClient {
            CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
                settingsModule.getClusterSettings());
            resourcesToClose.add(circuitBreakerService);
            BigArrays bigArrays = new BigArrays(settings, circuitBreakerService);
            PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(settings);
            BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService);
            resourcesToClose.add(bigArrays);
            modules.add(settingsModule);
            NetworkModule networkModule = new NetworkModule(settings, true, pluginsService.filterPlugins(NetworkPlugin.class), threadPool,
                bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, null);
                bigArrays, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, null);
            final Transport transport = networkModule.getTransportSupplier().get();
            final TransportService transportService = new TransportService(settings, transport, threadPool,
                networkModule.getTransportInterceptor(),

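Note: the @deprecated javadoc added above points to the High Level REST client as the replacement. A minimal connection sketch, assuming the 6.x builder-based client API (host and port are placeholders, not taken from this diff):

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class HighLevelRestClientSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder endpoint; the high-level client owns the low-level RestClient and is Closeable.
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // issue requests via client.index(...), client.search(...), etc.
        }
    }
}
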
@@ -54,6 +54,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.ResizeAllocationDeci
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.RestoreInProgressAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseField;

@@ -191,6 +192,7 @@ public class ClusterModule extends AbstractModule {
        addAllocationDecider(deciders, new EnableAllocationDecider(settings, clusterSettings));
        addAllocationDecider(deciders, new NodeVersionAllocationDecider(settings));
        addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider(settings));
        addAllocationDecider(deciders, new RestoreInProgressAllocationDecider(settings));
        addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings));
        addAllocationDecider(deciders, new SameShardAllocationDecider(settings, clusterSettings));
        addAllocationDecider(deciders, new DiskThresholdDecider(settings, clusterSettings));

@@ -27,6 +27,7 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

@@ -58,7 +59,7 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
        this.indexRouting = indexRouting;
        this.searchRouting = searchRouting;
        if (searchRouting != null) {
            searchRoutingValues = Collections.unmodifiableSet(Strings.splitStringByCommaToSet(searchRouting));
            searchRoutingValues = Collections.unmodifiableSet(Sets.newHashSet(Strings.splitStringByCommaToArray(searchRouting)));
        } else {
            searchRoutingValues = emptySet();
        }

@@ -186,7 +187,7 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
        }
        if (in.readBoolean()) {
            searchRouting = in.readString();
            searchRoutingValues = Collections.unmodifiableSet(Strings.splitStringByCommaToSet(searchRouting));
            searchRoutingValues = Collections.unmodifiableSet(Sets.newHashSet(Strings.splitStringByCommaToArray(searchRouting)));
        } else {
            searchRouting = null;
            searchRoutingValues = emptySet();

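Note: the switch from splitStringByCommaToSet to Sets.newHashSet(splitStringByCommaToArray(...)) changes how routing values are parsed, apparently so that values keep their exact characters: the old tokenizing path trimmed whitespace and dropped empty tokens, while String.split preserves whitespace inside values. A small standalone sketch of the difference (illustrative values, not the Elasticsearch classes themselves):

import java.util.Arrays;
import java.util.StringTokenizer;

public class RoutingSplitSketch {
    public static void main(String[] args) {
        String searchRouting = "route 1 ,route2";

        // Old path (tokenizing): trims whitespace, so "route 1 " arrives as "route 1".
        StringTokenizer tokenizer = new StringTokenizer(searchRouting, ",");
        while (tokenizer.hasMoreTokens()) {
            System.out.println("[" + tokenizer.nextToken().trim() + "]"); // [route 1] then [route2]
        }

        // New path (String.split): values are preserved verbatim, whitespace included.
        System.out.println(Arrays.toString(searchRouting.split(","))); // [route 1 , route2]
    }
}
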
@@ -31,6 +31,7 @@ import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;

@@ -358,6 +359,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
            resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions);
        }

        // TODO: it appears that this can never be true?
        if (isAllIndices(resolvedExpressions)) {
            return resolveSearchRoutingAllIndices(state.metaData(), routing);
        }

@@ -367,7 +369,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
        // List of indices that don't require any routing
        Set<String> norouting = new HashSet<>();
        if (routing != null) {
            paramRouting = Strings.splitStringByCommaToSet(routing);
            paramRouting = Sets.newHashSet(Strings.splitStringByCommaToArray(routing));
        }

        for (String expression : resolvedExpressions) {

@@ -442,9 +444,9 @@ public class IndexNameExpressionResolver extends AbstractComponent {
    /**
     * Sets the same routing for all indices
     */
    private Map<String, Set<String>> resolveSearchRoutingAllIndices(MetaData metaData, String routing) {
    public Map<String, Set<String>> resolveSearchRoutingAllIndices(MetaData metaData, String routing) {
        if (routing != null) {
            Set<String> r = Strings.splitStringByCommaToSet(routing);
            Set<String> r = Sets.newHashSet(Strings.splitStringByCommaToArray(routing));
            Map<String, Set<String>> routings = new HashMap<>();
            String[] concreteIndices = metaData.getConcreteAllIndices();
            for (String index : concreteIndices) {

@@ -93,6 +93,9 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
                                 ImmutableOpenMap<String, CompressedXContent> mappings,
                                 ImmutableOpenMap<String, AliasMetaData> aliases,
                                 ImmutableOpenMap<String, IndexMetaData.Custom> customs) {
        if (patterns == null || patterns.isEmpty()) {
            throw new IllegalArgumentException("Index patterns must not be null or empty; got " + patterns);
        }
        this.name = name;
        this.order = order;
        this.version = version;

@@ -244,7 +247,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
            out.writeStringList(patterns);
        } else {
            out.writeString(patterns.size() > 0 ? patterns.get(0) : "");
            out.writeString(patterns.get(0));
        }
        Settings.writeSettingsToStream(settings, out);
        out.writeVInt(mappings.size());

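Note: the constructor check in the first hunk above is what lets the second hunk drop the patterns.size() > 0 guard; a template can no longer exist with a null or empty pattern list, so writing the first pattern is always safe. A minimal sketch of that invariant (hypothetical class, not IndexTemplateMetaData itself):

import java.util.List;

class TemplateLike {
    final List<String> patterns;

    TemplateLike(List<String> patterns) {
        if (patterns == null || patterns.isEmpty()) {
            throw new IllegalArgumentException("Index patterns must not be null or empty; got " + patterns);
        }
        this.patterns = patterns;
    }

    String firstPatternForLegacyWire() {
        return patterns.get(0); // safe: the constructor guarantees at least one element
    }
}
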
@@ -107,15 +107,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
        initMappers(withoutType);
    }

    private MappingMetaData() {
        this.type = "";
        try {
            this.source = new CompressedXContent("{}");
        } catch (IOException ex) {
            throw new IllegalStateException("Cannot create MappingMetaData prototype", ex);
        }
    }

    private void initMappers(Map<String, Object> withoutType) {
        if (withoutType.containsKey("_routing")) {
            boolean required = false;

@@ -143,13 +134,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
        }
    }

    public MappingMetaData(String type, CompressedXContent source, Routing routing, boolean hasParentField) {
        this.type = type;
        this.source = source;
        this.routing = routing;
        this.hasParentField = hasParentField;
    }

    void updateDefaultMapping(MappingMetaData defaultMapping) {
        if (routing == Routing.EMPTY) {
            routing = defaultMapping.routing();

@@ -250,5 +234,4 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
    public static Diff<MappingMetaData> readDiffFrom(StreamInput in) throws IOException {
        return readDiffFrom(MappingMetaData::new, in);
    }

}

@@ -48,11 +48,13 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

@@ -69,6 +71,8 @@ import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.function.Predicate;

import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;

@@ -324,32 +328,38 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
        return false;
    }

    /*
     * Finds all mappings for types and concrete indices. Types are expanded to
     * include all types that match the glob patterns in the types array. Empty
     * types array, null or {"_all"} will be expanded to all types available for
     * the given indices.
    /**
     * Finds all mappings for types and concrete indices. Types are expanded to include all types that match the glob
     * patterns in the types array. Empty types array, null or {"_all"} will be expanded to all types available for
     * the given indices. Only fields that match the provided field filter will be returned (default is a predicate
     * that always returns true, which can be overridden via plugins)
     *
     * @see MapperPlugin#getFieldFilter()
     *
     */
    public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> findMappings(String[] concreteIndices, final String[] types) {
    public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> findMappings(String[] concreteIndices,
                                                                                            final String[] types,
                                                                                            Function<String, Predicate<String>> fieldFilter)
            throws IOException {
        assert types != null;
        assert concreteIndices != null;
        if (concreteIndices.length == 0) {
            return ImmutableOpenMap.of();
        }

        boolean isAllTypes = isAllTypes(types);
        ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> indexMapBuilder = ImmutableOpenMap.builder();
        Iterable<String> intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
        for (String index : intersection) {
            IndexMetaData indexMetaData = indices.get(index);
            ImmutableOpenMap.Builder<String, MappingMetaData> filteredMappings;
            if (isAllTypes(types)) {
                indexMapBuilder.put(index, indexMetaData.getMappings()); // No types specified means get it all

            Predicate<String> fieldPredicate = fieldFilter.apply(index);
            if (isAllTypes) {
                indexMapBuilder.put(index, filterFields(indexMetaData.getMappings(), fieldPredicate));
            } else {
                filteredMappings = ImmutableOpenMap.builder();
                ImmutableOpenMap.Builder<String, MappingMetaData> filteredMappings = ImmutableOpenMap.builder();
                for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.getMappings()) {
                    if (Regex.simpleMatch(types, cursor.key)) {
                        filteredMappings.put(cursor.key, cursor.value);
                        filteredMappings.put(cursor.key, filterFields(cursor.value, fieldPredicate));
                    }
                }
                if (!filteredMappings.isEmpty()) {

@@ -360,6 +370,95 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
        return indexMapBuilder.build();
    }

    private static ImmutableOpenMap<String, MappingMetaData> filterFields(ImmutableOpenMap<String, MappingMetaData> mappings,
                                                                          Predicate<String> fieldPredicate) throws IOException {
        if (fieldPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) {
            return mappings;
        }
        ImmutableOpenMap.Builder<String, MappingMetaData> builder = ImmutableOpenMap.builder(mappings.size());
        for (ObjectObjectCursor<String, MappingMetaData> cursor : mappings) {
            builder.put(cursor.key, filterFields(cursor.value, fieldPredicate));
        }
        return builder.build(); // No types specified means return them all
    }

    @SuppressWarnings("unchecked")
    private static MappingMetaData filterFields(MappingMetaData mappingMetaData, Predicate<String> fieldPredicate) throws IOException {
        if (fieldPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) {
            return mappingMetaData;
        }
        Map<String, Object> sourceAsMap = XContentHelper.convertToMap(mappingMetaData.source().compressedReference(), true).v2();
        Map<String, Object> mapping;
        if (sourceAsMap.size() == 1 && sourceAsMap.containsKey(mappingMetaData.type())) {
            mapping = (Map<String, Object>) sourceAsMap.get(mappingMetaData.type());
        } else {
            mapping = sourceAsMap;
        }

        Map<String, Object> properties = (Map<String, Object>)mapping.get("properties");
        if (properties == null || properties.isEmpty()) {
            return mappingMetaData;
        }

        filterFields("", properties, fieldPredicate);

        return new MappingMetaData(mappingMetaData.type(), sourceAsMap);
    }

    @SuppressWarnings("unchecked")
    private static boolean filterFields(String currentPath, Map<String, Object> fields, Predicate<String> fieldPredicate) {
        assert fieldPredicate != MapperPlugin.NOOP_FIELD_PREDICATE;
        Iterator<Map.Entry<String, Object>> entryIterator = fields.entrySet().iterator();
        while (entryIterator.hasNext()) {
            Map.Entry<String, Object> entry = entryIterator.next();
            String newPath = mergePaths(currentPath, entry.getKey());
            Object value = entry.getValue();
            boolean mayRemove = true;
            boolean isMultiField = false;
            if (value instanceof Map) {
                Map<String, Object> map = (Map<String, Object>) value;
                Map<String, Object> properties = (Map<String, Object>)map.get("properties");
                if (properties != null) {
                    mayRemove = filterFields(newPath, properties, fieldPredicate);
                } else {
                    Map<String, Object> subFields = (Map<String, Object>)map.get("fields");
                    if (subFields != null) {
                        isMultiField = true;
                        if (mayRemove = filterFields(newPath, subFields, fieldPredicate)) {
                            map.remove("fields");
                        }
                    }
                }
            } else {
                throw new IllegalStateException("cannot filter mappings, found unknown element of type [" + value.getClass() + "]");
            }

            //only remove a field if it has no sub-fields left and it has to be excluded
            if (fieldPredicate.test(newPath) == false) {
                if (mayRemove) {
                    entryIterator.remove();
                } else if (isMultiField) {
                    //multi fields that should be excluded but hold subfields that don't have to be excluded are converted to objects
                    Map<String, Object> map = (Map<String, Object>) value;
                    Map<String, Object> subFields = (Map<String, Object>)map.get("fields");
                    assert subFields.size() > 0;
                    map.put("properties", subFields);
                    map.remove("fields");
                    map.remove("type");
                }
            }
        }
        //return true if the ancestor may be removed, as it has no sub-fields left
        return fields.size() == 0;
    }

    private static String mergePaths(String path, String field) {
        if (path.length() == 0) {
            return field;
        }
        return path + "." + field;
    }

    /**
     * Returns all the concrete indices.
     */

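Note: the recursive filterFields above walks a mapping's "properties" tree, builds dotted field paths, and removes entries the plugin-supplied predicate rejects, keeping parents that still have surviving children. A compact standalone sketch of the same idea over plain maps (simplified: no multi-field handling, hypothetical field names):

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.function.Predicate;

public class FieldFilterSketch {

    // Returns true when all children were removed, so the caller may drop the parent too.
    @SuppressWarnings("unchecked")
    static boolean filter(String path, Map<String, Object> fields, Predicate<String> keep) {
        Iterator<Map.Entry<String, Object>> it = fields.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, Object> entry = it.next();
            String newPath = path.isEmpty() ? entry.getKey() : path + "." + entry.getKey();
            boolean mayRemove = true;
            Object value = entry.getValue();
            if (value instanceof Map) {
                Map<String, Object> properties = (Map<String, Object>) ((Map<String, Object>) value).get("properties");
                if (properties != null) {
                    mayRemove = filter(newPath, properties, keep); // recurse into sub-objects
                }
            }
            if (keep.test(newPath) == false && mayRemove) {
                it.remove(); // excluded and no surviving children
            }
        }
        return fields.isEmpty();
    }

    public static void main(String[] args) {
        Map<String, Object> zip = new HashMap<>();
        zip.put("type", "keyword");
        Map<String, Object> addressProps = new HashMap<>();
        addressProps.put("zip", zip);
        Map<String, Object> address = new HashMap<>();
        address.put("properties", addressProps);
        Map<String, Object> name = new HashMap<>();
        name.put("type", "text");
        Map<String, Object> properties = new HashMap<>();
        properties.put("address", address);
        properties.put("name", name);

        filter("", properties, fieldPath -> fieldPath.startsWith("address") == false);
        System.out.println(properties.keySet()); // prints [name]: address and address.zip were filtered out
    }
}
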
@@ -82,7 +82,6 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
    public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) {
        // Throws an exception if there are too-old segments:
        if (isUpgraded(indexMetaData)) {
            assert indexMetaData == archiveBrokenIndexSettings(indexMetaData) : "all settings must have been upgraded before";
            return indexMetaData;
        }
        checkSupportedVersion(indexMetaData, minimumIndexCompatibilityVersion);

@@ -293,7 +293,9 @@ public class MetaDataMappingService extends AbstractComponent {
        }
        assert mappingType != null;

        if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && mappingType.charAt(0) == '_') {
        if (MapperService.DEFAULT_MAPPING.equals(mappingType) == false
            && MapperService.SINGLE_MAPPING_NAME.equals(mappingType) == false
            && mappingType.charAt(0) == '_') {
            throw new InvalidTypeNameException("Document mapping type name can't start with '_', found: [" + mappingType + "]");
        }
        MetaData.Builder builder = MetaData.builder(metaData);

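Note: for context on the relaxed condition, MapperService.DEFAULT_MAPPING is "_default_" and MapperService.SINGLE_MAPPING_NAME is "_doc" in this codebase, so after this change "_doc" joins "_default_" as the only type names allowed to start with an underscore. A condensed sketch of the check with the constants inlined:

public class TypeNameCheckSketch {
    static boolean isAcceptableTypeName(String mappingType) {
        return "_default_".equals(mappingType)
            || "_doc".equals(mappingType)
            || mappingType.charAt(0) != '_';
    }

    public static void main(String[] args) {
        System.out.println(isAcceptableTypeName("_doc"));      // true (newly allowed)
        System.out.println(isAcceptableTypeName("_default_")); // true
        System.out.println(isAcceptableTypeName("_foo"));      // false -> InvalidTypeNameException
        System.out.println(isAcceptableTypeName("tweet"));     // true
    }
}
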
@@ -38,6 +38,7 @@ import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

@@ -54,7 +55,6 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

import static org.elasticsearch.action.support.ContextPreservingActionListener.wrapPreservingContext;

@@ -164,13 +164,16 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
        Settings.Builder settingsForOpenIndices = Settings.builder();
        final Set<String> skippedSettings = new HashSet<>();

        indexScopedSettings.validate(normalizedSettings, false); // don't validate dependencies here we check it below
        // never allow to change the number of shards
        indexScopedSettings.validate(normalizedSettings.filter(s -> Regex.isSimpleMatchPattern(s) == false /* don't validate wildcards */),
            false); //don't validate dependencies here we check it below never allow to change the number of shards
        for (String key : normalizedSettings.keySet()) {
            Setting setting = indexScopedSettings.get(key);
            assert setting != null; // we already validated the normalized settings
            boolean isWildcard = setting == null && Regex.isSimpleMatchPattern(key);
            assert setting != null // we already validated the normalized settings
                || (isWildcard && normalizedSettings.hasValue(key) == false)
                : "unknown setting: " + key + " isWildcard: " + isWildcard + " hasValue: " + normalizedSettings.hasValue(key);
            settingsForClosedIndices.copy(key, normalizedSettings);
            if (setting.isDynamic()) {
            if (isWildcard || setting.isDynamic()) {
                settingsForOpenIndices.copy(key, normalizedSettings);
            } else {
                skippedSettings.add(key);

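Note: the net effect of this hunk is that an update-settings request may now carry simple wildcard keys (for example index.routing.allocation.* style patterns used to reset a group of settings): such keys resolve to no concrete Setting, skip per-setting validation, and are always forwarded to open indices. A rough standalone sketch of the key routing (Regex.isSimpleMatchPattern treats any key containing '*' as a pattern; the dynamic flags below are illustrative, not taken from this diff):

import java.util.ArrayList;
import java.util.List;

public class WildcardSettingSketch {
    // Mirrors Regex.isSimpleMatchPattern: any '*' marks the key as a wildcard pattern.
    static boolean isSimpleMatchPattern(String key) {
        return key.indexOf('*') != -1;
    }

    public static void main(String[] args) {
        List<String> forOpenIndices = new ArrayList<>();
        List<String> skipped = new ArrayList<>();
        String[] keys = {"index.refresh_interval", "index.routing.allocation.*", "index.codec"};
        for (String key : keys) {
            boolean dynamic = key.equals("index.refresh_interval"); // assumed for the sketch
            if (isSimpleMatchPattern(key) || dynamic) {
                forOpenIndices.add(key); // wildcards bypass validation and are treated as dynamic
            } else {
                skipped.add(key);
            }
        }
        System.out.println(forOpenIndices + " / skipped: " + skipped);
    }
}
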
@@ -39,7 +39,6 @@ import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;

@@ -357,8 +356,6 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {

        // Retrieve which nodes we can potentially send the query to
        final Set<String> nodeIds = getAllNodeIds(shards);
        final int nodeCount = nodeIds.size();

        final Map<String, Optional<ResponseCollectorService.ComputedNodeStats>> nodeStats = getNodeStats(nodeIds, collector);

        // Retrieve all the nodes the shards exist on

@@ -424,16 +421,6 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
        }
    }

    /**
     * Returns true if no primaries are active or initializing for this shard
     */
    private boolean noPrimariesActive() {
        if (!primaryAsList.isEmpty() && !primaryAsList.get(0).active() && !primaryAsList.get(0).initializing()) {
            return true;
        }
        return false;
    }

    /**
     * Returns an iterator only on the primary shard.
     */

@@ -40,6 +40,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;

@@ -631,7 +632,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
            // if the activeReplica was relocating before this call to failShard, its relocation was cancelled earlier when we
            // failed initializing replica shards (and moved replica relocation source back to started)
            assert activeReplica.started() : "replica relocation should have been cancelled: " + activeReplica;
            ShardRouting primarySwappedCandidate = promoteActiveReplicaShardToPrimary(activeReplica);
            promoteActiveReplicaShardToPrimary(activeReplica);
            routingChangesObserver.replicaPromoted(activeReplica);
        }

@@ -1043,26 +1044,27 @@ public class RoutingNodes implements Iterable<RoutingNode> {
                indicesAndShards.put(shard.index(), Math.max(i, shard.id()));
            }
        }

        // Assert that the active shard routing are identical.
        Set<Map.Entry<Index, Integer>> entries = indicesAndShards.entrySet();
        final List<ShardRouting> shards = new ArrayList<>();
        for (Map.Entry<Index, Integer> e : entries) {
            Index index = e.getKey();
            for (int i = 0; i < e.getValue(); i++) {
                for (RoutingNode routingNode : routingNodes) {
                    for (ShardRouting shardRouting : routingNode) {
                        if (shardRouting.index().equals(index) && shardRouting.id() == i) {

        final Map<ShardId, HashSet<ShardRouting>> shardsByShardId = new HashMap<>();
        for (final RoutingNode routingNode: routingNodes) {
            for (final ShardRouting shardRouting : routingNode) {
                final HashSet<ShardRouting> shards =
                    shardsByShardId.computeIfAbsent(new ShardId(shardRouting.index(), shardRouting.id()), k -> new HashSet<>());
                shards.add(shardRouting);
            }
        }
        }
        List<ShardRouting> mutableShardRoutings = routingNodes.assignedShards(new ShardId(index, i));
        assert mutableShardRoutings.size() == shards.size();
        for (ShardRouting r : mutableShardRoutings) {
            assert shards.contains(r);
            shards.remove(r);
        }
        assert shards.isEmpty();

        for (final Map.Entry<Index, Integer> e : entries) {
            final Index index = e.getKey();
            for (int i = 0; i < e.getValue(); i++) {
                final ShardId shardId = new ShardId(index, i);
                final HashSet<ShardRouting> shards = shardsByShardId.get(shardId);
                final List<ShardRouting> mutableShardRoutings = routingNodes.assignedShards(shardId);
                assert (shards == null && mutableShardRoutings.size() == 0)
                    || (shards != null && shards.size() == mutableShardRoutings.size() && shards.containsAll(mutableShardRoutings));
            }
        }

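Note: the rewritten assertion replaces the old quadruple-nested scan with a single pass that groups every ShardRouting by its ShardId via Map.computeIfAbsent, then compares each group against routingNodes.assignedShards(shardId). The grouping idiom in isolation (simplified types standing in for RoutingNode and ShardRouting):

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class GroupByShardIdSketch {
    public static void main(String[] args) {
        // pairs of {nodeId, shardId}, standing in for iterating RoutingNode/ShardRouting
        List<String[]> assignments = Arrays.asList(
            new String[] {"node1", "index[0]"},
            new String[] {"node2", "index[0]"},
            new String[] {"node2", "index[1]"});

        Map<String, Set<String>> byShard = new HashMap<>();
        for (String[] assignment : assignments) {
            // create the group lazily on first sight of each shard id, then add the node
            byShard.computeIfAbsent(assignment[1], k -> new HashSet<>()).add(assignment[0]);
        }
        System.out.println(byShard); // {index[0]=[node1, node2], index[1]=[node2]}
    }
}
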
@@ -48,6 +48,8 @@ import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;

import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;

@@ -135,13 +137,14 @@ public class AllocationService extends AbstractComponent {
        return newState;
    }

    // Used for testing
    public ClusterState applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
        return applyFailedShards(clusterState, Collections.singletonList(new FailedShard(failedShard, null, null)),
            Collections.emptyList());
        return applyFailedShards(clusterState, singletonList(new FailedShard(failedShard, null, null)), emptyList());
    }

    // Used for testing
    public ClusterState applyFailedShards(ClusterState clusterState, List<FailedShard> failedShards) {
        return applyFailedShards(clusterState, failedShards, emptyList());
        return applyFailedShards(clusterState, failedShards, emptyList());
    }

    /**

@@ -0,0 +1,86 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing.allocation.decider;

import org.elasticsearch.cluster.RestoreInProgress;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.snapshots.Snapshot;

/**
 * This {@link AllocationDecider} prevents shards that have failed to be
 * restored from a snapshot to be allocated.
 */
public class RestoreInProgressAllocationDecider extends AllocationDecider {

    public static final String NAME = "restore_in_progress";

    /**
     * Creates a new {@link RestoreInProgressAllocationDecider} instance from
     * given settings
     *
     * @param settings {@link Settings} to use
     */
    public RestoreInProgressAllocationDecider(Settings settings) {
        super(settings);
    }

    @Override
    public Decision canAllocate(final ShardRouting shardRouting, final RoutingNode node, final RoutingAllocation allocation) {
        return canAllocate(shardRouting, allocation);
    }

    @Override
    public Decision canAllocate(final ShardRouting shardRouting, final RoutingAllocation allocation) {
        final RecoverySource recoverySource = shardRouting.recoverySource();
        if (recoverySource == null || recoverySource.getType() != RecoverySource.Type.SNAPSHOT) {
            return allocation.decision(Decision.YES, NAME, "ignored as shard is not being recovered from a snapshot");
        }

        final Snapshot snapshot = ((RecoverySource.SnapshotRecoverySource) recoverySource).snapshot();
        final RestoreInProgress restoresInProgress = allocation.custom(RestoreInProgress.TYPE);

        if (restoresInProgress != null) {
            for (RestoreInProgress.Entry restoreInProgress : restoresInProgress.entries()) {
                if (restoreInProgress.snapshot().equals(snapshot)) {
                    RestoreInProgress.ShardRestoreStatus shardRestoreStatus = restoreInProgress.shards().get(shardRouting.shardId());
                    if (shardRestoreStatus != null && shardRestoreStatus.state().completed() == false) {
                        assert shardRestoreStatus.state() != RestoreInProgress.State.SUCCESS : "expected shard [" + shardRouting
                            + "] to be in initializing state but got [" + shardRestoreStatus.state() + "]";
                        return allocation.decision(Decision.YES, NAME, "shard is currently being restored");
                    }
                    break;
                }
            }
        }
        return allocation.decision(Decision.NO, NAME, "shard has failed to be restored from the snapshot [%s] because of [%s] - " +
            "manually close or delete the index [%s] in order to retry to restore the snapshot again or use the reroute API to force the " +
            "allocation of an empty primary shard", snapshot, shardRouting.unassignedInfo().getDetails(), shardRouting.getIndexName());
    }

    @Override
    public Decision canForceAllocatePrimary(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        assert shardRouting.primary() : "must not call canForceAllocatePrimary on a non-primary shard " + shardRouting;
        return canAllocate(shardRouting, node, allocation);
    }
}

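Note: a condensed standalone sketch of the three outcomes of the new decider's canAllocate (simplified types, not the Elasticsearch classes): snapshot recoveries are allowed while their restore entry is still in flight, refused once the restore has completed without the shard starting, and everything else is ignored.

public class RestoreDeciderSketch {
    enum RecoveryType { SNAPSHOT, PEER, EXISTING_STORE }

    // restoreStillRunning is null when there is no restore entry for the shard's snapshot.
    static String decide(RecoveryType type, Boolean restoreStillRunning) {
        if (type != RecoveryType.SNAPSHOT) {
            return "YES: ignored as shard is not being recovered from a snapshot";
        }
        if (restoreStillRunning != null && restoreStillRunning) {
            return "YES: shard is currently being restored";
        }
        return "NO: restore failed; close/delete the index or use the reroute API to force an empty primary";
    }

    public static void main(String[] args) {
        System.out.println(decide(RecoveryType.PEER, null));
        System.out.println(decide(RecoveryType.SNAPSHOT, true));
        System.out.println(decide(RecoveryType.SNAPSHOT, false));
    }
}
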
@@ -133,8 +133,11 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
        Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
        Objects.requireNonNull(state.get(), "please set initial state before starting");
        addListener(localNodeMasterListeners);
        threadPoolExecutor = EsExecutors.newSinglePrioritizing(CLUSTER_UPDATE_THREAD_NAME,
            daemonThreadFactory(settings, CLUSTER_UPDATE_THREAD_NAME), threadPool.getThreadContext(), threadPool.scheduler());
        threadPoolExecutor = EsExecutors.newSinglePrioritizing(
                nodeName() + "/" + CLUSTER_UPDATE_THREAD_NAME,
                daemonThreadFactory(settings, CLUSTER_UPDATE_THREAD_NAME),
                threadPool.getThreadContext(),
                threadPool.scheduler());
    }

    class UpdateTask extends SourcePrioritizedRunnable implements Function<ClusterState, ClusterState> {

@@ -104,8 +104,11 @@ public class MasterService extends AbstractLifecycleComponent {
    protected synchronized void doStart() {
        Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
        Objects.requireNonNull(clusterStateSupplier, "please set a cluster state supplier before starting");
        threadPoolExecutor = EsExecutors.newSinglePrioritizing(MASTER_UPDATE_THREAD_NAME,
            daemonThreadFactory(settings, MASTER_UPDATE_THREAD_NAME), threadPool.getThreadContext(), threadPool.scheduler());
        threadPoolExecutor = EsExecutors.newSinglePrioritizing(
                nodeName() + "/" + MASTER_UPDATE_THREAD_NAME,
                daemonThreadFactory(settings, MASTER_UPDATE_THREAD_NAME),
                threadPool.getThreadContext(),
                threadPool.scheduler());
        taskBatcher = new Batcher(logger, threadPoolExecutor);
    }

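Note: both hunks above prefix the single-threaded executor's name with the node name. When several nodes share one JVM, as in internal test clusters, thread dumps then attribute cluster-state threads to a specific node. A trivial illustration of the resulting names (node name and thread-name constants shown here are assumptions for the sketch, not taken from this diff):

public class ThreadNameSketch {
    public static void main(String[] args) {
        String nodeName = "node_t0"; // hypothetical node name
        System.out.println(nodeName + "/" + "clusterApplierService#updateTask");
        System.out.println(nodeName + "/" + "masterService#updateTask");
    }
}
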
@@ -41,6 +41,7 @@ import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.TreeSet;
import java.util.function.Supplier;

import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.common.util.set.Sets.newHashSet;

@@ -410,62 +411,27 @@ public class Strings {
        return collection.toArray(new String[collection.size()]);
    }

    public static Set<String> splitStringByCommaToSet(final String s) {
        return splitStringToSet(s, ',');
    }

    public static String[] splitStringByCommaToArray(final String s) {
        if (s == null || s.isEmpty()) return Strings.EMPTY_ARRAY;
        else return s.split(",");
    /**
     * Tokenize the specified string by commas to a set, trimming whitespace and ignoring empty tokens.
     *
     * @param s the string to tokenize
     * @return the set of tokens
     */
    public static Set<String> tokenizeByCommaToSet(final String s) {
        if (s == null) return Collections.emptySet();
        return tokenizeToCollection(s, ",", HashSet::new);
    }

    /**
     * A convenience method for splitting a delimited string into
     * a set and trimming leading and trailing whitespace from all
     * split strings.
     * Split the specified string by commas to an array.
     *
     * @param s the string to split
     * @param c the delimiter to split on
     * @return the set of split strings
     * @return the array of split values
     * @see String#split(String)
     */
    public static Set<String> splitStringToSet(final String s, final char c) {
        if (s == null || s.isEmpty()) {
            return Collections.emptySet();
        }
        final char[] chars = s.toCharArray();
        int count = 1;
        for (final char x : chars) {
            if (x == c) {
                count++;
            }
        }
        final Set<String> result = new HashSet<>(count);
        final int len = chars.length;
        int start = 0; // starting index in chars of the current substring.
        int pos = 0; // current index in chars.
        int end = 0; // the position of the end of the current token
        for (; pos < len; pos++) {
            if (chars[pos] == c) {
                int size = end - start;
                if (size > 0) { // only add non empty strings
                    result.add(new String(chars, start, size));
                }
                start = pos + 1;
                end = start;
            } else if (Character.isWhitespace(chars[pos])) {
                if (start == pos) {
                    // skip over preceding whitespace
                    start++;
                }
            } else {
                end = pos + 1;
            }
        }
        int size = end - start;
        if (size > 0) {
            result.add(new String(chars, start, size));
        }
        return result;
    public static String[] splitStringByCommaToArray(final String s) {
        if (s == null || s.isEmpty()) return Strings.EMPTY_ARRAY;
        else return s.split(",");
    }

    /**

@@ -499,7 +465,7 @@ public class Strings {
     * tokens. A delimiter is always a single character; for multi-character
     * delimiters, consider using <code>delimitedListToStringArray</code>
     *
     * @param str the String to tokenize
     * @param s the String to tokenize
     * @param delimiters the delimiter characters, assembled as String
     * (each of those characters is individually considered as delimiter).
     * @return an array of the tokens

@@ -507,48 +473,35 @@ public class Strings {
     * @see java.lang.String#trim()
     * @see #delimitedListToStringArray
     */
    public static String[] tokenizeToStringArray(String str, String delimiters) {
        return tokenizeToStringArray(str, delimiters, true, true);
    public static String[] tokenizeToStringArray(final String s, final String delimiters) {
        return toStringArray(tokenizeToCollection(s, delimiters, ArrayList::new));
    }

    /**
     * Tokenize the given String into a String array via a StringTokenizer.
     * <p>The given delimiters string is supposed to consist of any number of
     * delimiter characters. Each of those characters can be used to separate
     * tokens. A delimiter is always a single character; for multi-character
     * delimiters, consider using <code>delimitedListToStringArray</code>
     * Tokenizes the specified string to a collection using the specified delimiters as the token delimiters. This method trims whitespace
     * from tokens and ignores empty tokens.
     *
     * @param str the String to tokenize
     * @param delimiters the delimiter characters, assembled as String
     * (each of those characters is individually considered as delimiter)
     * @param trimTokens trim the tokens via String's <code>trim</code>
     * @param ignoreEmptyTokens omit empty tokens from the result array
     * (only applies to tokens that are empty after trimming; StringTokenizer
     * will not consider subsequent delimiters as token in the first place).
     * @return an array of the tokens (<code>null</code> if the input String
     * was <code>null</code>)
     * @param s the string to tokenize.
     * @param delimiters the token delimiters
     * @param supplier a collection supplier
     * @param <T> the type of the collection
     * @return the tokens
     * @see java.util.StringTokenizer
     * @see java.lang.String#trim()
     * @see #delimitedListToStringArray
     */
    public static String[] tokenizeToStringArray(
            String str, String delimiters, boolean trimTokens, boolean ignoreEmptyTokens) {

        if (str == null) {
    private static <T extends Collection<String>> T tokenizeToCollection(
            final String s, final String delimiters, final Supplier<T> supplier) {
        if (s == null) {
            return null;
        }
        StringTokenizer st = new StringTokenizer(str, delimiters);
        List<String> tokens = new ArrayList<>();
        while (st.hasMoreTokens()) {
            String token = st.nextToken();
            if (trimTokens) {
                token = token.trim();
            }
            if (!ignoreEmptyTokens || token.length() > 0) {
        final StringTokenizer tokenizer = new StringTokenizer(s, delimiters);
        final T tokens = supplier.get();
        while (tokenizer.hasMoreTokens()) {
            final String token = tokenizer.nextToken().trim();
            if (token.length() > 0) {
                tokens.add(token);
            }
        }
        return toStringArray(tokens);
        return tokens;
    }

    /**

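Note: the refactoring above funnels both public entry points through one Supplier-driven helper, so the caller picks the target collection while the tokenizing loop stays identical. A standalone sketch of that design (not the Strings class itself):

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.function.Supplier;

public class TokenizeToCollectionSketch {

    static <T extends Collection<String>> T tokenize(String s, String delimiters, Supplier<T> supplier) {
        T tokens = supplier.get();
        StringTokenizer tokenizer = new StringTokenizer(s, delimiters);
        while (tokenizer.hasMoreTokens()) {
            String token = tokenizer.nextToken().trim(); // trim whitespace
            if (token.length() > 0) {                    // and drop empty tokens
                tokens.add(token);
            }
        }
        return tokens;
    }

    public static void main(String[] args) {
        List<String> asList = tokenize("a, b,,c", ",", ArrayList::new); // [a, b, c]
        Set<String> asSet = tokenize("a, b,,a", ",", HashSet::new);     // [a, b]
        System.out.println(asList + " " + asSet);
    }
}
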
Some files were not shown because too many files have changed in this diff