Merge branch 'master' into index-lifecycle

commit d7d709a5dc
@@ -549,6 +549,25 @@ class BuildPlugin implements Plugin<Project> {
       javadoc.classpath = javadoc.getClasspath().filter { f ->
         return classes.contains(f) == false
       }
+      /*
+       * Force html5 on projects that support it to silence the warning
+       * that `javadoc` will change its defaults in the future.
+       *
+       * But not all of our javadoc is actually valid html5. So we
+       * have to become valid incrementally. We only set html5 on the
+       * projects we have converted so that we still get the annoying
+       * warning on the unconverted ones. That will give us an
+       * incentive to convert them....
+       */
+      List html4Projects = [
+        ':server',
+        ':libs:elasticsearch-core',
+        ':test:framework',
+        ':x-pack:plugin:core',
+      ]
+      if (false == html4Projects.contains(project.path)) {
+        javadoc.options.addBooleanOption('html5', true)
+      }
     }
     configureJavadocJar(project)
   }
@@ -129,7 +129,8 @@ public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, Bu
     }

     /**
-     * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+     * A timeout to wait if the index operation can't be performed immediately.
+     * Defaults to {@code 1m}.
      */
     public final NoopBulkRequestBuilder setTimeout(TimeValue timeout) {
         request.timeout(timeout);
@@ -137,7 +138,8 @@ public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, Bu
     }

     /**
-     * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+     * A timeout to wait if the index operation can't be performed immediately.
+     * Defaults to {@code 1m}.
      */
     public final NoopBulkRequestBuilder setTimeout(String timeout) {
         request.timeout(timeout);
@@ -151,4 +153,3 @@ public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, Bu
         return request.numberOfActions();
     }
 }
-
@@ -142,7 +142,7 @@ public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest

     /**
      * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
-     * <tt>_local</tt> to prefer local shards or a custom value, which guarantees that the same order
+     * {@code _local} to prefer local shards or a custom value, which guarantees that the same order
      * will be used across different requests.
      */
     public NoopSearchRequestBuilder setPreference(String preference) {
@@ -188,7 +188,7 @@ public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest
     }

     /**
-     * From index to start the search from. Defaults to <tt>0</tt>.
+     * From index to start the search from. Defaults to {@code 0}.
      */
     public NoopSearchRequestBuilder setFrom(int from) {
         sourceBuilder().from(from);
@@ -196,7 +196,7 @@ public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest
     }

     /**
-     * The number of search hits to return. Defaults to <tt>10</tt>.
+     * The number of search hits to return. Defaults to {@code 10}.
      */
     public NoopSearchRequestBuilder setSize(int size) {
         sourceBuilder().size(size);
@@ -349,7 +349,7 @@ public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest

     /**
      * Applies when sorting, and controls if scores will be tracked as well. Defaults to
-     * <tt>false</tt>.
+     * {@code false}.
      */
     public NoopSearchRequestBuilder setTrackScores(boolean trackScores) {
         sourceBuilder().trackScores(trackScores);
@@ -14,6 +14,8 @@

 <<remove-suggest-metric, Removed `suggest` metric on stats APIs>> ({pull}29635[#29635])

+<<remove-field-caps-body, In field capabilities APIs, removed support for providing fields in the request body>> ({pull}30185[#30185])
+
 === Breaking Java Changes

 === Deprecations
@@ -31,7 +33,7 @@ written to by an older Elasticsearch after writing to it with a newer Elasticsea

 === Known Issues

-== Elasticsearch version 6.3.0
+== Elasticsearch version 6.4.0

 === New Features

@@ -19,9 +19,6 @@ the configured remote cluster alias.
 `seeds`::
     The configured initial seed transport addresses of the remote cluster.

-`http_addresses`::
-    The published http addresses of all connected remote nodes.
-
 `connected`::
     True if there is at least one connection to the remote cluster.
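With `http_addresses` gone, consumers should rely on the remaining fields. As a point of reference, a minimal Java sketch of fetching this same information over the transport layer, built from the `RemoteInfoAction`/`RemoteInfoRequest` pair this commit touches elsewhere (the `client` parameter is an assumed, already-constructed `NodeClient`; treat the exact package paths as assumptions):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction;
import org.elasticsearch.action.admin.cluster.remote.RemoteInfoRequest;
import org.elasticsearch.client.node.NodeClient;

class RemoteInfoExample {
    // Fetch remote cluster info; the response carries seeds, connected,
    // num_nodes_connected, etc. -- but no longer http_addresses.
    static void printRemoteInfo(NodeClient client) {
        client.execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest(),
                ActionListener.wrap(
                        response -> System.out.println(response),
                        e -> { throw new RuntimeException(e); }));
    }
}
--------------------------------------------------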
@@ -23,7 +23,8 @@ The merge scheduler supports the following _dynamic_ setting:

 `index.merge.scheduler.max_thread_count`::

-    The maximum number of threads that may be merging at once. Defaults to
+    The maximum number of threads on a single shard that may be merging at once.
+    Defaults to
     `Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2))`
     which works well for a good solid-state-disk (SSD). If your index is on
     spinning platter drives instead, decrease this to 1.
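As a concrete reference, the documented default can be reproduced with a minimal, self-contained Java sketch (the class name is illustrative, not part of Elasticsearch):

[source,java]
--------------------------------------------------
public class MergeSchedulerDefault {
    public static void main(String[] args) {
        // Documented default for index.merge.scheduler.max_thread_count:
        // half the available processors, clamped to the range [1, 4].
        int processors = Runtime.getRuntime().availableProcessors();
        int maxThreadCount = Math.max(1, Math.min(4, processors / 2));
        System.out.println("max_thread_count default = " + maxThreadCount);
    }
}
--------------------------------------------------

On a 2-core machine this yields 1; at 8 cores or more it caps at 4. On spinning disks the docs above recommend lowering it to 1 explicitly.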
@@ -1,12 +0,0 @@
-[[breaking-changes-6.4]]
-== Breaking changes in 6.4
-
-[[breaking_64_api_changes]]
-=== API changes
-
-==== Field capabilities request format
-
-In the past, `fields` could be provided either as a parameter, or as part of the request
-body. Specifying `fields` in the request body is now deprecated, and instead they should
-always be supplied through a request parameter. In 7.0.0, the field capabilities API will
-not accept `fields` supplied in the request body.
@@ -22,6 +22,32 @@ The following parameters starting with underscore have been removed:
 Instead of these removed parameters, use their non camel case equivalents without
 starting underscore, e.g. use `version_type` instead of `_version_type` or `versionType`.

+==== Thread pool info
+
+In previous versions of Elasticsearch, the thread pool info returned in the
+<<cluster-nodes-info,nodes info API>> returned `min` and `max` values reflecting
+the configured minimum and maximum number of threads that could be in each
+thread pool. The trouble with this representation is that it does not align with
+the configuration parameters used to configure thread pools. For
+<<modules-threadpool,scaling thread pools>>, the minimum number of threads is
+configured by a parameter called `core` and the maximum number of threads is
+configured by a parameter called `max`. For <<modules-threadpool,fixed thread
+pools>>, there is only one configuration parameter along these lines and that
+parameter is called `size`, reflecting the fixed number of threads in the
+pool. This discrepancy between the API and the configuration parameters has been
+rectified. Now, the API will report `core` and `max` for scaling thread pools,
+and `size` for fixed thread pools.
+
+Similarly, in the cat thread pool API the existing `size` output has been
+renamed to `pool_size` which reflects the number of threads currently in the
+pool; the shortcut for this value has been changed from `s` to `psz`. The `min`
+output has been renamed to `core` with a shortcut of `cr`, the shortcut for
+`max` has been changed to `mx`, and the `size` output with a shortcut of `sz`
+has been reused to report the configured number of threads in the pool. This
+aligns the output of the API with the configuration values for thread
+pools. Note that `core` and `max` will be populated for scaling thread pools,
+and `size` will be populated for fixed thread pools.
+
+==== The parameter `fields` deprecated in 6.x has been removed from Bulk request and Update request
+
+The Update API returns `400 - Bad request` if the request contains unknown
+parameters (instead of ignoring them, as in previous versions).
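To make the new alignment concrete, here is an illustrative plain-Java sketch (a hypothetical helper, not Elasticsearch's actual classes; `Map.of` needs Java 9+) of which keys each pool type now reports:

[source,java]
--------------------------------------------------
import java.util.Map;

class ThreadPoolInfoShape {
    // Scaling pools are configured with `core` and `max`, and the nodes
    // info API now reports exactly those keys (previously `min`/`max`).
    static Map<String, Integer> scaling(int core, int max) {
        return Map.of("core", core, "max", max);
    }

    // Fixed pools have a single configured `size`, now reported as `size`
    // instead of a redundant `min`/`max` pair.
    static Map<String, Integer> fixed(int size) {
        return Map.of("size", size);
    }

    public static void main(String[] args) {
        System.out.println("scaling pool info: " + scaling(1, 4));
        System.out.println("fixed pool info:   " + fixed(8));
    }
}
--------------------------------------------------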
@@ -33,3 +59,10 @@ Previously, `suggest` stats were folded into `search` stats. Support for the
 `suggest` metric on the indices stats and nodes stats APIs remained for
 backwards compatibility. Backwards support for the `suggest` metric was
 deprecated in 6.3.0 and now removed in 7.0.0.
+
+[[remove-field-caps-body]]
+==== In the field capabilities API, `fields` can no longer be provided in the request body
+
+In the past, `fields` could be provided either as a parameter, or as part of the request
+body. Specifying `fields` in the request body as opposed to a parameter was deprecated
+in 6.4.0, and is now unsupported in 7.0.0.
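The REST handler rewritten later in this commit builds the request exactly this way; as a reference, a minimal Java sketch of the parameter-only form (a hedged example using the builder calls shown in this commit, wrapped in a hypothetical class):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;

class FieldCapsParamStyle {
    // Mirrors GET twitter/_field_caps?fields=rating -- the only supported
    // form once body-based `fields` is removed in 7.0.0.
    static FieldCapabilitiesRequest ratingCaps() {
        return new FieldCapabilitiesRequest()
                .fields("rating")
                .indices("twitter");
    }
}
--------------------------------------------------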
@@ -10,5 +10,9 @@ The changes listed below have been released for the first time in Elasticsearch
 Core::
 * Tribe node has been removed in favor of Cross-Cluster-Search

+Cross-Cluster-Search::
+* `http_addresses` has been removed from the <<cluster-remote-info>> API
+  because it is expensive to fetch and no longer needed by Kibana.
+
 Rest API::
 * The Clear Cache API only supports `POST` as HTTP method
@@ -20,20 +20,6 @@ GET twitter/_field_caps?fields=rating
 // CONSOLE
 // TEST[setup:twitter]

-Alternatively the `fields` option can also be defined in the request body. deprecated[6.4.0, Please use a request parameter instead.]
-
-[source,js]
---------------------------------------------------
-POST _field_caps
-{
-   "fields" : ["rating"]
-}
---------------------------------------------------
-// CONSOLE
-// TEST[warning:Specifying a request body is deprecated -- the [fields] request parameter should be used instead.]
-
-This is equivalent to the previous request.
-
 Supported request options:

 [horizontal]
@@ -3,6 +3,8 @@ setup:
       indices.create:
         index: test
         body:
+          settings:
+            number_of_shards: 1
          mappings:
            test:
              properties:
@@ -55,7 +55,7 @@ import java.util.Map;
 import java.util.Objects;

 /**
- * A query builder for <tt>has_child</tt> query.
+ * A query builder for {@code has_child} query.
  */
 public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuilder> {
     public static final String NAME = "has_child";
@@ -41,7 +41,7 @@ import static org.elasticsearch.index.rankeval.EvaluationMetric.joinHitsWithRati

 /**
  * Metric implementing Discounted Cumulative Gain.
- * The `normalize` parameter can be set to calculate the normalized NDCG (set to <tt>false</tt> by default).<br>
+ * The `normalize` parameter can be set to calculate the normalized NDCG (set to {@code false} by default).<br>
  * The optional `unknown_doc_rating` parameter can be used to specify a default rating for unlabeled documents.
  * @see <a href="https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Discounted_Cumulative_Gain">Discounted Cumulative Gain</a><br>
  */
@@ -192,7 +192,7 @@ public class RatedRequest implements Writeable, ToXContentObject {
         return Collections.unmodifiableMap(this.params);
     }

-    /** return the parameters if this request uses a template, otherwise this will be <tt>null</tt>. */
+    /** return the parameters if this request uses a template, otherwise this will be {@code null}. */
     public String getTemplateId() {
         return this.templateId;
     }
@@ -17,6 +17,10 @@
  * under the License.
  */

+import org.apache.tools.ant.taskdefs.condition.Os
+
+import static org.elasticsearch.gradle.BuildPlugin.getJavaHome
+
 apply plugin: 'elasticsearch.test-with-dependencies'

 esplugin {
@@ -60,3 +64,61 @@ thirdPartyAudit.excludes = [
   'org.apache.log.Hierarchy',
   'org.apache.log.Logger',
 ]
+
+// Support for testing reindex-from-remote against old Elasticsearch versions
+configurations {
+  oldesFixture
+  es2
+  es1
+  es090
+}
+
+dependencies {
+  oldesFixture project(':test:fixtures:old-elasticsearch')
+  /* Right now we just test against the latest version of each major we expect
+   * reindex-from-remote to work against. We could randomize the versions but
+   * that doesn't seem worth it at this point. */
+  es2 'org.elasticsearch.distribution.zip:elasticsearch:2.4.5@zip'
+  es1 'org.elasticsearch:elasticsearch:1.7.6@zip'
+  es090 'org.elasticsearch:elasticsearch:0.90.13@zip'
+}
+
+if (Os.isFamily(Os.FAMILY_WINDOWS)) {
+  // we can't get the pid files in windows so we skip reindex-from-old
+  integTestRunner.systemProperty "tests.fromOld", "false"
+} else {
+  integTestRunner.systemProperty "tests.fromOld", "true"
+  /* Set up tasks to unzip and run the old versions of ES before running the
+   * integration tests. */
+  for (String version : ['2', '1', '090']) {
+    Task unzip = task("unzipEs${version}", type: Sync) {
+      Configuration oldEsDependency = configurations['es' + version]
+      dependsOn oldEsDependency
+      /* Use a closure here to delay resolution of the dependency until we need
+       * it */
+      from {
+        oldEsDependency.collect { zipTree(it) }
+      }
+      into temporaryDir
+    }
+    Task fixture = task("oldEs${version}Fixture",
+        type: org.elasticsearch.gradle.test.AntFixture) {
+      dependsOn project.configurations.oldesFixture
+      dependsOn unzip
+      executable = new File(project.runtimeJavaHome, 'bin/java')
+      env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }"
+      env 'JAVA_HOME', getJavaHome(it, 7)
+      args 'oldes.OldElasticsearch',
+           baseDir,
+           unzip.temporaryDir,
+           version == '090'
+    }
+    integTest.dependsOn fixture
+    integTestRunner {
+      /* Use a closure on the string to delay evaluation until right before we
+       * run the integration tests so that we can be sure that the file is
+       * ready. */
+      systemProperty "es${version}.port", "${ -> fixture.addressAndPort }"
+    }
+  }
+}
@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.smoketest;
+package org.elasticsearch.index.reindex.remote;

 import org.apache.http.HttpEntity;
 import org.apache.http.HttpHost;
@@ -27,6 +27,7 @@ import org.apache.http.util.EntityUtils;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.client.RestClient;
+import org.elasticsearch.common.Booleans;
 import org.elasticsearch.test.rest.ESRestTestCase;

 import java.io.IOException;
@@ -38,6 +39,9 @@ import static org.hamcrest.Matchers.containsString;

 public class ReindexFromOldRemoteIT extends ESRestTestCase {
     private void oldEsTestCase(String portPropertyName, String requestsPerSecond) throws IOException {
+        boolean enabled = Booleans.parseBoolean(System.getProperty("tests.fromOld"));
+        assumeTrue("test is disabled, probably because this is windows", enabled);
+
         int oldEsPort = Integer.parseInt(System.getProperty(portPropertyName));
         try (RestClient oldEs = RestClient.builder(new HttpHost("127.0.0.1", oldEsPort)).build()) {
             try {
@@ -36,12 +36,12 @@ import com.ibm.icu.util.ULocale;

 /**
  * An ICU based collation token filter. There are two ways to configure collation:
- * <p>The first is simply specifying the locale (defaults to the default locale). The <tt>language</tt>
- * parameter is the lowercase two-letter ISO-639 code. An additional <tt>country</tt> and <tt>variant</tt>
+ * <p>The first is simply specifying the locale (defaults to the default locale). The {@code language}
+ * parameter is the lowercase two-letter ISO-639 code. An additional {@code country} and {@code variant}
  * can be provided.
  * <p>The second option is to specify collation rules as defined in the <a href="http://www.icu-project.org/userguide/Collate_Customization.html">
- * Collation customization</a> chapter in icu docs. The <tt>rules</tt> parameter can either embed the rules definition
- * in the settings or refer to an external location (preferable located under the <tt>config</tt> location, relative to it).
+ * Collation customization</a> chapter in icu docs. The {@code rules} parameter can either embed the rules definition
+ * in the settings or refer to an external location (preferable located under the {@code config} location, relative to it).
  */
 public class IcuCollationTokenFilterFactory extends AbstractTokenFilterFactory {

@@ -35,7 +35,7 @@ import org.elasticsearch.index.IndexSettings;
  * Can be filtered to handle certain characters in a specified way (see http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html)
  * E.g national chars that should be retained (filter : "[^åäöÅÄÖ]").
  *
- * <p>The <tt>unicodeSetFilter</tt> attribute can be used to provide the UniCodeSet for filtering.
+ * <p>The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering.
  *
  * @author kimchy (shay.banon)
  */
@@ -32,9 +32,9 @@ import java.io.Reader;

 /**
  * Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter} to normalize character.
- * <p>The <tt>name</tt> can be used to provide the type of normalization to perform.</p>
- * <p>The <tt>mode</tt> can be used to provide 'compose' or 'decompose'. Default is compose.</p>
- * <p>The <tt>unicodeSetFilter</tt> attribute can be used to provide the UniCodeSet for filtering.</p>
+ * <p>The {@code name} can be used to provide the type of normalization to perform.</p>
+ * <p>The {@code mode} can be used to provide 'compose' or 'decompose'. Default is compose.</p>
+ * <p>The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering.</p>
  */
 public class IcuNormalizerCharFilterFactory extends AbstractCharFilterFactory implements MultiTermAwareComponent {

@@ -31,10 +31,8 @@ import org.elasticsearch.index.IndexSettings;

 /**
  * Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2Filter} to normalize tokens.
- * <p>The <tt>name</tt> can be used to provide the type of normalization to perform.</p>
- * <p>The <tt>unicodeSetFilter</tt> attribute can be used to provide the UniCodeSet for filtering.</p>
- *
- *
+ * <p>The {@code name} can be used to provide the type of normalization to perform.</p>
+ * <p>The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering.</p>
  */
 public class IcuNormalizerTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent {

@@ -1,5 +1,3 @@
-import org.elasticsearch.gradle.test.AntFixture
-
 /*
  * Licensed to Elasticsearch under one or more contributor
  * license agreements. See the NOTICE file distributed with
@@ -66,28 +64,14 @@ test {
   exclude '**/*CredentialsTests.class'
 }

 forbiddenApisTest {
   // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
   bundledSignatures -= 'jdk-non-portable'
   bundledSignatures += 'jdk-internal'
 }

-/** A task to start the AmazonS3Fixture which emulates a S3 service **/
-task s3Fixture(type: AntFixture) {
-  dependsOn compileTestJava
-  env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
-  executable = new File(project.runtimeJavaHome, 'bin/java')
-  args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, 'bucket_test'
+check {
+  // also execute the QA tests when testing the plugin
+  dependsOn 'qa:amazon-s3:check'
 }

-integTestCluster {
-  dependsOn s3Fixture
-
-  keystoreSetting 's3.client.integration_test.access_key', "s3_integration_test_access_key"
-  keystoreSetting 's3.client.integration_test.secret_key', "s3_integration_test_secret_key"
-
-  /* Use a closure on the string to delay evaluation until tests are executed */
-  setting 's3.client.integration_test.endpoint', "http://${ -> s3Fixture.addressAndPort }"
-}
-
 thirdPartyAudit.excludes = [
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.gradle.MavenFilteringHack
+import org.elasticsearch.gradle.test.AntFixture
+
+apply plugin: 'elasticsearch.standalone-rest-test'
+apply plugin: 'elasticsearch.rest-test'
+
+dependencies {
+  testCompile project(path: ':plugins:repository-s3', configuration: 'runtime')
+}
+
+integTestCluster {
+  plugin ':plugins:repository-s3'
+}
+
+forbiddenApisTest {
+  // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
+  bundledSignatures -= 'jdk-non-portable'
+  bundledSignatures += 'jdk-internal'
+}
+
+boolean useFixture = false
+
+String s3AccessKey = System.getenv("amazon_s3_access_key")
+String s3SecretKey = System.getenv("amazon_s3_secret_key")
+String s3Bucket = System.getenv("amazon_s3_bucket")
+String s3BasePath = System.getenv("amazon_s3_base_path")
+
+if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) {
+  s3AccessKey = 's3_integration_test_access_key'
+  s3SecretKey = 's3_integration_test_secret_key'
+  s3Bucket = 'bucket_test'
+  s3BasePath = 'integration_test'
+  useFixture = true
+}
+
+/** A task to start the AmazonS3Fixture which emulates a S3 service **/
+task s3Fixture(type: AntFixture) {
+  dependsOn compileTestJava
+  env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
+  executable = new File(project.runtimeJavaHome, 'bin/java')
+  args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3Bucket
+}
+
+Map<String, Object> expansions = [
+  'bucket': s3Bucket,
+  'base_path': s3BasePath
+]
+processTestResources {
+  inputs.properties(expansions)
+  MavenFilteringHack.filter(it, expansions)
+}
+
+integTestCluster {
+  keystoreSetting 's3.client.integration_test.access_key', s3AccessKey
+  keystoreSetting 's3.client.integration_test.secret_key', s3SecretKey
+
+  if (useFixture) {
+    dependsOn s3Fixture
+    /* Use a closure on the string to delay evaluation until tests are executed */
+    setting 's3.client.integration_test.endpoint', "http://${-> s3Fixture.addressAndPort}"
+  } else {
+    println "Using an external service to test the repository-s3 plugin"
+  }
+}
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.s3;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+
+public class AmazonS3RepositoryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
+
+    public AmazonS3RepositoryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws Exception {
+        return ESClientYamlSuiteTestCase.createParameters();
+    }
+}
@@ -0,0 +1,183 @@
+# Integration tests for repository-s3
+---
+"Snapshot/Restore with repository-s3":
+
+  # Register repository
+  - do:
+      snapshot.create_repository:
+        repository: repository
+        body:
+          type: s3
+          settings:
+            bucket: ${bucket}
+            client: integration_test
+            base_path: ${base_path}
+            canned_acl: private
+            storage_class: standard
+
+  - match: { acknowledged: true }
+
+  # Get repository
+  - do:
+      snapshot.get_repository:
+        repository: repository
+
+  - match: { repository.settings.bucket : ${bucket} }
+  - match: { repository.settings.client : "integration_test" }
+  - match: { repository.settings.base_path : ${base_path} }
+  - match: { repository.settings.canned_acl : "private" }
+  - match: { repository.settings.storage_class : "standard" }
+  - is_false: repository.settings.access_key
+  - is_false: repository.settings.secret_key
+
+  # Index documents
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - index:
+              _index: docs
+              _type: doc
+              _id: 1
+          - snapshot: one
+          - index:
+              _index: docs
+              _type: doc
+              _id: 2
+          - snapshot: one
+          - index:
+              _index: docs
+              _type: doc
+              _id: 3
+          - snapshot: one
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 3}
+
+  # Create a first snapshot
+  - do:
+      snapshot.create:
+        repository: repository
+        snapshot: snapshot-one
+        wait_for_completion: true
+
+  - match: { snapshot.snapshot: snapshot-one }
+  - match: { snapshot.state : SUCCESS }
+  - match: { snapshot.include_global_state: true }
+  - match: { snapshot.shards.failed : 0 }
+
+  - do:
+      snapshot.status:
+        repository: repository
+        snapshot: snapshot-one
+
+  - is_true: snapshots
+  - match: { snapshots.0.snapshot: snapshot-one }
+  - match: { snapshots.0.state : SUCCESS }
+
+  # Index more documents
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - index:
+              _index: docs
+              _type: doc
+              _id: 4
+          - snapshot: two
+          - index:
+              _index: docs
+              _type: doc
+              _id: 5
+          - snapshot: two
+          - index:
+              _index: docs
+              _type: doc
+              _id: 6
+          - snapshot: two
+          - index:
+              _index: docs
+              _type: doc
+              _id: 7
+          - snapshot: two
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 7}
+
+  # Create a second snapshot
+  - do:
+      snapshot.create:
+        repository: repository
+        snapshot: snapshot-two
+        wait_for_completion: true
+
+  - match: { snapshot.snapshot: snapshot-two }
+  - match: { snapshot.state : SUCCESS }
+  - match: { snapshot.shards.failed : 0 }
+
+  - do:
+      snapshot.get:
+        repository: repository
+        snapshot: snapshot-one,snapshot-two
+
+  - is_true: snapshots
+  - match: { snapshots.0.state : SUCCESS }
+  - match: { snapshots.1.state : SUCCESS }
+
+  # Delete the index
+  - do:
+      indices.delete:
+        index: docs
+
+  # Restore the second snapshot
+  - do:
+      snapshot.restore:
+        repository: repository
+        snapshot: snapshot-two
+        wait_for_completion: true
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 7}
+
+  # Delete the index again
+  - do:
+      indices.delete:
+        index: docs
+
+  # Restore the first snapshot
+  - do:
+      snapshot.restore:
+        repository: repository
+        snapshot: snapshot-one
+        wait_for_completion: true
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 3}
+
+  # Remove the snapshots
+  - do:
+      snapshot.delete:
+        repository: repository
+        snapshot: snapshot-two
+
+  - do:
+      snapshot.delete:
+        repository: repository
+        snapshot: snapshot-one
+
+  # Remove our repository
+  - do:
+      snapshot.delete_repository:
+        repository: repository
@@ -156,7 +156,7 @@ class S3Repository extends BlobStoreRepository {

         String bucket = BUCKET_SETTING.get(metadata.settings());
         if (bucket == null) {
-            throw new RepositoryException(metadata.name(), "No bucket defined for s3 gateway");
+            throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository");
         }

         boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
@@ -11,183 +11,3 @@
       nodes.info: {}

   - match: { nodes.$master.plugins.0.name: repository-s3 }
----
-"Snapshot/Restore with repository-s3":
-
-  # Register repository
-  - do:
-      snapshot.create_repository:
-        repository: repository
-        body:
-          type: s3
-          settings:
-            bucket: "bucket_test"
-            client: "integration_test"
-            canned_acl: "public-read"
-            storage_class: "standard"
-
-  - match: { acknowledged: true }
-
-  # Get repository
-  - do:
-      snapshot.get_repository:
-        repository: repository
-
-  - match: {repository.settings.bucket : "bucket_test"}
-  - match: {repository.settings.client : "integration_test"}
-  - match: {repository.settings.canned_acl : "public-read"}
-  - match: {repository.settings.storage_class : "standard"}
-  - is_false: repository.settings.access_key
-  - is_false: repository.settings.secret_key
-
-  # Index documents
-  - do:
-      bulk:
-        refresh: true
-        body:
-          - index:
-              _index: docs
-              _type: doc
-              _id: 1
-          - snapshot: one
-          - index:
-              _index: docs
-              _type: doc
-              _id: 2
-          - snapshot: one
-          - index:
-              _index: docs
-              _type: doc
-              _id: 3
-          - snapshot: one
-
-  - do:
-      count:
-        index: docs
-
-  - match: {count: 3}
-
-  # Create a first snapshot
-  - do:
-      snapshot.create:
-        repository: repository
-        snapshot: snapshot-one
-        wait_for_completion: true
-
-  - match: { snapshot.snapshot: snapshot-one }
-  - match: { snapshot.state : SUCCESS }
-  - match: { snapshot.include_global_state: true }
-  - match: { snapshot.shards.failed : 0 }
-
-  - do:
-      snapshot.status:
-        repository: repository
-        snapshot: snapshot-one
-
-  - is_true: snapshots
-  - match: { snapshots.0.snapshot: snapshot-one }
-  - match: { snapshots.0.state : SUCCESS }
-
-  # Index more documents
-  - do:
-      bulk:
-        refresh: true
-        body:
-          - index:
-              _index: docs
-              _type: doc
-              _id: 4
-          - snapshot: two
-          - index:
-              _index: docs
-              _type: doc
-              _id: 5
-          - snapshot: two
-          - index:
-              _index: docs
-              _type: doc
-              _id: 6
-          - snapshot: two
-          - index:
-              _index: docs
-              _type: doc
-              _id: 7
-          - snapshot: two
-
-  - do:
-      count:
-        index: docs
-
-  - match: {count: 7}
-
-  # Create a second snapshot
-  - do:
-      snapshot.create:
-        repository: repository
-        snapshot: snapshot-two
-        wait_for_completion: true
-
-  - match: { snapshot.snapshot: snapshot-two }
-  - match: { snapshot.state : SUCCESS }
-  - match: { snapshot.shards.failed : 0 }
-
-  - do:
-      snapshot.get:
-        repository: repository
-        snapshot: snapshot-one,snapshot-two
-
-  - is_true: snapshots
-  - match: { snapshots.0.state : SUCCESS }
-  - match: { snapshots.1.state : SUCCESS }
-
-  # Delete the index
-  - do:
-      indices.delete:
-        index: docs
-
-  # Restore the second snapshot
-  - do:
-      snapshot.restore:
-        repository: repository
-        snapshot: snapshot-two
-        wait_for_completion: true
-
-  - do:
-      count:
-        index: docs
-
-  - match: {count: 7}
-
-  # Delete the index again
-  - do:
-      indices.delete:
-        index: docs
-
-  # Restore the first snapshot
-  - do:
-      snapshot.restore:
-        repository: repository
-        snapshot: snapshot-one
-        wait_for_completion: true
-
-  - do:
-      count:
-        index: docs
-
-  - match: {count: 3}
-
-  # Remove the snapshots
-  - do:
-      snapshot.delete:
-        repository: repository
-        snapshot: snapshot-two
-
-  - do:
-      snapshot.delete:
-        repository: repository
-        snapshot: snapshot-one
-
-  # Remove our repository
-  - do:
-      snapshot.delete_repository:
-        repository: repository
@@ -7,7 +7,6 @@
   - match: { my_remote_cluster.num_nodes_connected: 1}
   - match: { my_remote_cluster.max_connections_per_cluster: 1}
   - match: { my_remote_cluster.initial_connect_timeout: "30s" }
-  - is_true: my_remote_cluster.http_addresses.0

 ---
 "Add transient remote cluster based on the preset cluster and check remote info":
@@ -38,9 +37,6 @@

   - do:
       cluster.remote_info: {}
-  - set: { my_remote_cluster.http_addresses.0: remote_http }
-  - match: { test_remote_cluster.http_addresses.0: $remote_http }
-
   - match: { test_remote_cluster.connected: true }
   - match: { my_remote_cluster.connected: true }

@@ -132,4 +128,3 @@
         transient:
           search.remote.remote1.seeds: null
           search.remote.remote1.skip_unavailable: null
-
@@ -1,94 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-description = """\
-Tests reindex-from-remote against some specific versions of
-Elasticsearch prior to 5.0. Versions of Elasticsearch >= 5.0
-should be able to use the standard launching mechanism which
-is more flexible and reliable.
-"""
-
-
-import org.apache.tools.ant.taskdefs.condition.Os
-
-import static org.elasticsearch.gradle.BuildPlugin.getJavaHome
-
-apply plugin: 'elasticsearch.standalone-rest-test'
-apply plugin: 'elasticsearch.rest-test'
-
-integTestCluster {
-  // Whitelist reindexing from the local node so we can test it.
-  setting 'reindex.remote.whitelist', '127.0.0.1:*'
-}
-
-configurations {
-  oldesFixture
-  es2
-  es1
-  es090
-}
-
-dependencies {
-  oldesFixture project(':test:fixtures:old-elasticsearch')
-  /* Right now we just test against the latest version of each major we expect
-   * reindex-from-remote to work against. We could randomize the versions but
-   * that doesn't seem worth it at this point. */
-  es2 'org.elasticsearch.distribution.zip:elasticsearch:2.4.5@zip'
-  es1 'org.elasticsearch:elasticsearch:1.7.6@zip'
-  es090 'org.elasticsearch:elasticsearch:0.90.13@zip'
-}
-
-if (Os.isFamily(Os.FAMILY_WINDOWS)) {
-  // we can't get the pid files in windows so we skip that
-  integTest.enabled = false
-} else {
-  /* Set up tasks to unzip and run the old versions of ES before running the
-   * integration tests. */
-  for (String version : ['2', '1', '090']) {
-    Task unzip = task("unzipEs${version}", type: Sync) {
-      Configuration oldEsDependency = configurations['es' + version]
-      dependsOn oldEsDependency
-      /* Use a closure here to delay resolution of the dependency until we need
-       * it */
-      from {
-        oldEsDependency.collect { zipTree(it) }
-      }
-      into temporaryDir
-    }
-    Task fixture = task("oldEs${version}Fixture",
-        type: org.elasticsearch.gradle.test.AntFixture) {
-      dependsOn project.configurations.oldesFixture
-      dependsOn unzip
-      executable = new File(project.runtimeJavaHome, 'bin/java')
-      env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }"
-      env 'JAVA_HOME', getJavaHome(it, 7)
-      args 'oldes.OldElasticsearch',
-           baseDir,
-           unzip.temporaryDir,
-           version == '090'
-    }
-    integTest.dependsOn fixture
-    integTestRunner {
-      /* Use a closure on the string to delay evaluation until right before we
-       * run the integration tests so that we can be sure that the file is
-       * ready. */
-      systemProperty "es${version}.port", "${ -> fixture.addressAndPort }"
-    }
-  }
-}
@@ -23,9 +23,9 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'

 ext.pluginsCount = 0
-project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj ->
+project(':plugins').getChildProjects().each { pluginName, pluginProject ->
   integTestCluster {
-    plugin subproj.path
+    plugin pluginProject.path
   }
   pluginsCount += 1
 }
@@ -22,7 +22,7 @@ apply plugin: 'elasticsearch.vagrant'

 List<String> plugins = []
 for (Project subproj : project.rootProject.subprojects) {
-  if (subproj.path.startsWith(':plugins:') || subproj.path.equals(':example-plugins:custom-settings')) {
+  if (subproj.parent.path == ':plugins' || subproj.path.equals(':example-plugins:custom-settings')) {
     // add plugin as a dep
     dependencies {
       packaging project(path: "${subproj.path}", configuration: 'zip')
@@ -35,9 +35,6 @@
         }
       }
     },
-    "body": {
-      "description": "Field json objects containing an array of field names",
-      "required": false
-    }
+    "body": null
   }
 }
@@ -241,6 +241,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestRemoteClusterInfoAction;
 import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction;
 import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction;
 import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction;
+import org.elasticsearch.rest.action.admin.indices.RestResizeHandler;
 import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction;
 import org.elasticsearch.rest.action.admin.indices.RestClearIndicesCacheAction;
 import org.elasticsearch.rest.action.admin.indices.RestCloseIndexAction;
@@ -270,8 +271,6 @@ import org.elasticsearch.rest.action.admin.indices.RestPutMappingAction;
 import org.elasticsearch.rest.action.admin.indices.RestRecoveryAction;
 import org.elasticsearch.rest.action.admin.indices.RestRefreshAction;
 import org.elasticsearch.rest.action.admin.indices.RestRolloverIndexAction;
-import org.elasticsearch.rest.action.admin.indices.RestShrinkIndexAction;
-import org.elasticsearch.rest.action.admin.indices.RestSplitIndexAction;
 import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction;
 import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction;
 import org.elasticsearch.rest.action.admin.indices.RestUpgradeAction;
@@ -569,8 +568,8 @@ public class ActionModule extends AbstractModule {
         registerHandler.accept(new RestIndexPutAliasAction(settings, restController));
         registerHandler.accept(new RestIndicesAliasesAction(settings, restController));
         registerHandler.accept(new RestCreateIndexAction(settings, restController));
-        registerHandler.accept(new RestShrinkIndexAction(settings, restController));
-        registerHandler.accept(new RestSplitIndexAction(settings, restController));
+        registerHandler.accept(new RestResizeHandler.RestShrinkIndexAction(settings, restController));
+        registerHandler.accept(new RestResizeHandler.RestSplitIndexAction(settings, restController));
         registerHandler.accept(new RestRolloverIndexAction(settings, restController));
         registerHandler.accept(new RestDeleteIndexAction(settings, restController));
         registerHandler.accept(new RestCloseIndexAction(settings, restController));
@@ -30,6 +30,8 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

+import static java.util.stream.Collectors.toList;
+
 public final class TransportRemoteInfoAction extends HandledTransportAction<RemoteInfoRequest, RemoteInfoResponse> {

     private final RemoteClusterService remoteClusterService;
@@ -45,7 +47,6 @@ public final class TransportRemoteInfoAction extends HandledTransportAction<Remo

     @Override
     protected void doExecute(RemoteInfoRequest remoteInfoRequest, ActionListener<RemoteInfoResponse> listener) {
-        remoteClusterService.getRemoteConnectionInfos(ActionListener.wrap(remoteConnectionInfos
-            -> listener.onResponse(new RemoteInfoResponse(remoteConnectionInfos)), listener::onFailure));
+        listener.onResponse(new RemoteInfoResponse(remoteClusterService.getRemoteConnectionInfos().collect(toList())));
     }
 }
@@ -30,7 +30,6 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
 import java.util.Arrays;
@@ -102,10 +101,6 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
         }
     }

-    public static FieldCapabilitiesRequest parseFields(XContentParser parser) throws IOException {
-        return PARSER.parse(parser, null);
-    }
-
    /**
     * The list of field names to retrieve
     */
@@ -26,7 +26,7 @@ import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
-import org.elasticsearch.common.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParserUtils;
@@ -41,7 +41,7 @@ import java.util.stream.Collectors;
 /**
  * Response for {@link FieldCapabilitiesRequest} requests.
  */
-public class FieldCapabilitiesResponse extends ActionResponse implements ToXContentFragment {
+public class FieldCapabilitiesResponse extends ActionResponse implements ToXContentObject {
     private static final ParseField FIELDS_FIELD = new ParseField("fields");

     private Map<String, Map<String, FieldCapabilities>> responseMap;
@@ -123,8 +123,9 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont

     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.field(FIELDS_FIELD.getPreferredName(), responseMap);
-        return builder;
+        return builder.startObject()
+            .field(FIELDS_FIELD.getPreferredName(), responseMap)
+            .endObject();
     }

     public static FieldCapabilitiesResponse fromXContent(XContentParser parser) throws IOException {
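Because the response is now a `ToXContentObject`, callers no longer add their own wrapping object; a minimal sketch of serializing it (the helper class is hypothetical; the builder and utility calls are the standard Elasticsearch XContent API):

[source,java]
--------------------------------------------------
import java.io.IOException;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

class FieldCapsRendering {
    // Renders a complete JSON object, {"fields":{...}}, with no external
    // startObject()/endObject() -- which is what lets the REST layer use
    // RestToXContentListener instead of a hand-rolled RestBuilderListener.
    static String render(FieldCapabilitiesResponse response) throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        response.toXContent(builder, ToXContent.EMPTY_PARAMS);
        return Strings.toString(builder);
    }
}
--------------------------------------------------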
@@ -384,7 +384,9 @@ public abstract class TransportWriteAction<
         @Override
         public void failShardIfNeeded(ShardRouting replica, String message, Exception exception,
                                       Runnable onSuccess, Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
+            if (TransportActions.isShardNotAvailableException(exception) == false) {
                 logger.warn(new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception);
+            }
             shardStateAction.remoteShardFailed(replica.shardId(), replica.allocationId().getId(), primaryTerm, true, message, exception,
                 createShardActionListener(onSuccess, onPrimaryDemoted, onIgnoredFailure));
         }
@@ -20,25 +20,18 @@
 package org.elasticsearch.rest.action;

 import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
-import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
-import org.elasticsearch.rest.RestResponse;
-import org.elasticsearch.rest.RestStatus;

 import java.io.IOException;

 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestRequest.Method.POST;
-import static org.elasticsearch.rest.RestStatus.OK;

 public class RestFieldCapabilitiesAction extends BaseRestHandler {
     public RestFieldCapabilitiesAction(Settings settings, RestController controller) {
@@ -57,41 +50,13 @@ public class RestFieldCapabilitiesAction extends BaseRestHandler {
     @Override
     public RestChannelConsumer prepareRequest(final RestRequest request,
                                               final NodeClient client) throws IOException {
-        if (request.hasContentOrSourceParam()) {
-            deprecationLogger.deprecated("Specifying a request body is deprecated -- the" +
-                " [fields] request parameter should be used instead.");
-            if (request.hasParam("fields")) {
-                throw new IllegalArgumentException("can't specify a request body and [fields]" +
-                    " request parameter, either specify a request body or the" +
-                    " [fields] request parameter");
-            }
-        }
+        String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+        FieldCapabilitiesRequest fieldRequest = new FieldCapabilitiesRequest()
+            .fields(Strings.splitStringByCommaToArray(request.param("fields")))
+            .indices(indices);

-        final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
-        final FieldCapabilitiesRequest fieldRequest;
-        if (request.hasContentOrSourceParam()) {
-            try (XContentParser parser = request.contentOrSourceParamParser()) {
-                fieldRequest = FieldCapabilitiesRequest.parseFields(parser);
-            }
-        } else {
-            fieldRequest = new FieldCapabilitiesRequest();
-            fieldRequest.fields(Strings.splitStringByCommaToArray(request.param("fields")));
-        }
-        fieldRequest.indices(indices);
         fieldRequest.indicesOptions(
-            IndicesOptions.fromRequest(request, fieldRequest.indicesOptions())
-        );
-        return channel -> client.fieldCaps(fieldRequest,
-            new RestBuilderListener<FieldCapabilitiesResponse>(channel) {
-                @Override
-                public RestResponse buildResponse(FieldCapabilitiesResponse response,
-                                                  XContentBuilder builder) throws Exception {
-                    RestStatus status = OK;
-                    builder.startObject();
-                    response.toXContent(builder, request);
-                    builder.endObject();
-                    return new BytesRestResponse(status, builder);
-                }
-            });
+            IndicesOptions.fromRequest(request, fieldRequest.indicesOptions()));
+        return channel -> client.fieldCaps(fieldRequest, new RestToXContentListener<>(channel));
     }
 }
@@ -32,6 +32,7 @@ import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.rest.action.RestBuilderListener;
+import org.elasticsearch.rest.action.RestToXContentListener;

 import java.io.IOException;

@@ -50,16 +51,8 @@ public final class RestRemoteClusterInfoAction extends BaseRestHandler {
     }

     @Override
-    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client)
-        throws IOException {
-        return channel -> client.execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest(),
-            new RestBuilderListener<RemoteInfoResponse>(channel) {
-                @Override
-                public RestResponse buildResponse(RemoteInfoResponse response, XContentBuilder builder) throws Exception {
-                    response.toXContent(builder, request);
-                    return new BytesRestResponse(RestStatus.OK, builder);
-                }
-            });
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
+        return channel -> client.execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest(), new RestToXContentListener<>(channel));
     }
     @Override
     public boolean canTripCircuitBreaker() {
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices;
+
+import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
+import org.elasticsearch.action.admin.indices.shrink.ResizeType;
+import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestToXContentListener;
+
+import java.io.IOException;
+
+public abstract class RestResizeHandler extends BaseRestHandler {
+
+    RestResizeHandler(final Settings settings) {
+        super(settings);
+    }
+
+    @Override
+    public abstract String getName();
+
+    abstract ResizeType getResizeType();
+
+    @Override
+    public final RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
+        final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index"));
+        resizeRequest.setResizeType(getResizeType());
+        request.applyContentParser(resizeRequest::fromXContent);
+        resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout()));
+        resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout()));
+        resizeRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards")));
+        return channel -> client.admin().indices().resizeIndex(resizeRequest, new RestToXContentListener<>(channel));
+    }
+
+    public static class RestShrinkIndexAction extends RestResizeHandler {
+
+        public RestShrinkIndexAction(final Settings settings, final RestController controller) {
+            super(settings);
+            controller.registerHandler(RestRequest.Method.PUT, "/{index}/_shrink/{target}", this);
+            controller.registerHandler(RestRequest.Method.POST, "/{index}/_shrink/{target}", this);
+        }
+
+        @Override
+        public String getName() {
+            return "shrink_index_action";
+        }
+
+        @Override
+        protected ResizeType getResizeType() {
+            return ResizeType.SHRINK;
+        }
+
+    }
+
+    public static class RestSplitIndexAction extends RestResizeHandler {
+
+        public RestSplitIndexAction(final Settings settings, final RestController controller) {
+            super(settings);
+            controller.registerHandler(RestRequest.Method.PUT, "/{index}/_split/{target}", this);
+            controller.registerHandler(RestRequest.Method.POST, "/{index}/_split/{target}", this);
+        }
+
+        @Override
+        public String getName() {
+            return "split_index_action";
+        }
+
+        @Override
+        protected ResizeType getResizeType() {
+            return ResizeType.SPLIT;
+        }
+
+    }
+
+}
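Both shrink and split now funnel through a single `ResizeRequest`; as a hedged usage sketch, here is a shrink issued through the admin client, mirroring the `resizeIndex` call the handler makes above (the index names are placeholders and the wrapper class is hypothetical):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
import org.elasticsearch.client.Client;

class ShrinkExample {
    // Shrink source_index into target_index, as the handler does for
    // PUT /{index}/_shrink/{target}; only the ResizeType differs for _split.
    static void shrink(Client client, ActionListener<ResizeResponse> listener) {
        ResizeRequest request = new ResizeRequest("target_index", "source_index");
        request.setResizeType(ResizeType.SHRINK);
        client.admin().indices().resizeIndex(request, listener);
    }
}
--------------------------------------------------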
@ -1,56 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.rest.action.admin.indices;
|
||||
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
|
||||
import org.elasticsearch.action.support.ActiveShardCount;
|
||||
import org.elasticsearch.client.node.NodeClient;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.rest.BaseRestHandler;
|
||||
import org.elasticsearch.rest.RestController;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.action.RestToXContentListener;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class RestShrinkIndexAction extends BaseRestHandler {
|
||||
public RestShrinkIndexAction(Settings settings, RestController controller) {
|
||||
super(settings);
|
||||
controller.registerHandler(RestRequest.Method.PUT, "/{index}/_shrink/{target}", this);
|
||||
controller.registerHandler(RestRequest.Method.POST, "/{index}/_shrink/{target}", this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return "shrink_index_action";
|
||||
}
|
||||
|
||||
@Override
|
||||
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
|
||||
ResizeRequest shrinkIndexRequest = new ResizeRequest(request.param("target"), request.param("index"));
|
||||
shrinkIndexRequest.setResizeType(ResizeType.SHRINK);
|
||||
request.applyContentParser(shrinkIndexRequest::fromXContent);
|
||||
shrinkIndexRequest.timeout(request.paramAsTime("timeout", shrinkIndexRequest.timeout()));
|
||||
shrinkIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", shrinkIndexRequest.masterNodeTimeout()));
|
||||
shrinkIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards")));
|
||||
return channel -> client.admin().indices().resizeIndex(shrinkIndexRequest, new RestToXContentListener<>(channel));
|
||||
}
|
||||
}

@@ -1,56 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.rest.action.admin.indices;

import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestToXContentListener;

import java.io.IOException;

public class RestSplitIndexAction extends BaseRestHandler {
    public RestSplitIndexAction(Settings settings, RestController controller) {
        super(settings);
        controller.registerHandler(RestRequest.Method.PUT, "/{index}/_split/{target}", this);
        controller.registerHandler(RestRequest.Method.POST, "/{index}/_split/{target}", this);
    }

    @Override
    public String getName() {
        return "split_index_action";
    }

    @Override
    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
        ResizeRequest shrinkIndexRequest = new ResizeRequest(request.param("target"), request.param("index"));
        shrinkIndexRequest.setResizeType(ResizeType.SPLIT);
        request.applyContentParser(shrinkIndexRequest::fromXContent);
        shrinkIndexRequest.timeout(request.paramAsTime("timeout", shrinkIndexRequest.timeout()));
        shrinkIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", shrinkIndexRequest.masterNodeTimeout()));
        shrinkIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards")));
        return channel -> client.admin().indices().resizeIndex(shrinkIndexRequest, new RestToXContentListener<>(channel));
    }
}

@@ -40,6 +40,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ThreadContext;

@@ -602,66 +603,13 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
    }

    /**
     * Fetches connection info for this connection
     * Get the information about remote nodes to be rendered on {@code _remote/info} requests.
     */
    public void getConnectionInfo(ActionListener<RemoteConnectionInfo> listener) {
        final Optional<DiscoveryNode> anyNode = connectedNodes.getAny();
        if (anyNode.isPresent() == false) {
            // not connected; we return immediately
            RemoteConnectionInfo remoteConnectionStats = new RemoteConnectionInfo(clusterAlias,
                Collections.emptyList(), Collections.emptyList(), maxNumRemoteConnections, 0,
                RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), skipUnavailable);
            listener.onResponse(remoteConnectionStats);
        } else {
            NodesInfoRequest request = new NodesInfoRequest();
            request.clear();
            request.http(true);

            transportService.sendRequest(anyNode.get(), NodesInfoAction.NAME, request, new TransportResponseHandler<NodesInfoResponse>() {
                @Override
                public NodesInfoResponse newInstance() {
                    return new NodesInfoResponse();
                }

                @Override
                public void handleResponse(NodesInfoResponse response) {
                    Collection<TransportAddress> httpAddresses = new HashSet<>();
                    for (NodeInfo info : response.getNodes()) {
                        if (connectedNodes.contains(info.getNode()) && info.getHttp() != null) {
                            httpAddresses.add(info.getHttp().getAddress().publishAddress());
                        }
                    }

                    if (httpAddresses.size() < maxNumRemoteConnections) {
                        // just in case none of the connected nodes have http enabled we get other http enabled nodes instead.
                        for (NodeInfo info : response.getNodes()) {
                            if (nodePredicate.test(info.getNode()) && info.getHttp() != null) {
                                httpAddresses.add(info.getHttp().getAddress().publishAddress());
                            }
                            if (httpAddresses.size() == maxNumRemoteConnections) {
                                break; // once we have enough return...
                            }
                        }
                    }
                    RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(clusterAlias,
                        seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList()), new ArrayList<>(httpAddresses),
                        maxNumRemoteConnections, connectedNodes.size(),
                        RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), skipUnavailable);
                    listener.onResponse(remoteConnectionInfo);
                }

                @Override
                public void handleException(TransportException exp) {
                    listener.onFailure(exp);
                }

                @Override
                public String executor() {
                    return ThreadPool.Names.SAME;
                }
            });
    }

    public RemoteConnectionInfo getConnectionInfo() {
        List<TransportAddress> seedNodeAddresses = seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList());
        TimeValue initialConnectionTimeout = RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings);
        return new RemoteConnectionInfo(clusterAlias, seedNodeAddresses, maxNumRemoteConnections, connectedNodes.size(),
            initialConnectionTimeout, skipUnavailable);
    }

    int getNumNodesConnected() {

@@ -25,7 +25,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
import org.elasticsearch.action.support.GroupedActionListener;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -42,7 +41,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;

@@ -56,6 +54,7 @@ import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static org.elasticsearch.common.settings.Setting.boolSetting;

@@ -348,17 +347,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
        IOUtils.close(remoteClusters.values());
    }

    public void getRemoteConnectionInfos(ActionListener<Collection<RemoteConnectionInfo>> listener) {
        final Map<String, RemoteClusterConnection> remoteClusters = this.remoteClusters;
        if (remoteClusters.isEmpty()) {
            listener.onResponse(Collections.emptyList());
        } else {
            final GroupedActionListener<RemoteConnectionInfo> actionListener = new GroupedActionListener<>(listener,
                remoteClusters.size(), Collections.emptyList());
            for (RemoteClusterConnection connection : remoteClusters.values()) {
                connection.getConnectionInfo(actionListener);
            }
        }
    public Stream<RemoteConnectionInfo> getRemoteConnectionInfos() {
        return remoteClusters.values().stream().map(RemoteClusterConnection::getConnectionInfo);
    }
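
    // With the listener plumbing gone, callers can materialize the infos synchronously.
    // A minimal usage sketch, assuming `service` is a RemoteClusterService instance
    // (names here are illustrative, not part of this change):
    //
    //     List<RemoteConnectionInfo> infos = service.getRemoteConnectionInfos()
    //             .collect(Collectors.toList());
    //     // each entry renders one cluster entry of the _remote/info response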

    /**

@@ -27,17 +27,18 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;

import static java.util.Collections.emptyList;

import java.io.IOException;
import java.util.List;
import java.util.Objects;

/**
 * This class encapsulates all remote cluster information to be rendered on
 * <tt>_remote/info</tt> requests.
 * {@code _remote/info} requests.
 */
public final class RemoteConnectionInfo implements ToXContentFragment, Writeable {
    final List<TransportAddress> seedNodes;
    final List<TransportAddress> httpAddresses;
    final int connectionsPerCluster;
    final TimeValue initialConnectionTimeout;
    final int numNodesConnected;

@@ -45,12 +46,10 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable
    final boolean skipUnavailable;

    RemoteConnectionInfo(String clusterAlias, List<TransportAddress> seedNodes,
                         List<TransportAddress> httpAddresses,
                         int connectionsPerCluster, int numNodesConnected,
                         TimeValue initialConnectionTimeout, boolean skipUnavailable) {
        this.clusterAlias = clusterAlias;
        this.seedNodes = seedNodes;
        this.httpAddresses = httpAddresses;
        this.connectionsPerCluster = connectionsPerCluster;
        this.numNodesConnected = numNodesConnected;
        this.initialConnectionTimeout = initialConnectionTimeout;

@@ -59,16 +58,45 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable

    public RemoteConnectionInfo(StreamInput input) throws IOException {
        seedNodes = input.readList(TransportAddress::new);
        httpAddresses = input.readList(TransportAddress::new);
        if (input.getVersion().before(Version.V_7_0_0_alpha1)) {
            /*
             * Versions before 7.0 sent the HTTP addresses of all nodes in the
             * remote cluster here but it was expensive to fetch and we
             * ultimately figured out how to do without it. So we removed it.
             *
             * We just throw any HTTP addresses received here on the floor
             * because we don't need to do anything with them.
             */
            input.readList(TransportAddress::new);
        }
        connectionsPerCluster = input.readVInt();
        initialConnectionTimeout = input.readTimeValue();
        numNodesConnected = input.readVInt();
        clusterAlias = input.readString();
        if (input.getVersion().onOrAfter(Version.V_6_1_0)) {
            skipUnavailable = input.readBoolean();
        } else {
            skipUnavailable = false;
        }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeList(seedNodes);
        if (out.getVersion().before(Version.V_7_0_0_alpha1)) {
            /*
             * Versions before 7.0 sent the HTTP addresses of all nodes in the
             * remote cluster here but it was expensive to fetch and we
             * ultimately figured out how to do without it. So we removed it.
             *
             * When sending this request to a node that expects HTTP addresses
             * here we pretend that we didn't find any. This *should* be fine
             * because, after all, we haven't been using this information for
             * a while.
             */
            out.writeList(emptyList());
        }
        out.writeVInt(connectionsPerCluster);
        out.writeTimeValue(initialConnectionTimeout);
        out.writeVInt(numNodesConnected);
        out.writeString(clusterAlias);
        out.writeBoolean(skipUnavailable);
    }
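
        // The two version gates above make the wire format self-describing. A sketch of
        // round-tripping against a pre-7.0 peer, assuming `info` is a populated
        // RemoteConnectionInfo (illustrative only, mirroring the tests later in this diff):
        //
        //     BytesStreamOutput out = new BytesStreamOutput();
        //     out.setVersion(Version.V_6_1_0);           // pretend the peer is on 6.1
        //     info.writeTo(out);                         // writes the empty placeholder list
        //     StreamInput in = out.bytes().streamInput();
        //     in.setVersion(Version.V_6_1_0);
        //     RemoteConnectionInfo copy = new RemoteConnectionInfo(in); // discards the placeholder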

    @Override

@@ -80,11 +108,6 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable
            builder.value(addr.toString());
        }
        builder.endArray();
        builder.startArray("http_addresses");
        for (TransportAddress addr : httpAddresses) {
            builder.value(addr.toString());
        }
        builder.endArray();
        builder.field("connected", numNodesConnected > 0);
        builder.field("num_nodes_connected", numNodesConnected);
        builder.field("max_connections_per_cluster", connectionsPerCluster);

@@ -95,19 +118,6 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable
        return builder;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeList(seedNodes);
        out.writeList(httpAddresses);
        out.writeVInt(connectionsPerCluster);
        out.writeTimeValue(initialConnectionTimeout);
        out.writeVInt(numNodesConnected);
        out.writeString(clusterAlias);
        if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
            out.writeBoolean(skipUnavailable);
        }
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;

@@ -116,7 +126,6 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable
        return connectionsPerCluster == that.connectionsPerCluster &&
            numNodesConnected == that.numNodesConnected &&
            Objects.equals(seedNodes, that.seedNodes) &&
            Objects.equals(httpAddresses, that.httpAddresses) &&
            Objects.equals(initialConnectionTimeout, that.initialConnectionTimeout) &&
            Objects.equals(clusterAlias, that.clusterAlias) &&
            skipUnavailable == that.skipUnavailable;

@@ -124,7 +133,7 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable

    @Override
    public int hashCode() {
        return Objects.hash(seedNodes, httpAddresses, connectionsPerCluster, initialConnectionTimeout,
        return Objects.hash(seedNodes, connectionsPerCluster, initialConnectionTimeout,
            numNodesConnected, clusterAlias, skipUnavailable);
    }
}

@@ -110,10 +110,8 @@ public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTe
    public void testToXContent() throws IOException {
        FieldCapabilitiesResponse response = createSimpleResponse();

        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)
            .startObject();
        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
        response.toXContent(builder, ToXContent.EMPTY_PARAMS);
        builder.endObject();

        String generatedResponse = BytesReference.bytes(builder).utf8ToString();
        assertEquals((

@@ -1,59 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.rest.action;

import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestFieldCapabilitiesAction;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;
import org.elasticsearch.usage.UsageService;
import org.junit.Before;

import java.io.IOException;
import java.util.Collections;

import static org.mockito.Mockito.mock;

public class RestFieldCapabilitiesActionTests extends ESTestCase {

    private RestFieldCapabilitiesAction action;

    @Before
    public void setUpAction() {
        action = new RestFieldCapabilitiesAction(Settings.EMPTY, mock(RestController.class));
    }

    public void testRequestBodyIsDeprecated() throws IOException {
        String content = "{ \"fields\": [\"title\"] }";
        RestRequest request = new FakeRestRequest.Builder(xContentRegistry())
            .withPath("/_field_caps")
            .withContent(new BytesArray(content), XContentType.JSON)
            .build();
        action.prepareRequest(request, mock(NodeClient.class));

        assertWarnings("Specifying a request body is deprecated -- the" +
            " [fields] request parameter should be used instead.");
    }
}

@@ -93,6 +93,7 @@ import org.elasticsearch.script.StoredScriptsIT;
import org.elasticsearch.snapshots.mockstore.MockRepository;
import org.elasticsearch.test.junit.annotations.TestLogging;

import java.io.IOException;
import java.nio.channels.SeekableByteChannel;
import java.nio.file.Files;
import java.nio.file.Path;

@@ -1243,30 +1244,44 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
            .put("compress", false)
            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));

        createIndex("test-idx-1", "test-idx-2");
        final String[] indices = {"test-idx-1", "test-idx-2"};
        createIndex(indices);
        logger.info("--> indexing some data");
        indexRandom(true,
            client().prepareIndex("test-idx-1", "_doc").setSource("foo", "bar"),
            client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar"));

        logger.info("--> creating snapshot");
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")
            .setWaitForCompletion(true).setIndices(indices).get();
        final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo();
        assertThat(snapshotInfo.successfulShards(), greaterThan(0));
        assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards()));

        RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName());
        Repository repository = service.repository("test-repo");

        final Map<String, IndexId> indexIds = repository.getRepositoryData().getIndices();
        final Path indicesPath = repo.resolve("indices");

        logger.info("--> delete index metadata and shard metadata");
        Path indices = repo.resolve("indices");
        Path testIndex1 = indices.resolve("test-idx-1");
        Path testIndex2 = indices.resolve("test-idx-2");
        Path testIndex2Shard0 = testIndex2.resolve("0");
        IOUtils.deleteFilesIgnoringExceptions(testIndex1.resolve("snapshot-test-snap-1"));
        IOUtils.deleteFilesIgnoringExceptions(testIndex2Shard0.resolve("snapshot-test-snap-1"));
        for (String index : indices) {
            Path shardZero = indicesPath.resolve(indexIds.get(index).getId()).resolve("0");
            if (randomBoolean()) {
                Files.delete(shardZero.resolve("index-0"));
            }
            Files.delete(shardZero.resolve("snap-" + snapshotInfo.snapshotId().getUUID() + ".dat"));
        }

        logger.info("--> delete snapshot");
        client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get();

        logger.info("--> make sure snapshot doesn't exist");
        assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class);

        for (String index : indices) {
            assertTrue(Files.notExists(indicesPath.resolve(indexIds.get(index).getId())));
        }
    }
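
        // The rewritten test resolves blobs through RepositoryData because on-disk
        // directories are keyed by index UUID, not index name. A sketch of the assumed
        // layout (illustrative, derived from the paths used in this test):
        //
        //     <repo-root>/indices/<index-uuid>/<shard-id>/index-0
        //     <repo-root>/indices/<index-uuid>/<shard-id>/snap-<snapshot-uuid>.dat
        //
        //     Path shardZero = repo.resolve("indices")
        //             .resolve(indexIds.get("test-idx-1").getId())   // UUID, not "test-idx-1"
        //             .resolve("0");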

    public void testDeleteSnapshotWithMissingMetadata() throws Exception {

@@ -1420,9 +1435,13 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

        logger.info("--> deleting shard level index file");
        try (Stream<Path> files = Files.list(repo.resolve("indices"))) {
            files.forEach(indexPath ->
                IOUtils.deleteFilesIgnoringExceptions(indexPath.resolve("0").resolve("index-0"))
            );
            files.forEach(indexPath -> {
                try {
                    Files.delete(indexPath.resolve("0").resolve("index-0"));
                } catch (IOException e) {
                    throw new RuntimeException("Failed to delete expected file", e);
                }
            });
        }

        logger.info("--> creating another snapshot");

@@ -42,6 +42,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;

@@ -717,22 +718,6 @@ public class RemoteClusterConnectionTests extends ESTestCase {
        }
    }

    private static void installNodeStatsHandler(TransportService service, DiscoveryNode... nodes) {
        service.registerRequestHandler(NodesInfoAction.NAME, NodesInfoRequest::new, ThreadPool.Names.SAME, false, false,
            (request, channel) -> {
                List<NodeInfo> nodeInfos = new ArrayList<>();
                int port = 80;
                for (DiscoveryNode node : nodes) {
                    HttpInfo http = new HttpInfo(new BoundTransportAddress(new TransportAddress[]{node.getAddress()},
                        new TransportAddress(node.getAddress().address().getAddress(), port++)), 100);
                    nodeInfos.add(new NodeInfo(node.getVersion(), Build.CURRENT, node, null, null, null, null, null, null, http, null,
                        null, null));
                }
                channel.sendResponse(new NodesInfoResponse(ClusterName.DEFAULT, nodeInfos, Collections.emptyList()));
            });

    }

    public void testGetConnectionInfo() throws Exception {
        List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
        try (MockTransportService transport1 = startTransport("seed_node", knownNodes, Version.CURRENT);

@@ -755,32 +740,22 @@ public class RemoteClusterConnectionTests extends ESTestCase {
            try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
                seedNodes, service, maxNumConnections, n -> true)) {
                // test no nodes connected
                RemoteConnectionInfo remoteConnectionInfo = assertSerialization(getRemoteConnectionInfo(connection));
                RemoteConnectionInfo remoteConnectionInfo = assertSerialization(connection.getConnectionInfo());
                assertNotNull(remoteConnectionInfo);
                assertEquals(0, remoteConnectionInfo.numNodesConnected);
                assertEquals(0, remoteConnectionInfo.seedNodes.size());
                assertEquals(0, remoteConnectionInfo.httpAddresses.size());
                assertEquals(3, remoteConnectionInfo.seedNodes.size());
                assertEquals(maxNumConnections, remoteConnectionInfo.connectionsPerCluster);
                assertEquals("test-cluster", remoteConnectionInfo.clusterAlias);

                // Connect some nodes
                updateSeedNodes(connection, seedNodes);
                expectThrows(RemoteTransportException.class, () -> getRemoteConnectionInfo(connection));

                for (MockTransportService s : Arrays.asList(transport1, transport2, transport3)) {
                    installNodeStatsHandler(s, node1, node2, node3);
                }

                remoteConnectionInfo = getRemoteConnectionInfo(connection);
                remoteConnectionInfo = assertSerialization(remoteConnectionInfo);
                remoteConnectionInfo = assertSerialization(connection.getConnectionInfo());
                assertNotNull(remoteConnectionInfo);
                assertEquals(connection.getNumNodesConnected(), remoteConnectionInfo.numNodesConnected);
                assertEquals(Math.min(3, maxNumConnections), connection.getNumNodesConnected());
                assertEquals(3, remoteConnectionInfo.seedNodes.size());
                assertEquals(remoteConnectionInfo.httpAddresses.size(), Math.min(3, maxNumConnections));
                assertEquals(maxNumConnections, remoteConnectionInfo.connectionsPerCluster);
                assertEquals("test-cluster", remoteConnectionInfo.clusterAlias);
                for (TransportAddress address : remoteConnectionInfo.httpAddresses) {
                    assertTrue("port range mismatch: " + address.getPort(), address.getPort() >= 80 && address.getPort() <= 90);
                }
            }
        }
    }

@@ -789,48 +764,41 @@ public class RemoteClusterConnectionTests extends ESTestCase {
    public void testRemoteConnectionInfo() throws IOException {
        RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            4, 3, TimeValue.timeValueMinutes(30), false);
        assertSerialization(stats);

        RemoteConnectionInfo stats1 = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            4, 4, TimeValue.timeValueMinutes(30), true);
        assertSerialization(stats1);
        assertNotEquals(stats, stats1);

        stats1 = new RemoteConnectionInfo("test_cluster_1",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            4, 3, TimeValue.timeValueMinutes(30), false);
        assertSerialization(stats1);
        assertNotEquals(stats, stats1);

        stats1 = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 15)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            4, 3, TimeValue.timeValueMinutes(30), false);
        assertSerialization(stats1);
        assertNotEquals(stats, stats1);

        stats1 = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 87)),
            4, 3, TimeValue.timeValueMinutes(30), true);
        assertSerialization(stats1);
        assertNotEquals(stats, stats1);

        stats1 = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            4, 3, TimeValue.timeValueMinutes(325), true);
        assertSerialization(stats1);
        assertNotEquals(stats, stats1);

        stats1 = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            5, 3, TimeValue.timeValueMinutes(30), false);
        assertSerialization(stats1);
        assertNotEquals(stats, stats1);

@@ -850,13 +818,14 @@ public class RemoteClusterConnectionTests extends ESTestCase {
    }

    public void testRemoteConnectionInfoBwComp() throws IOException {
        final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_5, Version.V_6_0_0);
        final Version version = VersionUtils.randomVersionBetween(random(),
            Version.V_6_1_0, VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1));
        RemoteConnectionInfo expected = new RemoteConnectionInfo("test_cluster",
            Collections.singletonList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Collections.singletonList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            4, 4, new TimeValue(30, TimeUnit.MINUTES), false);

        String encoded = "AQQAAAAABzAuMC4wLjAAAAABAQQAAAAABzAuMC4wLjAAAABQBDwEBAx0ZXN0X2NsdXN0ZXIAAAAAAAAAAAAAAA==";
        // This version was created using the serialization code in use from 6.1 but before 7.0
        String encoded = "AQQAAAAABzAuMC4wLjAAAAABAQQAAAAABzAuMC4wLjAAAABQBDwEBAx0ZXN0X2NsdXN0ZXIA";
        final byte[] data = Base64.getDecoder().decode(encoded);

        try (StreamInput in = StreamInput.wrap(data)) {

@@ -879,55 +848,29 @@ public class RemoteClusterConnectionTests extends ESTestCase {
    public void testRenderConnectionInfoXContent() throws IOException {
        RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            4, 3, TimeValue.timeValueMinutes(30), true);
        stats = assertSerialization(stats);
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
        stats.toXContent(builder, null);
        builder.endObject();
        assertEquals("{\"test_cluster\":{\"seeds\":[\"0.0.0.0:1\"],\"http_addresses\":[\"0.0.0.0:80\"],\"connected\":true," +
        assertEquals("{\"test_cluster\":{\"seeds\":[\"0.0.0.0:1\"],\"connected\":true," +
            "\"num_nodes_connected\":3,\"max_connections_per_cluster\":4,\"initial_connect_timeout\":\"30m\"," +
            "\"skip_unavailable\":true}}", Strings.toString(builder));

        stats = new RemoteConnectionInfo("some_other_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1), new TransportAddress(TransportAddress.META_ADDRESS, 2)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80), new TransportAddress(TransportAddress.META_ADDRESS, 81)),
            2, 0, TimeValue.timeValueSeconds(30), false);
        stats = assertSerialization(stats);
        builder = XContentFactory.jsonBuilder();
        builder.startObject();
        stats.toXContent(builder, null);
        builder.endObject();
        assertEquals("{\"some_other_cluster\":{\"seeds\":[\"0.0.0.0:1\",\"0.0.0.0:2\"],\"http_addresses\":[\"0.0.0.0:80\",\"0.0.0.0:81\"],"
        assertEquals("{\"some_other_cluster\":{\"seeds\":[\"0.0.0.0:1\",\"0.0.0.0:2\"],"
            + "\"connected\":false,\"num_nodes_connected\":0,\"max_connections_per_cluster\":2,\"initial_connect_timeout\":\"30s\"," +
            "\"skip_unavailable\":false}}", Strings.toString(builder));
    }

    private RemoteConnectionInfo getRemoteConnectionInfo(RemoteClusterConnection connection) throws Exception {
        AtomicReference<RemoteConnectionInfo> statsRef = new AtomicReference<>();
        AtomicReference<Exception> exceptionRef = new AtomicReference<>();
        CountDownLatch latch = new CountDownLatch(1);
        connection.getConnectionInfo(new ActionListener<RemoteConnectionInfo>() {
            @Override
            public void onResponse(RemoteConnectionInfo remoteConnectionInfo) {
                statsRef.set(remoteConnectionInfo);
                latch.countDown();
            }

            @Override
            public void onFailure(Exception e) {
                exceptionRef.set(e);
                latch.countDown();
            }
        });
        latch.await();
        if (exceptionRef.get() != null) {
            throw exceptionRef.get();
        }
        return statsRef.get();
    }

    public void testEnsureConnected() throws IOException, InterruptedException {
        List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
        try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT);

@@ -1,5 +1,5 @@
[[native-realm]]
=== Native User Authentication
=== Native user authentication

The easiest way to manage and authenticate users is with the internal `native`
realm. You can use the REST APIs or Kibana to add and remove users, assign user roles, and

@@ -7,7 +7,7 @@ manage user passwords.

[[native-realm-configuration]]
[float]
==== Configuring a Native Realm
==== Configuring a native realm

The native realm is added to the realm chain by default. You don't need to
explicitly configure a native realm to manage users through the REST APIs.

@@ -47,45 +47,12 @@ xpack:
. Restart Elasticsearch.

[[native-settings]]
.Native Realm Settings
[cols="4,^3,10"]
|=======================
| Setting            | Required | Description

| `type`             | yes      | Indicates the realm type. Must be set to `native`.

| `order`            | no       | Indicates the priority of this realm within
                                  the realm chain. Realms with a lower order
                                  are consulted first. Although not required,
                                  we recommend explicitly setting this value
                                  when you configure multiple realms. Defaults
                                  to `Integer.MAX_VALUE`.

| `enabled`          | no       | Indicates whether this realm is enabled or
                                  disabled. When set to `false`, the realm is
                                  not added to the realm chain and therefore
                                  is inactive. Defaults to `true`.

| `cache.ttl`        | no       | Specifies the time-to-live for cached user
                                  entries. A user's credentials are cached for
                                  this period of time. Specify the time period
                                  using the standard Elasticsearch
                                  {ref}/common-options.html#time-units[time units].
                                  Defaults to `20m`.

| `cache.max_users`  | no       | Specifies the maximum number of user entries
                                  that can be cached at any given time. Defaults
                                  to 100,000.

| `cache.hash_algo`  | no       | Specifies the hashing algorithm that is used
                                  for the cached user credentials. See
                                  <<cache-hash-algo, Cache hash algorithms>>
                                  for the possible values. (Expert Setting)
|=======================
==== Native realm settings

See {ref}/security-settings.html#ref-native-settings[Native Realm Settings].

[[managing-native-users]]
==== Managing Native Users
==== Managing native users

{security} enables you to easily manage users in {kib} on the
*Management / Security / Users* page.

@@ -128,10 +128,7 @@ The `certificate_authorities` option may be used as an alternative to the
[[pki-settings]]
===== PKI Realm Settings

See
{ref}/security-settings.html#_settings_valid_for_all_realms[Security Settings for All Realms]
and
{ref}/security-settings.html#ref-pki-settings[PKI Realm Settings].
See {ref}/security-settings.html#ref-pki-settings[PKI Realm Settings].

[[assigning-roles-pki]]
==== Mapping Roles for PKI Users

@@ -1,8 +1,8 @@
[role="xpack"]
[[security-settings]]
=== Security Settings in Elasticsearch
=== Security settings in {es}
++++
<titleabbrev>Security Settings</titleabbrev>
<titleabbrev>Security settings</titleabbrev>
++++

By default, {security} is disabled when you have a basic or trial license. To

@@ -23,14 +23,14 @@ For more information about creating and updating the {es} keystore, see

[float]
[[general-security-settings]]
==== General Security Settings
==== General security settings
`xpack.security.enabled`::
Set to `true` to enable {security} on the node. +
+
If set to `false`, which is the default value for basic and trial licenses,
{security} is disabled. It also affects all {kib} instances that connect to this
{es} instance; you do not need to disable {security} in those `kibana.yml` files.
For more information about disabling {security} in specific {kib} instances, see {kibana-ref}/security-settings-kb.html[{kib} Security Settings].
For more information about disabling {security} in specific {kib} instances, see {kibana-ref}/security-settings-kb.html[{kib} security settings].

`xpack.security.hide_settings`::
A comma-separated list of settings that are omitted from the results of the

@@ -42,16 +42,16 @@ sensitive nature of the information.

[float]
[[password-security-settings]]
==== Default Password Security Settings
==== Default password security settings
`xpack.security.authc.accept_default_password`::
In `elasticsearch.yml`, set this to `false` to disable support for the default "changeme" password.

[float]
[[anonymous-access-settings]]
==== Anonymous Access Settings
==== Anonymous access settings
You can configure the following anonymous access settings in
`elasticsearch.yml`. For more information, see {xpack-ref}/anonymous-access.html[
Enabling Anonymous Access].
Enabling anonymous access].

`xpack.security.authc.anonymous.username`::
The username (principal) of the anonymous user. Defaults to `_es_anonymous_user`.

@@ -69,12 +69,12 @@ access. Defaults to `true`.

[float]
[[field-document-security-settings]]
==== Document and Field Level Security Settings
==== Document and field level security settings

You can set the following document and field level security
settings in `elasticsearch.yml`. For more information, see
{xpack-ref}/field-and-document-access-control.html[Setting Up Document and Field
Level Security].
{xpack-ref}/field-and-document-access-control.html[Setting up document and field
level security].

`xpack.security.dls_fls.enabled`::
Set to `false` to prevent document and field level security

@@ -82,7 +82,7 @@ from being configured. Defaults to `true`.

[float]
[[token-service-settings]]
==== Token Service Settings
==== Token service settings

You can set the following token service settings in
`elasticsearch.yml`.

@@ -98,7 +98,7 @@ The length of time that a token is valid for. By default this value is `20m` or

[float]
[[realm-settings]]
==== Realm Settings
==== Realm settings
You configure realm settings in the `xpack.security.authc.realms`
namespace in `elasticsearch.yml`. For example:

@@ -124,10 +124,11 @@ xpack.security.authc.realms:
----------------------------------------

The valid settings vary depending on the realm type. For more
information, see {xpack-ref}/setting-up-authentication.html[Setting Up Authentication].
information, see {xpack-ref}/setting-up-authentication.html[Setting up authentication].

[float]
===== Settings Valid for All Realms
[[ref-realm-settings]]
===== Settings valid for all realms

`type`::
The type of the realm: `native`, `ldap`, `active_directory`, `pki`, or `file`. Required.

@@ -141,10 +142,31 @@ recommended when you configure multiple realms. Defaults to `Integer.MAX_VALUE`.
Indicates whether a realm is enabled. You can use this setting to disable a
realm without removing its configuration information. Defaults to `true`.

[[ref-native-settings]]
[float]
===== Native realm settings

For a native realm, the `type` must be set to `native`. In addition to the
<<ref-realm-settings,settings that are valid for all realms>>, you can specify
the following optional settings:

`cache.ttl`:: The time-to-live for cached user entries. User credentials are
cached for this period of time. Specify the time period using the standard
{es} <<time-units,time units>>. Defaults to `20m`.

`cache.max_users`:: The maximum number of user entries that can live in the
cache at any given time. Defaults to 100,000.

`cache.hash_algo`:: (Expert Setting) The hashing algorithm that is used for the
in-memory cached user credentials. For possible values, see
{xpack-ref}/controlling-user-cache.html[Cache hash algorithms]. Defaults to
`ssha256`.


[[ref-users-settings]]

[float]
===== File Realm Settings
===== File realm settings

`cache.ttl`::
The time-to-live for cached user entries--user credentials are cached for

@@ -163,7 +185,7 @@ all possible values. Defaults to `ssha256`.

[[ref-ldap-settings]]
[float]
===== LDAP Realm Settings
===== LDAP realm settings
`url`::
An LDAP URL in the format `ldap[s]://<server>:<port>`. Required.

@@ -393,7 +415,7 @@ table for all possible values). Defaults to `ssha256`.

[[ref-ad-settings]]
[float]
===== Active Directory Realm Settings
===== Active Directory realm settings

`url`::
A URL in the format `ldap[s]://<server>:<port>`. Defaults to `ldap://<domain_name>:389`.

@@ -605,7 +627,7 @@ the in-memory cached user credentials (see {xpack-ref}/controlling-user-cache.ht

[[ref-pki-settings]]
[float]
===== PKI Realm Settings
===== PKI realm settings

`username_pattern`::
The regular expression pattern used to extract the username from the

@@ -651,7 +673,7 @@ Defaults to `100000`.

[[ref-saml-settings]]
[float]
===== SAML Realm Settings
===== SAML realm settings
`idp.entity_id`::
The Entity ID of the SAML Identity Provider

@@ -915,10 +937,10 @@ cipher suites that should be supported.

[float]
[[ssl-tls-settings]]
==== Default TLS/SSL Settings
==== Default TLS/SSL settings
You can configure the following TLS/SSL settings in
`elasticsearch.yml`. For more information, see
{xpack-ref}/encrypting-communications.html[Encrypting Communications]. These settings will be used
{xpack-ref}/encrypting-communications.html[Encrypting communications]. These settings will be used
for all of {xpack} unless they have been overridden by more specific
settings such as those for HTTP or Transport.

@@ -961,7 +983,7 @@ Jurisdiction Policy Files_ has been installed, the default value also includes `

[float]
[[tls-ssl-key-settings]]
===== Default TLS/SSL Key and Trusted Certificate Settings
===== Default TLS/SSL key and trusted certificate settings

The following settings are used to specify a private key, certificate, and the
trusted certificates that should be used when communicating over an SSL/TLS connection.

@@ -971,7 +993,7 @@ trusted along with the certificate(s) from the <<tls-ssl-key-settings, key setti
for connections that require client authentication or when acting as a SSL enabled server.

[float]
===== PEM Encoded Files
===== PEM encoded files

When using PEM encoded files, use the following settings:

@@ -994,7 +1016,7 @@ that will be presented to clients when they connect.
List of paths to the PEM encoded certificate files that should be trusted.

[float]
===== Java Keystore Files
===== Java keystore files

When using Java keystore files (JKS), which contain the private key, certificate
and certificates that should be trusted, use the following settings:

@@ -1025,7 +1047,7 @@ Password to the truststore.
Password to the truststore.

[float]
===== PKCS#12 Files
===== PKCS#12 files

When using PKCS#12 container files (`.p12` or `.pfx`), which contain the
private key, certificate, and certificates that should be trusted, use

@@ -1082,7 +1104,7 @@ include::ssl-settings.asciidoc[]

[[ssl-tls-profile-settings]]
[float]
===== Transport Profile TLS/SSL Settings
===== Transport profile TLS/SSL settings
The same settings that are available for the <<transport-tls-ssl-settings, default transport>>
are also available for each transport profile. By default, the settings for a
transport profile will be the same as the default transport unless they

@@ -1096,7 +1118,7 @@ setting, this would be `transport.profiles.$PROFILE.xpack.security.ssl.key`.

[float]
[[ip-filtering-settings]]
==== IP Filtering Settings
==== IP filtering settings
You can configure the following settings for {xpack-ref}/ip-filtering.html[IP filtering].

`xpack.security.transport.filter.allow`::

@@ -187,7 +187,7 @@ public class MonitoringIT extends ESSingleNodeTestCase {
     * This test waits for the monitoring service to collect monitoring documents and then checks that all expected documents
     * have been indexed with the expected information.
     */
    @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/4150")
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29880")
    @SuppressWarnings("unchecked")
    public void testMonitoringService() throws Exception {
        final boolean createAPMIndex = randomBoolean();

@@ -49,7 +49,7 @@ public class FieldHitExtractorTests extends AbstractWireSerializingTestCase<Fiel
        return new FieldHitExtractor(instance.fieldName() + "mutated", true, instance.hitName());
    }

    @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/3082")
    @AwaitsFix(bugUrl = "implement after we're sure of the InnerHitExtractor's implementation")
    public void testGetNested() throws IOException {
        fail("implement after we're sure of the InnerHitExtractor's implementation");
    }

@@ -2,7 +2,7 @@
setup:
  - skip:
      version: "all"
      reason: "AwaitsFix'ing, see x-pack-elasticsearch #4197"
      reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/29890"
  - do:
      xpack.license.post:
        body: >

@@ -145,7 +145,7 @@ public class BootStrapTests extends AbstractWatcherIntegrationTestCase {
        assertThat(response.getWatchesCount(), equalTo(1L));
    }

    @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/1915")
    @AwaitsFix(bugUrl = "Supposedly fixed; https://github.com/elastic/x-pack-elasticsearch/issues/1915")
    public void testLoadExistingWatchesUponStartup() throws Exception {
        stopWatcher();

@@ -226,7 +226,7 @@ public class BootStrapTests extends AbstractWatcherIntegrationTestCase {
        assertSingleExecutionAndCompleteWatchHistory(numWatches, numRecords);
    }

    @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/3437")
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29846")
    public void testTriggeredWatchLoading() throws Exception {
        createIndex("output");
        client().prepareIndex("my-index", "foo", "bar")

@@ -41,8 +41,6 @@ teardown:
  - match: { my_remote_cluster.num_nodes_connected: 1}
  - match: { my_remote_cluster.max_connections_per_cluster: 1}
  - match: { my_remote_cluster.initial_connect_timeout: "30s" }
  - is_true: my_remote_cluster.http_addresses.0

---
"Add transient remote cluster based on the preset cluster and check remote info":

@@ -70,9 +68,6 @@ teardown:
  - do:
      headers: { Authorization: "Basic am9lOnMza3JpdA==" }
      cluster.remote_info: {}
  - set: { my_remote_cluster.http_addresses.0: remote_http }
  - match: { test_remote_cluster.http_addresses.0: $remote_http }

  - match: { test_remote_cluster.connected: true }
  - match: { my_remote_cluster.connected: true }

@@ -87,4 +82,3 @@ teardown:

  - match: { my_remote_cluster.initial_connect_timeout: "30s" }
  - match: { test_remote_cluster.initial_connect_timeout: "30s" }

@@ -161,7 +161,7 @@ public class OpenLdapTests extends ESTestCase {
        }
    }

    @AwaitsFix(bugUrl = "https://github.com/elastic/x-plugins/issues/2849")
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29758")
    public void testTcpTimeout() throws Exception {
        String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com";
        String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com";

@@ -31,7 +31,7 @@ import static org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule.
import static org.hamcrest.Matchers.is;

@TestLogging("org.elasticsearch.client:TRACE,tracer:TRACE")
@AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/2920")
@AwaitsFix(bugUrl = "flaky tests")
public class MonitoringWithWatcherRestIT extends ESRestTestCase {

    @After

@@ -1,5 +1,7 @@
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.elasticsearch.gradle.test.NodeInfo

import javax.net.ssl.HttpsURLConnection

@@ -160,9 +162,9 @@ integTestCluster.dependsOn(importClientCertificateInNodeKeyStore, importNodeCert

ext.pluginsCount = 0
project.rootProject.subprojects.findAll { it.path.startsWith(':plugins:') }.each { subproj ->
project(':plugins').getChildProjects().each { pluginName, pluginProject ->
  // need to get a non-decorated project object, so must re-lookup the project by path
  integTestCluster.plugin(subproj.path)
  integTestCluster.plugin(pluginProject.path)
  pluginsCount += 1
}

@@ -1,4 +1,6 @@
import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin
import org.elasticsearch.gradle.plugin.PluginBuildPlugin

apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'

@@ -8,9 +10,9 @@ dependencies {
}

ext.pluginsCount = 0
project.rootProject.subprojects.findAll { it.path.startsWith(':plugins:') }.each { subproj ->
project(':plugins').getChildProjects().each { pluginName, pluginProject ->
  // need to get a non-decorated project object, so must re-lookup the project by path
  integTestCluster.plugin(subproj.path)
  integTestCluster.plugin(pluginProject.path)
  pluginsCount += 1
}

@@ -121,7 +121,7 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
            ContentType.APPLICATION_JSON)));
    }

    @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/2074")
    @AwaitsFix(bugUrl = "Unclear status, https://github.com/elastic/x-pack-elasticsearch/issues/2074")
    public void testTimeZone() throws IOException {
        String mode = randomMode();
        index("{\"test\":\"2017-07-27 00:00:00\"}",

@@ -304,7 +304,7 @@ public class ActiveDirectorySessionFactoryTests extends AbstractActiveDirectoryT
    }

    @SuppressWarnings("unchecked")
    @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/3369")
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29840")
    public void testHandlingLdapReferralErrors() throws Exception {
        String groupSearchBase = "DC=ad,DC=test,DC=elasticsearch,DC=com";
        String userTemplate = "CN={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com";