Merge branch 'master' into index-lifecycle
commit bfd6e60a75

@@ -379,7 +379,7 @@ You can choose which boxes to test by setting the `-Pvagrant.boxes` project prop
 the valid options for this property are:

 * `sample` - The default, only chooses ubuntu-1404 and centos-7
-* List of box names, comma separated (e.g. `oel-7,fedora-26`) - Chooses exactly the boxes listed.
+* List of box names, comma separated (e.g. `oel-7,fedora-28`) - Chooses exactly the boxes listed.
 * `linux-all` - All linux boxes.
 * `windows-all` - All Windows boxes. If there are any Windows boxes which do not
   have images available when this value is provided, the build will fail.
@@ -406,8 +406,8 @@ These are the linux flavors supported, all of which we provide images for
 * debian-9 aka stretch, the current debian stable distribution
 * centos-6
 * centos-7
-* fedora-26
 * fedora-27
+* fedora-28
 * oel-6 aka Oracle Enterprise Linux 6
 * oel-7 aka Oracle Enterprise Linux 7
 * sles-12
@@ -97,18 +97,18 @@ Vagrant.configure(2) do |config|
       rpm_common config, box
     end
   end
-  'fedora-26'.tap do |box|
-    config.vm.define box, define_opts do |config|
-      config.vm.box = 'elastic/fedora-26-x86_64'
-      dnf_common config, box
-    end
-  end
   'fedora-27'.tap do |box|
     config.vm.define box, define_opts do |config|
       config.vm.box = 'elastic/fedora-27-x86_64'
       dnf_common config, box
     end
   end
+  'fedora-28'.tap do |box|
+    config.vm.define box, define_opts do |config|
+      config.vm.box = 'elastic/fedora-28-x86_64'
+      dnf_common config, box
+    end
+  end
   'opensuse-42'.tap do |box|
     config.vm.define box, define_opts do |config|
       config.vm.box = 'elastic/opensuse-42-x86_64'
build.gradle

@@ -36,6 +36,16 @@ import java.nio.file.Files
 import java.nio.file.Path
 import java.security.MessageDigest

+plugins {
+  id 'com.gradle.build-scan' version '1.13.2'
+}
+if (properties.get("org.elasticsearch.acceptScanTOS", "false") == "true") {
+  buildScan {
+    termsOfServiceUrl = 'https://gradle.com/terms-of-service'
+    termsOfServiceAgree = 'yes'
+  }
+}
+
 // common maven publishing configuration
 subprojects {
   group = 'org.elasticsearch'
@@ -23,8 +23,8 @@ class VagrantTestPlugin implements Plugin<Project> {
             'centos-7',
             'debian-8',
             'debian-9',
-            'fedora-26',
             'fedora-27',
+            'fedora-28',
             'oel-6',
             'oel-7',
             'opensuse-42',
@@ -30,6 +30,7 @@ import org.apache.http.entity.ContentType;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
@@ -711,6 +712,16 @@ final class RequestConverters {
         return request;
     }

+    static Request deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest) {
+        String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot").addPathPart(deleteRepositoryRequest.name()).build();
+        Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
+
+        Params parameters = new Params(request);
+        parameters.withMasterTimeout(deleteRepositoryRequest.masterNodeTimeout());
+        parameters.withTimeout(deleteRepositoryRequest.timeout());
+        return request;
+    }
+
     static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException {
         String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build();
         Request request = new Request(HttpPut.METHOD_NAME, endpoint);
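For intuition, the converter above boils down to a plain HTTP DELETE on the repository path, with the two timeouts passed as query parameters. A minimal self-contained sketch of the resulting request shape (plain Java, no Elasticsearch types; the repository name and timeout values are illustrative):

    import java.util.Locale;

    public class DeleteRepositoryEndpointSketch {
        public static void main(String[] args) {
            String repository = "my_repository"; // hypothetical repository name
            String endpoint = "/_snapshot/" + repository; // same shape the EndpointBuilder produces
            // master_timeout and timeout are filled from masterNodeTimeout()/timeout() on the request
            System.out.println(String.format(Locale.ROOT,
                "DELETE %s?master_timeout=%s&timeout=%s", endpoint, "30s", "30s"));
        }
    }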
@@ -21,6 +21,8 @@ package org.elasticsearch.client;

 import org.apache.http.Header;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
@@ -90,4 +92,28 @@ public final class SnapshotClient {
         restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, RequestConverters::createRepository,
             PutRepositoryResponse::fromXContent, listener, emptySet(), headers);
     }
+
+    /**
+     * Deletes a snapshot repository.
+     * <p>
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
+     * API on elastic.co</a>
+     */
+    public DeleteRepositoryResponse deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest, Header... headers)
+            throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository,
+            DeleteRepositoryResponse::fromXContent, emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously deletes a snapshot repository.
+     * <p>
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
+     * API on elastic.co</a>
+     */
+    public void deleteRepositoryAsync(DeleteRepositoryRequest deleteRepositoryRequest,
+                                      ActionListener<DeleteRepositoryResponse> listener, Header... headers) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository,
+            DeleteRepositoryResponse::fromXContent, listener, emptySet(), headers);
+    }
 }
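A quick sense of how the new methods are meant to be called, assembled from the calls that appear elsewhere in this commit (a sketch only; it assumes an already-configured `RestHighLevelClient` named `client`, and omits error handling):

    // Synchronous delete of a repository registration; "my_repository" is a hypothetical name.
    DeleteRepositoryRequest request = new DeleteRepositoryRequest("my_repository");
    request.timeout(TimeValue.timeValueMinutes(1));           // optional: node acknowledgement timeout
    request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // optional: master node timeout
    DeleteRepositoryResponse response = client.snapshot().deleteRepository(request);
    boolean acknowledged = response.isAcknowledged();

Note that deleting a repository only removes its registration; the snapshot data it points at is left untouched.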
@@ -30,6 +30,7 @@ import org.apache.http.util.EntityUtils;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
@@ -1546,7 +1547,7 @@ public class RequestConvertersTests extends ESTestCase {
     }

     public void testCreateRepository() throws IOException {
-        String repository = "repo";
+        String repository = randomIndicesNames(1, 1)[0];
         String endpoint = "/_snapshot/" + repository;
         Path repositoryLocation = PathUtils.get(".");
         PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(repository);
@@ -1555,10 +1556,10 @@ public class RequestConvertersTests extends ESTestCase {

         putRepositoryRequest.settings(
             Settings.builder()
-            .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation)
-            .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
-            .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
-            .build());
+                .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation)
+                .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
+                .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
+                .build());

         Request request = RequestConverters.createRepository(putRepositoryRequest);
         assertThat(endpoint, equalTo(request.getEndpoint()));
@@ -1566,6 +1567,24 @@ public class RequestConvertersTests extends ESTestCase {
         assertToXContentBody(putRepositoryRequest, request.getEntity());
     }

+    public void testDeleteRepository() {
+        Map<String, String> expectedParams = new HashMap<>();
+        String repository = randomIndicesNames(1, 1)[0];
+
+        StringBuilder endpoint = new StringBuilder("/_snapshot/" + repository);
+
+        DeleteRepositoryRequest deleteRepositoryRequest = new DeleteRepositoryRequest();
+        deleteRepositoryRequest.name(repository);
+        setRandomMasterTimeout(deleteRepositoryRequest, expectedParams);
+        setRandomTimeout(deleteRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+
+        Request request = RequestConverters.deleteRepository(deleteRepositoryRequest);
+        assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
+        assertThat(HttpDelete.METHOD_NAME, equalTo(request.getMethod()));
+        assertThat(expectedParams, equalTo(request.getParameters()));
+        assertNull(request.getEntity());
+    }
+
     public void testPutTemplateRequest() throws Exception {
         Map<String, String> names = new HashMap<>();
         names.put("log", "log");
@@ -19,7 +19,11 @@

 package org.elasticsearch.client;

+import org.apache.http.entity.ContentType;
+import org.apache.http.entity.StringEntity;
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
@@ -29,6 +33,7 @@ import org.elasticsearch.repositories.fs.FsRepository;
 import org.elasticsearch.rest.RestStatus;

 import java.io.IOException;
+import java.util.Collections;

 import static org.hamcrest.Matchers.equalTo;
@@ -40,7 +45,6 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
         request.type(type);
         return execute(request, highLevelClient().snapshot()::createRepository,
             highLevelClient().snapshot()::createRepositoryAsync);
-
     }

     public void testCreateRepository() throws IOException {
@@ -48,7 +52,7 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
         assertTrue(response.isAcknowledged());
     }

-    public void testModulesGetRepositoriesUsingParams() throws IOException {
+    public void testSnapshotGetRepositoriesUsingParams() throws IOException {
         String testRepository = "test";
         assertTrue(createTestRepository(testRepository, FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged());
         assertTrue(createTestRepository("other", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged());
@@ -60,7 +64,7 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
         assertThat(1, equalTo(response.repositories().size()));
     }

-    public void testModulesGetDefaultRepositories() throws IOException {
+    public void testSnapshotGetDefaultRepositories() throws IOException {
         assertTrue(createTestRepository("other", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged());
         assertTrue(createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged());
@@ -69,7 +73,7 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
         assertThat(2, equalTo(response.repositories().size()));
     }

-    public void testModulesGetRepositoriesNonExistent() throws IOException {
+    public void testSnapshotGetRepositoriesNonExistent() {
         String repository = "doesnotexist";
         GetRepositoriesRequest request = new GetRepositoriesRequest(new String[]{repository});
         ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(request,
@@ -79,4 +83,23 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
         assertThat(exception.getMessage(), equalTo(
             "Elasticsearch exception [type=repository_missing_exception, reason=[" + repository + "] missing]"));
     }
+
+    public void testSnapshotDeleteRepository() throws IOException {
+        String repository = "test";
+        String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}";
+
+        highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository,
+            Collections.emptyMap(), new StringEntity(repositorySettings, ContentType.APPLICATION_JSON));
+
+        GetRepositoriesRequest request = new GetRepositoriesRequest();
+        GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories,
+            highLevelClient().snapshot()::getRepositoriesAsync);
+        assertThat(1, equalTo(response.repositories().size()));
+
+        DeleteRepositoryRequest deleteRequest = new DeleteRepositoryRequest(repository);
+        DeleteRepositoryResponse deleteResponse = execute(deleteRequest, highLevelClient().snapshot()::deleteRepository,
+            highLevelClient().snapshot()::deleteRepositoryAsync);
+
+        assertTrue(deleteResponse.isAcknowledged());
+    }
 }
@@ -21,6 +21,8 @@ package org.elasticsearch.client.documentation;

 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.LatchedActionListener;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
@@ -235,6 +237,66 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase
         }
     }

+    public void testSnapshotDeleteRepository() throws IOException {
+        RestHighLevelClient client = highLevelClient();
+
+        createTestRepositories();
+
+        // tag::delete-repository-request
+        DeleteRepositoryRequest request = new DeleteRepositoryRequest(repositoryName);
+        // end::delete-repository-request
+
+        // tag::delete-repository-request-masterTimeout
+        request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
+        request.masterNodeTimeout("1m"); // <2>
+        // end::delete-repository-request-masterTimeout
+        // tag::delete-repository-request-timeout
+        request.timeout(TimeValue.timeValueMinutes(1)); // <1>
+        request.timeout("1m"); // <2>
+        // end::delete-repository-request-timeout
+
+        // tag::delete-repository-execute
+        DeleteRepositoryResponse response = client.snapshot().deleteRepository(request);
+        // end::delete-repository-execute
+
+        // tag::delete-repository-response
+        boolean acknowledged = response.isAcknowledged(); // <1>
+        // end::delete-repository-response
+        assertTrue(acknowledged);
+    }
+
+    public void testSnapshotDeleteRepositoryAsync() throws InterruptedException {
+        RestHighLevelClient client = highLevelClient();
+        {
+            DeleteRepositoryRequest request = new DeleteRepositoryRequest();
+
+            // tag::delete-repository-execute-listener
+            ActionListener<DeleteRepositoryResponse> listener =
+                new ActionListener<DeleteRepositoryResponse>() {
+                    @Override
+                    public void onResponse(DeleteRepositoryResponse deleteRepositoryResponse) {
+                        // <1>
+                    }
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        // <2>
+                    }
+                };
+            // end::delete-repository-execute-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::delete-repository-execute-async
+            client.snapshot().deleteRepositoryAsync(request, listener); // <1>
+            // end::delete-repository-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+
     private void createTestRepositories() throws IOException {
         PutRepositoryRequest request = new PutRepositoryRequest(repositoryName);
         request.type(FsRepository.TYPE);
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+set -e -o pipefail
+
+source "`dirname "$0"`"/elasticsearch-env
+
+IFS=';' read -r -a additional_sources <<< "$ES_ADDITIONAL_SOURCES"
+for additional_source in "${additional_sources[@]}"
+do
+  source "`dirname "$0"`"/$additional_source
+done
+
+exec \
+  "$JAVA" \
+  $ES_JAVA_OPTS \
+  -Des.path.home="$ES_HOME" \
+  -Des.path.conf="$ES_PATH_CONF" \
+  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
+  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
+  -cp "$ES_CLASSPATH" \
+  $1 \
+  "${@:2}"
@@ -1,14 +1,5 @@
 #!/bin/bash

-source "`dirname "$0"`"/elasticsearch-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+"`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.common.settings.KeyStoreCli \
   "$@"
@@ -1,14 +1,5 @@
 #!/bin/bash

-source "`dirname "$0"`"/elasticsearch-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+"`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.plugins.PluginCli \
   "$@"
@@ -1,14 +1,5 @@
 #!/bin/bash

-source "`dirname "$0"`"/elasticsearch-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+"`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.index.translog.TranslogToolCli \
   "$@"
@@ -30,7 +30,6 @@ import org.elasticsearch.cli.EnvironmentAwareCommand;
 import org.elasticsearch.cli.ExitCodes;
 import org.elasticsearch.cli.Terminal;
 import org.elasticsearch.cli.UserException;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.hash.MessageDigests;
@@ -240,7 +239,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
     /** Downloads the plugin and returns the file it was downloaded to. */
     private Path download(Terminal terminal, String pluginId, Path tmpDir) throws Exception {
         if (OFFICIAL_PLUGINS.contains(pluginId)) {
-            final String url = getElasticUrl(terminal, getStagingHash(), Version.CURRENT, pluginId, Platforms.PLATFORM_NAME);
+            final String url = getElasticUrl(terminal, getStagingHash(), Version.CURRENT, isSnapshot(), pluginId, Platforms.PLATFORM_NAME);
             terminal.println("-> Downloading " + pluginId + " from elastic");
             return downloadZipAndChecksum(terminal, url, tmpDir, false);
         }
@@ -272,22 +271,43 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
         return System.getProperty(PROPERTY_STAGING_ID);
     }

+    boolean isSnapshot() {
+        return Build.CURRENT.isSnapshot();
+    }
+
     /** Returns the url for an official elasticsearch plugin. */
-    private String getElasticUrl(Terminal terminal, String stagingHash, Version version,
-                                 String pluginId, String platform) throws IOException {
+    private String getElasticUrl(
+            final Terminal terminal,
+            final String stagingHash,
+            final Version version,
+            final boolean isSnapshot,
+            final String pluginId,
+            final String platform) throws IOException, UserException {
         final String baseUrl;
-        if (stagingHash != null) {
-            baseUrl = String.format(Locale.ROOT,
-                "https://staging.elastic.co/%s-%s/downloads/elasticsearch-plugins/%s", version, stagingHash, pluginId);
-        } else {
-            baseUrl = String.format(Locale.ROOT,
-                "https://artifacts.elastic.co/downloads/elasticsearch-plugins/%s", pluginId);
+        if (isSnapshot && stagingHash == null) {
+            throw new UserException(
+                ExitCodes.CONFIG, "attempted to install release build of official plugin on snapshot build of Elasticsearch");
         }
-        final String platformUrl = String.format(Locale.ROOT, "%s/%s-%s-%s.zip", baseUrl, pluginId, platform, version);
+        if (stagingHash != null) {
+            if (isSnapshot) {
+                baseUrl = nonReleaseUrl("snapshots", version, stagingHash, pluginId);
+            } else {
+                baseUrl = nonReleaseUrl("staging", version, stagingHash, pluginId);
+            }
+        } else {
+            baseUrl = String.format(Locale.ROOT, "https://artifacts.elastic.co/downloads/elasticsearch-plugins/%s", pluginId);
+        }
+        final String platformUrl =
+            String.format(Locale.ROOT, "%s/%s-%s-%s.zip", baseUrl, pluginId, platform, Version.displayVersion(version, isSnapshot));
         if (urlExists(terminal, platformUrl)) {
             return platformUrl;
         }
-        return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, version);
+        return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, Version.displayVersion(version, isSnapshot));
     }

+    private String nonReleaseUrl(final String hostname, final Version version, final String stagingHash, final String pluginId) {
+        return String.format(
+            Locale.ROOT, "https://%s.elastic.co/%s-%s/downloads/elasticsearch-plugins/%s", hostname, version, stagingHash, pluginId);
+    }
+
     /** Returns the url for an elasticsearch plugin in maven. */
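The new branching is easiest to see end to end. Below is a minimal, self-contained sketch of the base-URL selection the method above performs (plain Java, none of the Elasticsearch types; the version string and staging hash are made up for illustration):

    import java.util.Locale;

    public class PluginUrlSketch {
        static String baseUrl(boolean isSnapshot, String stagingHash, String version, String pluginId) {
            // A snapshot build with no staging hash has no valid download location.
            if (isSnapshot && stagingHash == null) {
                throw new IllegalArgumentException(
                    "attempted to install release build of official plugin on snapshot build of Elasticsearch");
            }
            if (stagingHash != null) {
                // snapshots.elastic.co for snapshot builds, staging.elastic.co for staged releases
                String hostname = isSnapshot ? "snapshots" : "staging";
                return String.format(Locale.ROOT, "https://%s.elastic.co/%s-%s/downloads/elasticsearch-plugins/%s",
                    hostname, version, stagingHash, pluginId);
            }
            // Plain releases come from the artifacts host.
            return String.format(Locale.ROOT, "https://artifacts.elastic.co/downloads/elasticsearch-plugins/%s", pluginId);
        }

        public static void main(String[] args) {
            System.out.println(baseUrl(false, null, "7.0.0", "analysis-icu"));     // release artifacts host
            System.out.println(baseUrl(false, "abc123", "7.0.0", "analysis-icu")); // staging host
            System.out.println(baseUrl(true, "abc123", "7.0.0", "analysis-icu"));  // snapshots host
        }
    }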
@@ -800,7 +800,7 @@ public class InstallPluginCommandTests extends ESTestCase {
         skipJarHellCommand.execute(terminal, pluginZip, isBatch, env.v2());
     }

-    void assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash,
+    void assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash, boolean isSnapshot,
                                     String shaExtension, Function<byte[], String> shaCalculator) throws Exception {
         Tuple<Path, Environment> env = createEnv(fs, temp);
         Path pluginDir = createPluginDir(temp);
@@ -834,6 +834,12 @@ public class InstallPluginCommandTests extends ESTestCase {
             String getStagingHash() {
                 return stagingHash;
             }

+            @Override
+            boolean isSnapshot() {
+                return isSnapshot;
+            }
+
             @Override
             void jarHellCheck(PluginInfo candidateInfo, Path candidate, Path pluginsDir, Path modulesDir) throws Exception {
                 // no jarhell check
@@ -843,48 +849,82 @@ public class InstallPluginCommandTests extends ESTestCase {
         assertPlugin(name, pluginDir, env.v2());
     }

-    public void assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash) throws Exception {
+    public void assertInstallPluginFromUrl(
+            final String pluginId, final String name, final String url, final String stagingHash, boolean isSnapshot) throws Exception {
         MessageDigest digest = MessageDigest.getInstance("SHA-512");
-        assertInstallPluginFromUrl(pluginId, name, url, stagingHash, ".sha512", checksumAndFilename(digest, url));
+        assertInstallPluginFromUrl(pluginId, name, url, stagingHash, isSnapshot, ".sha512", checksumAndFilename(digest, url));
     }

     public void testOfficalPlugin() throws Exception {
         String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip";
-        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null);
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false);
     }

+    public void testOfficialPluginSnapshot() throws Exception {
+        String url = String.format(
+            Locale.ROOT,
+            "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip",
+            Version.CURRENT,
+            Version.displayVersion(Version.CURRENT, true));
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", true);
+    }
+
+    public void testInstallReleaseBuildOfPluginOnSnapshotBuild() {
+        String url = String.format(
+            Locale.ROOT,
+            "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip",
+            Version.CURRENT,
+            Version.displayVersion(Version.CURRENT, true));
+        // attempting to install a release build of a plugin (no staging ID) on a snapshot build should throw a user exception
+        final UserException e =
+            expectThrows(UserException.class, () -> assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, true));
+        assertThat(e.exitCode, equalTo(ExitCodes.CONFIG));
+        assertThat(
+            e, hasToString(containsString("attempted to install release build of official plugin on snapshot build of Elasticsearch")));
+    }
+
     public void testOfficalPluginStaging() throws Exception {
         String url = "https://staging.elastic.co/" + Version.CURRENT + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-"
             + Version.CURRENT + ".zip";
-        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123");
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", false);
     }

     public void testOfficalPlatformPlugin() throws Exception {
         String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Platforms.PLATFORM_NAME +
             "-" + Version.CURRENT + ".zip";
-        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null);
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false);
     }

+    public void testOfficialPlatformPluginSnapshot() throws Exception {
+        String url = String.format(
+            Locale.ROOT,
+            "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s-%s.zip",
+            Version.CURRENT,
+            Platforms.PLATFORM_NAME,
+            Version.displayVersion(Version.CURRENT, true));
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", true);
+    }
+
     public void testOfficalPlatformPluginStaging() throws Exception {
         String url = "https://staging.elastic.co/" + Version.CURRENT + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-"
             + Platforms.PLATFORM_NAME + "-" + Version.CURRENT + ".zip";
-        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123");
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", false);
     }

     public void testMavenPlugin() throws Exception {
         String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip";
-        assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null);
+        assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false);
     }

     public void testMavenPlatformPlugin() throws Exception {
         String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-" + Platforms.PLATFORM_NAME + "-1.0.0.zip";
-        assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null);
+        assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false);
     }

     public void testMavenSha1Backcompat() throws Exception {
         String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip";
         MessageDigest digest = MessageDigest.getInstance("SHA-1");
-        assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, ".sha1", checksum(digest));
+        assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".sha1", checksum(digest));
         assertTrue(terminal.getOutput(), terminal.getOutput().contains("sha512 not found, falling back to sha1"));
     }
@@ -892,7 +932,7 @@ public class InstallPluginCommandTests extends ESTestCase {
         String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip";
         MessageDigest digest = MessageDigest.getInstance("SHA-1");
         UserException e = expectThrows(UserException.class, () ->
-            assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, ".sha1", checksum(digest)));
+            assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false, ".sha1", checksum(digest)));
         assertEquals(ExitCodes.IO_ERROR, e.exitCode);
         assertEquals("Plugin checksum missing: " + url + ".sha512", e.getMessage());
     }
@@ -900,7 +940,7 @@ public class InstallPluginCommandTests extends ESTestCase {
     public void testMavenShaMissing() throws Exception {
         String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip";
         UserException e = expectThrows(UserException.class, () ->
-            assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, ".dne", bytes -> null));
+            assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".dne", bytes -> null));
         assertEquals(ExitCodes.IO_ERROR, e.exitCode);
         assertEquals("Plugin checksum missing: " + url + ".sha1", e.getMessage());
     }
@@ -909,7 +949,7 @@ public class InstallPluginCommandTests extends ESTestCase {
         String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip";
         MessageDigest digest = MessageDigest.getInstance("SHA-512");
         UserException e = expectThrows(UserException.class, () ->
-            assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, ".sha512", checksum(digest)));
+            assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false, ".sha512", checksum(digest)));
         assertEquals(ExitCodes.IO_ERROR, e.exitCode);
         assertTrue(e.getMessage(), e.getMessage().startsWith("Invalid checksum file"));
     }
@@ -923,6 +963,7 @@ public class InstallPluginCommandTests extends ESTestCase {
                 "analysis-icu",
                 url,
                 null,
+                false,
                 ".sha512",
                 checksumAndString(digest, " repository-s3-" + Version.CURRENT + ".zip")));
         assertEquals(ExitCodes.IO_ERROR, e.exitCode);
@@ -938,6 +979,7 @@ public class InstallPluginCommandTests extends ESTestCase {
                 "analysis-icu",
                 url,
                 null,
+                false,
                 ".sha512",
                 checksumAndString(digest, " analysis-icu-" + Version.CURRENT + ".zip\nfoobar")));
         assertEquals(ExitCodes.IO_ERROR, e.exitCode);
@@ -952,6 +994,7 @@ public class InstallPluginCommandTests extends ESTestCase {
                 "analysis-icu",
                 url,
                 null,
+                false,
                 ".sha512",
                 bytes -> "foobar analysis-icu-" + Version.CURRENT + ".zip"));
         assertEquals(ExitCodes.IO_ERROR, e.exitCode);
@@ -961,7 +1004,7 @@ public class InstallPluginCommandTests extends ESTestCase {
     public void testSha1Mismatch() throws Exception {
         String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip";
         UserException e = expectThrows(UserException.class, () ->
-            assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, ".sha1", bytes -> "foobar"));
+            assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".sha1", bytes -> "foobar"));
         assertEquals(ExitCodes.IO_ERROR, e.exitCode);
         assertTrue(e.getMessage(), e.getMessage().contains("SHA-1 mismatch, expected foobar"));
     }
@@ -0,0 +1,82 @@
+[[java-rest-high-snapshot-delete-repository]]
+=== Snapshot Delete Repository API
+
+The Snapshot Delete Repository API allows you to delete a registered repository.
+
+[[java-rest-high-snapshot-delete-repository-request]]
+==== Snapshot Delete Repository Request
+
+A `DeleteRepositoryRequest`:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-request]
+--------------------------------------------------
+
+==== Optional Arguments
+The following arguments can optionally be provided:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-request-timeout]
+--------------------------------------------------
+<1> Timeout to wait for all the nodes to acknowledge the repository deletion
+as a `TimeValue`
+<2> Timeout to wait for all the nodes to acknowledge the repository deletion
+as a `String`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-request-masterTimeout]
+--------------------------------------------------
+<1> Timeout to connect to the master node as a `TimeValue`
+<2> Timeout to connect to the master node as a `String`
+
+[[java-rest-high-snapshot-delete-repository-sync]]
+==== Synchronous Execution
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-execute]
+--------------------------------------------------
+
+[[java-rest-high-snapshot-delete-repository-async]]
+==== Asynchronous Execution
+
+The asynchronous execution of a snapshot delete repository request requires both the
+`DeleteRepositoryRequest` instance and an `ActionListener` instance to be
+passed to the asynchronous method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-execute-async]
+--------------------------------------------------
+<1> The `DeleteRepositoryRequest` to execute and the `ActionListener`
+to use when the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for `DeleteRepositoryResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument
+<2> Called in case of a failure. The raised exception is provided as an argument
+
+[[java-rest-high-snapshot-delete-repository-response]]
+==== Snapshot Delete Repository Response
+
+The returned `DeleteRepositoryResponse` allows you to retrieve information about the
+executed operation as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-response]
+--------------------------------------------------
+<1> Indicates whether the node has acknowledged the request
@@ -114,6 +114,9 @@ include::cluster/list_tasks.asciidoc[]
 The Java High Level REST Client supports the following Snapshot APIs:

 * <<java-rest-high-snapshot-get-repository>>
 * <<java-rest-high-snapshot-create-repository>>
+* <<java-rest-high-snapshot-delete-repository>>

 include::snapshot/get_repository.asciidoc[]
 include::snapshot/create_repository.asciidoc[]
+include::snapshot/delete_repository.asciidoc[]
@@ -11,7 +11,7 @@ The Painless execute API allows an arbitrary script to be executed and a result
 |======
 | Name      | Required | Default              | Description
 | `script`  | yes      | -                    | The script to execute
-| `context` | no       | `execute_api_script` | The context the script should be executed in.
+| `context` | no       | `painless_test`      | The context the script should be executed in.
 |======

 ==== Contexts
@@ -84,7 +84,7 @@ When `proxy.type` is set to `http` or `socks`, `proxy.host` and `proxy.port` mus

 [[repository-azure-repository-settings]]
-===== Repository settings
+==== Repository settings

 The Azure repository supports the following settings:
@@ -178,7 +178,7 @@ client.admin().cluster().preparePutRepository("my_backup_java1")
 ----

 [[repository-azure-validation]]
-===== Repository validation rules
+==== Repository validation rules

 According to the http://msdn.microsoft.com/en-us/library/dd135715.aspx[containers naming guide], a container name must
 be a valid DNS name, conforming to the following naming rules:
@@ -378,7 +378,8 @@ PUT /catalan_example
         "filter": {
           "catalan_elision": {
             "type": "elision",
-            "articles": [ "d", "l", "m", "n", "s", "t"]
+            "articles": [ "d", "l", "m", "n", "s", "t"],
+            "articles_case": true
           },
           "catalan_stop": {
             "type": "stop",
@@ -1156,7 +1157,8 @@ PUT /italian_example
               "nell", "sull", "coll", "pell",
               "gl", "agl", "dagl", "degl", "negl",
               "sugl", "un", "m", "t", "s", "v", "d"
-            ]
+            ],
+            "articles_case": true
           },
           "italian_stop": {
             "type": "stop",
@@ -124,8 +124,8 @@ the shared file system repository it is necessary to mount the same shared files
 master and data nodes. This location (or one of its parent directories) must be registered in the `path.repo`
 setting on all master and data nodes.

-Assuming that the shared filesystem is mounted to `/mount/backups/my_backup`, the following setting should be added to
-`elasticsearch.yml` file:
+Assuming that the shared filesystem is mounted to `/mount/backups/my_fs_backup_location`, the following setting should
+be added to the `elasticsearch.yml` file:

 [source,yaml]
 --------------
@@ -141,7 +141,7 @@ path.repo: ["\\\\MY_SERVER\\Snapshots"]
 --------------

 After all nodes are restarted, the following command can be used to register the shared file system repository with
-the name `my_backup`:
+the name `my_fs_backup`:

 [source,js]
 -----------------------------------
@@ -419,7 +419,7 @@ A repository can be unregistered using the following command:

 [source,sh]
 -----------------------------------
-DELETE /_snapshot/my_fs_backup
+DELETE /_snapshot/my_backup
 -----------------------------------
 // CONSOLE
 // TEST[continued]
@@ -33,8 +33,6 @@ publishing {
 }

 dependencies {
-    compile "org.apache.logging.log4j:log4j-api:${versions.log4j}"
-
     testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
     testCompile "junit:junit:${versions.junit}"
     testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
@@ -64,18 +62,3 @@ forbiddenApisMain {
     // es-all is not checked as we connect and accept sockets
     signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
 }

 //JarHell is part of es core, which we don't want to pull in
 jarHell.enabled=false
-
-thirdPartyAudit.excludes = [
-    'org/osgi/framework/AdaptPermission',
-    'org/osgi/framework/AdminPermission',
-    'org/osgi/framework/Bundle',
-    'org/osgi/framework/BundleActivator',
-    'org/osgi/framework/BundleContext',
-    'org/osgi/framework/BundleEvent',
-    'org/osgi/framework/SynchronousBundleListener',
-    'org/osgi/framework/wiring/BundleWire',
-    'org/osgi/framework/wiring/BundleWiring'
-]
@@ -1 +0,0 @@
-7a2999229464e7a324aa503c0a52ec0f05efe7bd
@@ -1,202 +0,0 @@
-[Apache License, Version 2.0 (January 2004, http://www.apache.org/licenses/): the full 202-line license text was deleted along with this log4j license file]
@@ -1,5 +0,0 @@
-Apache log4j
-Copyright 2007 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
@@ -19,11 +19,9 @@

 package org.elasticsearch.nio;

-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
-
 import java.io.IOException;
 import java.nio.channels.SelectionKey;
+import java.util.function.Consumer;
 import java.util.function.Supplier;

 /**
@@ -33,8 +31,8 @@ public class AcceptorEventHandler extends EventHandler {

     private final Supplier<SocketSelector> selectorSupplier;

-    public AcceptorEventHandler(Logger logger, Supplier<SocketSelector> selectorSupplier) {
-        super(logger);
+    public AcceptorEventHandler(Supplier<SocketSelector> selectorSupplier, Consumer<Exception> exceptionHandler) {
+        super(exceptionHandler);
         this.selectorSupplier = selectorSupplier;
     }
@@ -58,7 +56,7 @@ public class AcceptorEventHandler extends EventHandler {
      * @param exception that occurred
      */
     protected void registrationException(ServerChannelContext context, Exception exception) {
-        logger.error(new ParameterizedMessage("failed to register server channel: {}", context.getChannel()), exception);
+        context.handleException(exception);
     }

     /**
@@ -78,7 +76,6 @@ public class AcceptorEventHandler extends EventHandler {
      * @param exception that occurred
      */
     protected void acceptException(ServerChannelContext context, Exception exception) {
-        logger.debug(() -> new ParameterizedMessage("exception while accepting new channel from server channel: {}",
-            context.getChannel()), exception);
+        context.handleException(exception);
     }
 }
@@ -83,7 +83,7 @@ public abstract class ESSelector implements Closeable {
             try {
                 selector.close();
             } catch (IOException e) {
-                eventHandler.closeSelectorException(e);
+                eventHandler.selectorException(e);
             } finally {
                 runLock.unlock();
                 exitedLoop.countDown();
@@ -123,7 +123,7 @@ public abstract class ESSelector implements Closeable {
                 throw e;
             }
         } catch (IOException e) {
-            eventHandler.selectException(e);
+            eventHandler.selectorException(e);
         } catch (Exception e) {
             eventHandler.uncaughtException(e);
         }
@@ -19,37 +19,26 @@

 package org.elasticsearch.nio;

-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
-
 import java.io.IOException;
 import java.nio.channels.Selector;
+import java.util.function.Consumer;

 public abstract class EventHandler {

-    protected final Logger logger;
+    protected final Consumer<Exception> exceptionHandler;

-    EventHandler(Logger logger) {
-        this.logger = logger;
+    protected EventHandler(Consumer<Exception> exceptionHandler) {
+        this.exceptionHandler = exceptionHandler;
     }

     /**
-     * This method handles an IOException that was thrown during a call to {@link Selector#select(long)}.
+     * This method handles an IOException that was thrown during a call to {@link Selector#select(long)} or
+     * {@link Selector#close()}.
      *
      * @param exception the exception
      */
-    protected void selectException(IOException exception) {
-        logger.warn(new ParameterizedMessage("io exception during select [thread={}]", Thread.currentThread().getName()), exception);
-    }
-
-    /**
-     * This method handles an IOException that was thrown during a call to {@link Selector#close()}.
-     *
-     * @param exception the exception
-     */
-    protected void closeSelectorException(IOException exception) {
-        logger.warn(new ParameterizedMessage("io exception while closing selector [thread={}]", Thread.currentThread().getName()),
-            exception);
+    protected void selectorException(IOException exception) {
+        exceptionHandler.accept(exception);
     }

     /**
@ -79,11 +68,11 @@ public abstract class EventHandler {
|
|||
/**
|
||||
* This method is called when an attempt to close a channel throws an exception.
|
||||
*
|
||||
* @param context that was being closed
|
||||
* @param channel that was being closed
|
||||
* @param exception that occurred
|
||||
*/
|
||||
protected void closeException(ChannelContext<?> context, Exception exception) {
|
||||
logger.debug(() -> new ParameterizedMessage("exception while closing channel: {}", context.getChannel()), exception);
|
||||
protected void closeException(ChannelContext<?> channel, Exception exception) {
|
||||
channel.handleException(exception);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -95,6 +84,6 @@ public abstract class EventHandler {
|
|||
* @param exception that was thrown
|
||||
*/
|
||||
protected void genericChannelException(ChannelContext<?> channel, Exception exception) {
|
||||
logger.debug(() -> new ParameterizedMessage("exception while handling event for channel: {}", channel.getChannel()), exception);
|
||||
channel.handleException(exception);
|
||||
}
|
||||
}
|
||||
|
|
|
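The hunks above trade a Logger field for an injected Consumer<Exception>, so the selector code no longer decides how failures are reported. A minimal standalone sketch of the same pattern (the SketchEventHandler name and the printing callback are illustrative, not part of the commit):

import java.io.IOException;
import java.util.function.Consumer;

class SketchEventHandler {
    private final Consumer<Exception> exceptionHandler;

    SketchEventHandler(Consumer<Exception> exceptionHandler) {
        this.exceptionHandler = exceptionHandler;
    }

    // Mirrors selectorException above: delegate to the owner instead of logging locally.
    void selectorException(IOException exception) {
        exceptionHandler.accept(exception);
    }
}

// The owner supplies the policy, e.g.:
// new SketchEventHandler(e -> System.err.println("selector failed: " + e));
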
@@ -19,7 +19,6 @@

package org.elasticsearch.nio;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.nio.utils.ExceptionsHelper;

import java.io.IOException;

@@ -29,7 +28,6 @@ import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;

@@ -56,16 +54,16 @@ public class NioGroup implements AutoCloseable {

    private final AtomicBoolean isOpen = new AtomicBoolean(true);

    public NioGroup(Logger logger, ThreadFactory acceptorThreadFactory, int acceptorCount,
                    BiFunction<Logger, Supplier<SocketSelector>, AcceptorEventHandler> acceptorEventHandlerFunction,
    public NioGroup(ThreadFactory acceptorThreadFactory, int acceptorCount,
                    Function<Supplier<SocketSelector>, AcceptorEventHandler> acceptorEventHandlerFunction,
                    ThreadFactory socketSelectorThreadFactory, int socketSelectorCount,
                    Function<Logger, SocketEventHandler> socketEventHandlerFunction) throws IOException {
                    Supplier<SocketEventHandler> socketEventHandlerFunction) throws IOException {
        acceptors = new ArrayList<>(acceptorCount);
        socketSelectors = new ArrayList<>(socketSelectorCount);

        try {
            for (int i = 0; i < socketSelectorCount; ++i) {
                SocketSelector selector = new SocketSelector(socketEventHandlerFunction.apply(logger));
                SocketSelector selector = new SocketSelector(socketEventHandlerFunction.get());
                socketSelectors.add(selector);
            }
            startSelectors(socketSelectors, socketSelectorThreadFactory);

@@ -73,7 +71,7 @@ public class NioGroup implements AutoCloseable {
            for (int i = 0; i < acceptorCount; ++i) {
                SocketSelector[] childSelectors = this.socketSelectors.toArray(new SocketSelector[this.socketSelectors.size()]);
                Supplier<SocketSelector> selectorSupplier = new RoundRobinSupplier<>(childSelectors);
                AcceptingSelector acceptor = new AcceptingSelector(acceptorEventHandlerFunction.apply(logger, selectorSupplier));
                AcceptingSelector acceptor = new AcceptingSelector(acceptorEventHandlerFunction.apply(selectorSupplier));
                acceptors.add(acceptor);
            }
            startSelectors(acceptors, acceptorThreadFactory);

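With the Logger parameter gone, callers now hand NioGroup two small factories: a Function that builds an AcceptorEventHandler around the supplied selector supplier, and a Supplier for the SocketEventHandler. A rough wiring sketch, assuming the nio classes above are on the classpath; the printing callback and thread counts are illustrative only:

import java.io.IOException;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.function.Consumer;

class NioGroupWiring {
    static NioGroup openGroup() throws IOException {
        Consumer<Exception> onError = e -> System.err.println("nio failure: " + e);
        ThreadFactory acceptorThreads = Executors.defaultThreadFactory();
        ThreadFactory selectorThreads = Executors.defaultThreadFactory();
        // One acceptor thread, two socket selector threads.
        return new NioGroup(acceptorThreads, 1,
            s -> new AcceptorEventHandler(s, onError),
            selectorThreads, 2,
            () -> new SocketEventHandler(onError));
    }
}
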
@@ -19,23 +19,17 @@

package org.elasticsearch.nio;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

import java.io.IOException;
import java.nio.channels.SelectionKey;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

/**
 * Event handler designed to handle events from non-server sockets
 */
public class SocketEventHandler extends EventHandler {

    private final Logger logger;

    public SocketEventHandler(Logger logger) {
        super(logger);
        this.logger = logger;
    public SocketEventHandler(Consumer<Exception> exceptionHandler) {
        super(exceptionHandler);
    }

    /**

@@ -62,7 +56,6 @@ public class SocketEventHandler extends EventHandler {
     * @param exception that occurred
     */
    protected void registrationException(SocketChannelContext context, Exception exception) {
        logger.debug(() -> new ParameterizedMessage("failed to register socket channel: {}", context.getChannel()), exception);
        context.handleException(exception);
    }

@@ -85,7 +78,6 @@ public class SocketEventHandler extends EventHandler {
     * @param exception that occurred
     */
    protected void connectException(SocketChannelContext context, Exception exception) {
        logger.debug(() -> new ParameterizedMessage("failed to connect to socket channel: {}", context.getChannel()), exception);
        context.handleException(exception);
    }

@@ -106,7 +98,6 @@ public class SocketEventHandler extends EventHandler {
     * @param exception that occurred
     */
    protected void readException(SocketChannelContext context, Exception exception) {
        logger.debug(() -> new ParameterizedMessage("exception while reading from socket channel: {}", context.getChannel()), exception);
        context.handleException(exception);
    }

@@ -127,18 +118,16 @@ public class SocketEventHandler extends EventHandler {
     * @param exception that occurred
     */
    protected void writeException(SocketChannelContext context, Exception exception) {
        logger.debug(() -> new ParameterizedMessage("exception while writing to socket channel: {}", context.getChannel()), exception);
        context.handleException(exception);
    }

    /**
     * This method is called when a listener attached to a channel operation throws an exception.
     *
     * @param listener that was called
     * @param exception that occurred
     */
    protected <V> void listenerException(BiConsumer<V, Throwable> listener, Exception exception) {
        logger.warn(new ParameterizedMessage("exception while executing listener: {}", listener), exception);
    protected void listenerException(Exception exception) {
        exceptionHandler.accept(exception);
    }

    /**

@@ -143,7 +143,7 @@ public class SocketSelector extends ESSelector {
        try {
            listener.accept(value, null);
        } catch (Exception e) {
            eventHandler.listenerException(listener, e);
            eventHandler.listenerException(e);
        }
    }

@@ -159,7 +159,7 @@ public class SocketSelector extends ESSelector {
        try {
            listener.accept(null, exception);
        } catch (Exception e) {
            eventHandler.listenerException(listener, e);
            eventHandler.listenerException(e);
        }
    }

@@ -50,7 +50,7 @@ public class AcceptorEventHandlerTests extends ESTestCase {
        ArrayList<SocketSelector> selectors = new ArrayList<>();
        selectors.add(mock(SocketSelector.class));
        selectorSupplier = new RoundRobinSupplier<>(selectors.toArray(new SocketSelector[selectors.size()]));
        handler = new AcceptorEventHandler(logger, selectorSupplier);
        handler = new AcceptorEventHandler(selectorSupplier, mock(Consumer.class));

        channel = new NioServerSocketChannel(mock(ServerSocketChannel.class));
        context = new DoNotRegisterContext(channel, mock(AcceptingSelector.class), mock(Consumer.class));

@@ -99,6 +99,14 @@ public class AcceptorEventHandlerTests extends ESTestCase {
        verify(serverChannelContext).acceptChannels(selectorSupplier);
    }

    public void testAcceptExceptionCallsExceptionHandler() throws IOException {
        ServerChannelContext serverChannelContext = mock(ServerChannelContext.class);
        IOException exception = new IOException();
        handler.acceptException(serverChannelContext, exception);

        verify(serverChannelContext).handleException(exception);
    }

    private class DoNotRegisterContext extends ServerChannelContext {

@@ -27,7 +27,6 @@ import java.nio.channels.CancelledKeyException;
import java.nio.channels.ClosedSelectorException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;

import static org.mockito.Matchers.anyInt;
import static org.mockito.Mockito.mock;

@@ -81,7 +80,7 @@ public class ESSelectorTests extends ESTestCase {

        this.selector.singleLoop();

        verify(handler).selectException(ioException);
        verify(handler).selectorException(ioException);
    }

    public void testSelectorClosedIfOpenAndEventLoopNotRunning() throws IOException {

@@ -25,6 +25,7 @@ import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.function.Consumer;

import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
import static org.mockito.Mockito.mock;

@@ -34,10 +35,12 @@ public class NioGroupTests extends ESTestCase {
    private NioGroup nioGroup;

    @Override
    @SuppressWarnings("unchecked")
    public void setUp() throws Exception {
        super.setUp();
        nioGroup = new NioGroup(logger, daemonThreadFactory(Settings.EMPTY, "acceptor"), 1, AcceptorEventHandler::new,
            daemonThreadFactory(Settings.EMPTY, "selector"), 1, SocketEventHandler::new);
        nioGroup = new NioGroup(daemonThreadFactory(Settings.EMPTY, "acceptor"), 1,
            (s) -> new AcceptorEventHandler(s, mock(Consumer.class)), daemonThreadFactory(Settings.EMPTY, "selector"), 1,
            () -> new SocketEventHandler(mock(Consumer.class)));
    }

    @Override

@@ -69,10 +72,12 @@ public class NioGroupTests extends ESTestCase {
        nioGroup.close();
    }

    @SuppressWarnings("unchecked")
    public void testExceptionAtStartIsHandled() throws IOException {
        RuntimeException ex = new RuntimeException();
        CheckedRunnable<IOException> ctor = () -> new NioGroup(logger, r -> {throw ex;}, 1,
            AcceptorEventHandler::new, daemonThreadFactory(Settings.EMPTY, "selector"), 1, SocketEventHandler::new);
        CheckedRunnable<IOException> ctor = () -> new NioGroup(r -> {throw ex;}, 1,
            (s) -> new AcceptorEventHandler(s, mock(Consumer.class)), daemonThreadFactory(Settings.EMPTY, "selector"),
            1, () -> new SocketEventHandler(mock(Consumer.class)));
        RuntimeException runtimeException = expectThrows(RuntimeException.class, ctor::run);
        assertSame(ex, runtimeException);
        // ctor starts threads. So we are testing that a failure to construct will stop threads. Our thread

@@ -36,7 +36,8 @@ import static org.mockito.Mockito.when;

public class SocketEventHandlerTests extends ESTestCase {

    private Consumer<Exception> exceptionHandler;
    private Consumer<Exception> channelExceptionHandler;
    private Consumer<Exception> genericExceptionHandler;

    private ReadWriteHandler readWriteHandler;
    private SocketEventHandler handler;

@@ -47,15 +48,16 @@ public class SocketEventHandlerTests extends ESTestCase {
    @Before
    @SuppressWarnings("unchecked")
    public void setUpHandler() throws IOException {
        exceptionHandler = mock(Consumer.class);
        channelExceptionHandler = mock(Consumer.class);
        genericExceptionHandler = mock(Consumer.class);
        readWriteHandler = mock(ReadWriteHandler.class);
        SocketSelector selector = mock(SocketSelector.class);
        handler = new SocketEventHandler(logger);
        handler = new SocketEventHandler(genericExceptionHandler);
        rawChannel = mock(SocketChannel.class);
        channel = new NioSocketChannel(rawChannel);
        when(rawChannel.finishConnect()).thenReturn(true);

        context = new DoNotRegisterContext(channel, selector, exceptionHandler, new TestSelectionKey(0), readWriteHandler);
        context = new DoNotRegisterContext(channel, selector, channelExceptionHandler, new TestSelectionKey(0), readWriteHandler);
        channel.setContext(context);
        handler.handleRegistration(context);

@@ -96,7 +98,7 @@ public class SocketEventHandlerTests extends ESTestCase {
    public void testRegistrationExceptionCallsExceptionHandler() throws IOException {
        CancelledKeyException exception = new CancelledKeyException();
        handler.registrationException(context, exception);
        verify(exceptionHandler).accept(exception);
        verify(channelExceptionHandler).accept(exception);
    }

    public void testConnectDoesNotRemoveOP_CONNECTInterestIfIncomplete() throws IOException {

@@ -114,7 +116,7 @@ public class SocketEventHandlerTests extends ESTestCase {
    public void testConnectExceptionCallsExceptionHandler() throws IOException {
        IOException exception = new IOException();
        handler.connectException(context, exception);
        verify(exceptionHandler).accept(exception);
        verify(channelExceptionHandler).accept(exception);
    }

    public void testHandleReadDelegatesToContext() throws IOException {

@@ -130,13 +132,13 @@ public class SocketEventHandlerTests extends ESTestCase {
    public void testReadExceptionCallsExceptionHandler() {
        IOException exception = new IOException();
        handler.readException(context, exception);
        verify(exceptionHandler).accept(exception);
        verify(channelExceptionHandler).accept(exception);
    }

    public void testWriteExceptionCallsExceptionHandler() {
        IOException exception = new IOException();
        handler.writeException(context, exception);
        verify(exceptionHandler).accept(exception);
        verify(channelExceptionHandler).accept(exception);
    }

    public void testPostHandlingCallWillCloseTheChannelIfReady() throws IOException {

@@ -192,6 +194,12 @@ public class SocketEventHandlerTests extends ESTestCase {
        assertEquals(SelectionKey.OP_READ, key.interestOps());
    }

    public void testListenerExceptionCallsGenericExceptionHandler() throws IOException {
        RuntimeException listenerException = new RuntimeException();
        handler.listenerException(listenerException);
        verify(genericExceptionHandler).accept(listenerException);
    }

    private class DoNotRegisterContext extends BytesChannelContext {

        private final TestSelectionKey selectionKey;

@@ -297,7 +297,7 @@ public class SocketSelectorTests extends ESTestCase {

        socketSelector.executeListener(listener, null);

        verify(eventHandler).listenerException(listener, exception);
        verify(eventHandler).listenerException(exception);
    }

    public void testExecuteFailedListenerWillHandleException() throws Exception {

@@ -307,6 +307,6 @@ public class SocketSelectorTests extends ESTestCase {

        socketSelector.executeFailedListener(listener, ioException);

        verify(eventHandler).listenerException(listener, exception);
        verify(eventHandler).listenerException(exception);
    }
}

@@ -154,9 +154,10 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport {
        try {
            int acceptorCount = NIO_HTTP_ACCEPTOR_COUNT.get(settings);
            int workerCount = NIO_HTTP_WORKER_COUNT.get(settings);
            nioGroup = new NioGroup(logger, daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount,
                AcceptorEventHandler::new, daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX),
                workerCount, SocketEventHandler::new);
            nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount,
                (s) -> new AcceptorEventHandler(s, this::nonChannelExceptionCaught),
                daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), workerCount,
                () -> new SocketEventHandler(this::nonChannelExceptionCaught));
            channelFactory = new HttpChannelFactory();
            this.boundAddress = createBoundHttpAddress();

@@ -265,6 +266,10 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport {
        }
    }

    protected void nonChannelExceptionCaught(Exception ex) {
        logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), ex);
    }

    private void closeChannels(List<NioChannel> channels) {
        List<ActionFuture<Void>> futures = new ArrayList<>(channels.size());

@@ -312,8 +317,10 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport {
        @Override
        public NioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException {
            NioServerSocketChannel nioChannel = new NioServerSocketChannel(channel);
            ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, NioHttpServerTransport.this::acceptChannel,
                (e) -> {});
            Consumer<Exception> exceptionHandler = (e) -> logger.error(() ->
                new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e);
            Consumer<NioSocketChannel> acceptor = NioHttpServerTransport.this::acceptChannel;
            ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, acceptor, exceptionHandler);
            nioChannel.setContext(context);
            return nioChannel;
        }

@@ -19,6 +19,7 @@

package org.elasticsearch.transport.nio;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;

@@ -105,9 +106,10 @@ public class NioTransport extends TcpTransport {
        if (useNetworkServer) {
            acceptorCount = NioTransport.NIO_ACCEPTOR_COUNT.get(settings);
        }
        nioGroup = new NioGroup(logger, daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount,
            AcceptorEventHandler::new, daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX),
            NioTransport.NIO_WORKER_COUNT.get(settings), SocketEventHandler::new);
        nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount,
            (s) -> new AcceptorEventHandler(s, this::onNonChannelException),
            daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), NioTransport.NIO_WORKER_COUNT.get(settings),
            () -> new SocketEventHandler(this::onNonChannelException));

        ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default");
        clientChannelFactory = channelFactory(clientProfileSettings, true);

@@ -193,8 +195,10 @@ public class NioTransport extends TcpTransport {
        @Override
        public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException {
            TcpNioServerSocketChannel nioChannel = new TcpNioServerSocketChannel(profileName, channel);
            ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, NioTransport.this::acceptChannel,
                (e) -> {});
            Consumer<Exception> exceptionHandler = (e) -> logger.error(() ->
                new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e);
            Consumer<NioSocketChannel> acceptor = NioTransport.this::acceptChannel;
            ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, acceptor, exceptionHandler);
            nioChannel.setContext(context);
            return nioChannel;
        }

@@ -30,6 +30,26 @@ task bwcTest {
}

for (Version version : bwcVersions.wireCompatible) {
  /*
   * The goal here is to:
   * <ul>
   * <li>start three nodes on the old version
   * <li>run tests with systemProperty 'tests.rest.suite', 'old_cluster'
   * <li>shut down one node
   * <li>start a node with the new version
   * <li>run tests with systemProperty 'tests.rest.suite', 'mixed_cluster'
   * <li>shut down one node on the old version
   * <li>start a node with the new version
   * <li>run tests with systemProperty 'tests.rest.suite', 'mixed_cluster' again
   * <li>shut down the last node with the old version
   * <li>start a node with the new version
   * <li>run tests with systemProperty 'tests.rest.suite', 'upgraded_cluster'
   * <li>shut down the entire cluster
   * </ul>
   *
   * Be careful: gradle dry run spits out tasks in the wrong order but,
   * strangely, running the tasks works properly.
   */
  String baseName = "v${version}"

  Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) {

@@ -39,8 +59,8 @@ for (Version version : bwcVersions.wireCompatible) {
  Object extension = extensions.findByName("${baseName}#oldClusterTestCluster")
  configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
    bwcVersion = version
    numBwcNodes = 2
    numNodes = 2
    numBwcNodes = 3
    numNodes = 3
    clusterName = 'rolling-upgrade'
    setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
    if (version.onOrAfter('5.3.0')) {

@@ -53,43 +73,57 @@ for (Version version : bwcVersions.wireCompatible) {
    systemProperty 'tests.rest.suite', 'old_cluster'
  }

  Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask)

  configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) {
    dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop"
    clusterName = 'rolling-upgrade'
    unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() }
    minimumMasterNodes = { 2 }
    /* Override the data directory so the new node always gets the node we
     * just stopped's data directory. */
    dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir }
    setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
  Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed ->
    configure(extensions.findByName("${baseName}#${name}")) {
      dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop"
      clusterName = 'rolling-upgrade'
      unicastTransportUri = { seedNode, node, ant -> unicastSeed() }
      minimumMasterNodes = { 3 }
      /* Override the data directory so the new node always gets the node we
       * just stopped's data directory. */
      dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir }
      setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
    }
  }

  Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner")
  mixedClusterTestRunner.configure {
  Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask)

  configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner,
      0, { oldClusterTest.nodes.get(1).transportUri() })

  Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner")
  oneThirdUpgradedTestRunner.configure {
    systemProperty 'tests.rest.suite', 'mixed_cluster'
    finalizedBy "${baseName}#oldClusterTestCluster#node0.stop"
    systemProperty 'tests.first_round', 'true'
    finalizedBy "${baseName}#oldClusterTestCluster#node1.stop"
  }

  Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask)

  configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner,
      1, { oneThirdUpgradedTest.nodes.get(0).transportUri() })

  Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner")
  twoThirdsUpgradedTestRunner.configure {
    systemProperty 'tests.rest.suite', 'mixed_cluster'
    systemProperty 'tests.first_round', 'false'
    finalizedBy "${baseName}#oldClusterTestCluster#node2.stop"
  }

  Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask)

  configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) {
    dependsOn mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop"
    clusterName = 'rolling-upgrade'
    unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() }
    minimumMasterNodes = { 2 }
    /* Override the data directory so the new node always gets the node we
     * just stopped's data directory. */
    dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir}
    setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
  }
  configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner,
      2, { twoThirdsUpgradedTest.nodes.get(0).transportUri() })

  Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner")
  upgradedClusterTestRunner.configure {
    systemProperty 'tests.rest.suite', 'upgraded_cluster'
    // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion
    finalizedBy "${baseName}#mixedClusterTestCluster#stop"
    /*
     * Force stopping all the upgraded nodes after the test runner
     * so they are alive during the test.
     */
    finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop"
    finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop"
  }

  Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") {

@@ -0,0 +1,91 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.upgrades;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Response;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.yaml.ObjectPath;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Future;
import java.util.function.Predicate;

import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING;
import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.notNullValue;

public abstract class AbstractRollingTestCase extends ESRestTestCase {
    protected enum ClusterType {
        OLD,
        MIXED,
        UPGRADED;

        public static ClusterType parse(String value) {
            switch (value) {
                case "old_cluster":
                    return OLD;
                case "mixed_cluster":
                    return MIXED;
                case "upgraded_cluster":
                    return UPGRADED;
                default:
                    throw new AssertionError("unknown cluster type: " + value);
            }
        }
    }

    protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite"));

    @Override
    protected final boolean preserveIndicesUponCompletion() {
        return true;
    }

    @Override
    protected final boolean preserveReposUponCompletion() {
        return true;
    }

    @Override
    protected final Settings restClientSettings() {
        return Settings.builder().put(super.restClientSettings())
            // increase the timeout here to 90 seconds to handle long waits for a green
            // cluster health. the waits for green need to be longer than a minute to
            // account for delayed shards
            .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s")
            .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s")
            .build();
    }
}

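Concrete suites extend this base class and branch on CLUSTER_TYPE, which is parsed once from the tests.rest.suite system property. A minimal sketch of such a subclass (the test body comments are placeholders, not part of the commit):

public class ExamplePhaseIT extends AbstractRollingTestCase {
    public void testPhaseDispatch() {
        switch (CLUSTER_TYPE) {
            case OLD:
                // seed data before any node has been upgraded
                break;
            case MIXED:
                // assertions that must hold mid-upgrade
                break;
            case UPGRADED:
                // final assertions on the fully upgraded cluster
                break;
            default:
                throw new AssertionError("unknown cluster type: " + CLUSTER_TYPE);
        }
    }
}
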
@@ -0,0 +1,135 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.upgrades;

import org.apache.http.util.EntityUtils;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

/**
 * Basic test that indexed documents survive the rolling restart. See
 * {@link RecoveryIT} for much more in depth testing of the mechanism
 * by which they survive.
 */
public class IndexingIT extends AbstractRollingTestCase {
    public void testIndexing() throws IOException {
        switch (CLUSTER_TYPE) {
            case OLD:
                break;
            case MIXED:
                Request waitForYellow = new Request("GET", "/_cluster/health");
                waitForYellow.addParameter("wait_for_nodes", "3");
                waitForYellow.addParameter("wait_for_status", "yellow");
                client().performRequest(waitForYellow);
                break;
            case UPGRADED:
                Request waitForGreen = new Request("GET", "/_cluster/health/test_index,index_with_replicas,empty_index");
                waitForGreen.addParameter("wait_for_nodes", "3");
                waitForGreen.addParameter("wait_for_status", "green");
                // wait for long enough that we give delayed unassigned shards to stop being delayed
                waitForGreen.addParameter("timeout", "70s");
                waitForGreen.addParameter("level", "shards");
                client().performRequest(waitForGreen);
                break;
            default:
                throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]");
        }

        if (CLUSTER_TYPE == ClusterType.OLD) {
            Request createTestIndex = new Request("PUT", "/test_index");
            createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}");
            client().performRequest(createTestIndex);

            String recoverQuickly = "{\"settings\": {\"index.unassigned.node_left.delayed_timeout\": \"100ms\"}}";
            Request createIndexWithReplicas = new Request("PUT", "/index_with_replicas");
            createIndexWithReplicas.setJsonEntity(recoverQuickly);
            client().performRequest(createIndexWithReplicas);

            Request createEmptyIndex = new Request("PUT", "/empty_index");
            // Ask for recovery to be quick
            createEmptyIndex.setJsonEntity(recoverQuickly);
            client().performRequest(createEmptyIndex);

            bulk("test_index", "_OLD", 5);
            bulk("index_with_replicas", "_OLD", 5);
        }

        int expectedCount;
        switch (CLUSTER_TYPE) {
            case OLD:
                expectedCount = 5;
                break;
            case MIXED:
                if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) {
                    expectedCount = 5;
                } else {
                    expectedCount = 10;
                }
                break;
            case UPGRADED:
                expectedCount = 15;
                break;
            default:
                throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]");
        }

        assertCount("test_index", expectedCount);
        assertCount("index_with_replicas", 5);
        assertCount("empty_index", 0);

        if (CLUSTER_TYPE != ClusterType.OLD) {
            bulk("test_index", "_" + CLUSTER_TYPE, 5);
            Request toBeDeleted = new Request("PUT", "/test_index/doc/to_be_deleted");
            toBeDeleted.addParameter("refresh", "true");
            toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}");
            client().performRequest(toBeDeleted);
            assertCount("test_index", expectedCount + 6);

            Request delete = new Request("DELETE", "/test_index/doc/to_be_deleted");
            delete.addParameter("refresh", "true");
            client().performRequest(delete);

            assertCount("test_index", expectedCount + 5);
        }
    }

    private void bulk(String index, String valueSuffix, int count) throws IOException {
        StringBuilder b = new StringBuilder();
        for (int i = 0; i < count; i++) {
            b.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"doc\"}}\n");
            b.append("{\"f1\": \"v").append(i).append(valueSuffix).append("\", \"f2\": ").append(i).append("}\n");
        }
        Request bulk = new Request("POST", "/_bulk");
        bulk.addParameter("refresh", "true");
        bulk.setJsonEntity(b.toString());
        client().performRequest(bulk);
    }

    private void assertCount(String index, int count) throws IOException {
        Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search");
        searchTestIndexRequest.addParameter("filter_path", "hits.total");
        Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest);
        assertEquals("{\"hits\":{\"total\":" + count + "}}",
            EntityUtils.toString(searchTestIndexResponse.getEntity(), StandardCharsets.UTF_8));
    }
}

@@ -46,53 +46,13 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.notNullValue;

public class RecoveryIT extends ESRestTestCase {

    @Override
    protected boolean preserveIndicesUponCompletion() {
        return true;
    }

    @Override
    protected boolean preserveReposUponCompletion() {
        return true;
    }

    private enum CLUSTER_TYPE {
        OLD,
        MIXED,
        UPGRADED;

        public static CLUSTER_TYPE parse(String value) {
            switch (value) {
                case "old_cluster":
                    return OLD;
                case "mixed_cluster":
                    return MIXED;
                case "upgraded_cluster":
                    return UPGRADED;
                default:
                    throw new AssertionError("unknown cluster type: " + value);
            }
        }
    }

    private final CLUSTER_TYPE clusterType = CLUSTER_TYPE.parse(System.getProperty("tests.rest.suite"));

    @Override
    protected Settings restClientSettings() {
        return Settings.builder().put(super.restClientSettings())
            // increase the timeout here to 90 seconds to handle long waits for a green
            // cluster health. the waits for green need to be longer than a minute to
            // account for delayed shards
            .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s")
            .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s")
            .build();
    }

/**
 * In depth testing of the recovery mechanism during a rolling restart.
 */
public class RecoveryIT extends AbstractRollingTestCase {
    public void testHistoryUUIDIsGenerated() throws Exception {
        final String index = "index_history_uuid";
        if (clusterType == CLUSTER_TYPE.OLD) {
        if (CLUSTER_TYPE == ClusterType.OLD) {
            Settings.Builder settings = Settings.builder()
                .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)

@@ -102,7 +62,7 @@ public class RecoveryIT extends ESRestTestCase {
                // before timing out
                .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms");
            createIndex(index, settings.build());
        } else if (clusterType == CLUSTER_TYPE.UPGRADED) {
        } else if (CLUSTER_TYPE == ClusterType.UPGRADED) {
            ensureGreen(index);
            Response response = client().performRequest("GET", index + "/_stats", Collections.singletonMap("level", "shards"));
            assertOK(response);

@@ -157,11 +117,11 @@ public class RecoveryIT extends ESRestTestCase {
        final Map<String, Object> nodeMap = objectPath.evaluate("nodes");
        List<String> nodes = new ArrayList<>(nodeMap.keySet());

        switch (clusterType) {
        switch (CLUSTER_TYPE) {
            case OLD:
                Settings.Builder settings = Settings.builder()
                    .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                    .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
                    .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
                    // if the node with the replica is the first to be restarted, while a replica is still recovering
                    // then delayed allocation will kick in. When the node comes back, the master will search for a copy
                    // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN

@@ -181,6 +141,7 @@ public class RecoveryIT extends ESRestTestCase {
                assertOK(client().performRequest("POST", index + "/_refresh"));
                assertCount(index, "_only_nodes:" + nodes.get(0), 60);
                assertCount(index, "_only_nodes:" + nodes.get(1), 60);
                assertCount(index, "_only_nodes:" + nodes.get(2), 60);
                // make sure that we can index while the replicas are recovering
                updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "primaries"));
                break;

@@ -191,9 +152,10 @@ public class RecoveryIT extends ESRestTestCase {
                assertOK(client().performRequest("POST", index + "/_refresh"));
                assertCount(index, "_only_nodes:" + nodes.get(0), 110);
                assertCount(index, "_only_nodes:" + nodes.get(1), 110);
                assertCount(index, "_only_nodes:" + nodes.get(2), 110);
                break;
            default:
                throw new IllegalStateException("unknown type " + clusterType);
                throw new IllegalStateException("unknown type " + CLUSTER_TYPE);
        }
    }

@@ -221,11 +183,11 @@ public class RecoveryIT extends ESRestTestCase {

    public void testRelocationWithConcurrentIndexing() throws Exception {
        final String index = "relocation_with_concurrent_indexing";
        switch (clusterType) {
        switch (CLUSTER_TYPE) {
            case OLD:
                Settings.Builder settings = Settings.builder()
                    .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                    .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
                    .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
                    // if the node with the replica is the first to be restarted, while a replica is still recovering
                    // then delayed allocation will kick in. When the node comes back, the master will search for a copy
                    // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN

@@ -258,7 +220,7 @@ public class RecoveryIT extends ESRestTestCase {
                break;
            case UPGRADED:
                updateIndexSettings(index, Settings.builder()
                    .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
                    .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
                    .put("index.routing.allocation.include._id", (String)null)
                );
                asyncIndexDocs(index, 60, 50).get();

@@ -271,9 +233,10 @@ public class RecoveryIT extends ESRestTestCase {

                assertCount(index, "_only_nodes:" + nodes.get(0), 110);
                assertCount(index, "_only_nodes:" + nodes.get(1), 110);
                assertCount(index, "_only_nodes:" + nodes.get(2), 110);
                break;
            default:
                throw new IllegalStateException("unknown type " + clusterType);
                throw new IllegalStateException("unknown type " + CLUSTER_TYPE);
        }
    }

@@ -60,4 +60,3 @@ public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
            .build();
    }
}

@@ -1,74 +1,8 @@
---
"Index data and search on the mixed cluster":
  - do:
      cluster.health:
        wait_for_status: yellow
        wait_for_nodes: 2

  - do:
      search:
        index: test_index

  - match: { hits.total: 5 } # no new indexed data, so expect the original 5 documents from the old cluster

  - do:
      search:
        index: index_with_replicas

  - match: { hits.total: 5 } # just check we recovered fine

  - do:
      bulk:
        refresh: true
        body:
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v1_mixed", "f2": 5}'
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v2_mixed", "f2": 6}'
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v3_mixed", "f2": 7}'
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v4_mixed", "f2": 8}'
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v5_mixed", "f2": 9}'

  - do:
      index:
        index: test_index
        type: doc
        id: d10
        body: {"f1": "v6_mixed", "f2": 10}

  - do:
      indices.refresh:
        index: test_index

  - do:
      search:
        index: test_index

  - match: { hits.total: 11 } # 5 docs from old cluster, 6 docs from mixed cluster

  - do:
      delete:
        index: test_index
        type: doc
        id: d10

  - do:
      indices.refresh:
        index: test_index

  - do:
      search:
        index: test_index

  - match: { hits.total: 10 }

---
"Verify that we can still find things with the template":
  - do:
      search_template:
        index: test_search_template
        body:
          id: test_search_template
          params:

@@ -1,76 +1,5 @@
---
"Index data, search, and create things in the cluster state that we'll validate are there after the ugprade":
  - do:
      indices.create:
        index: test_index
        body:
          settings:
            index:
              number_of_replicas: 0
  - do:
      indices.create:
        index: index_with_replicas # dummy index to ensure we can recover indices with replicas just fine
        body:
          # if the node with the replica is the first to be restarted, then delayed
          # allocation will kick in, and the cluster health won't return to GREEN
          # before timing out
          index.unassigned.node_left.delayed_timeout: "100ms"

  - do:
      indices.create:
        index: empty_index # index to ensure we can recover empty indices
        body:
          # if the node with the replica is the first to be restarted, then delayed
          # allocation will kick in, and the cluster health won't return to GREEN
          # before timing out
          index.unassigned.node_left.delayed_timeout: "100ms"

  - do:
      bulk:
        refresh: true
        body:
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v1_old", "f2": 0}'
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v2_old", "f2": 1}'
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v3_old", "f2": 2}'
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v4_old", "f2": 3}'
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v5_old", "f2": 4}'

  - do:
      bulk:
        refresh: true
        body:
          - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
          - '{"f1": "d_old"}'
          - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
          - '{"f1": "d_old"}'
          - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
          - '{"f1": "d_old"}'
          - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
          - '{"f1": "d_old"}'
          - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
          - '{"f1": "d_old"}'

  - do:
      indices.refresh:
        index: test_index,index_with_replicas

  - do:
      search:
        index: test_index

  - match: { hits.total: 5 }

  - do:
      search:
        index: index_with_replicas

  - match: { hits.total: 5 }

"Create things in the cluster state that we'll validate are there after the ugprade":
  - do:
      snapshot.create_repository:
        repository: my_repo

@@ -91,6 +20,21 @@
        }
  - match: { "acknowledged": true }

  - do:
      bulk:
        refresh: true
        body:
          - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
          - '{"f1": "v1_old"}'
          - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
          - '{"f1": "v2_old"}'
          - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
          - '{"f1": "v3_old"}'
          - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
          - '{"f1": "v4_old"}'
          - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
          - '{"f1": "v5_old"}'

  - do:
      put_script:
        id: test_search_template

@@ -105,6 +49,7 @@

  - do:
      search_template:
        index: test_search_template
        body:
          id: test_search_template
          params:

@@ -1,55 +1,8 @@
---
"Index data and search on the upgraded cluster":
  - do:
      cluster.health:
        wait_for_status: green
        wait_for_nodes: 2
        # wait for long enough that we give delayed unassigned shards to stop being delayed
        timeout: 70s
        level: shards
        index: test_index,index_with_replicas,empty_index

  - do:
      search:
        index: test_index

  - match: { hits.total: 10 } # no new indexed data, so expect the original 10 documents from the old and mixed clusters

  - do:
      search:
        index: index_with_replicas

  - match: { hits.total: 5 } # just check we recovered fine

  - do:
      bulk:
        refresh: true
        body:
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v1_upgraded", "f2": 10}'
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v2_upgraded", "f2": 11}'
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v3_upgraded", "f2": 12}'
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v4_upgraded", "f2": 13}'
          - '{"index": {"_index": "test_index", "_type": "doc"}}'
          - '{"f1": "v5_upgraded", "f2": 14}'

  - do:
      indices.refresh:
        index: test_index

  - do:
      search:
        index: test_index

  - match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs

---
"Verify that we can still find things with the template":
  - do:
      search_template:
        index: test_search_template
        body:
          id: test_search_template
          params:

@@ -22,6 +22,9 @@ package org.elasticsearch.action.admin.cluster.repositories.delete;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

@@ -30,6 +33,13 @@ import java.io.IOException;
 */
public class DeleteRepositoryResponse extends AcknowledgedResponse {

    private static final ConstructingObjectParser<DeleteRepositoryResponse, Void> PARSER =
        new ConstructingObjectParser<>("delete_repository", true, args -> new DeleteRepositoryResponse((boolean) args[0]));

    static {
        declareAcknowledgedField(PARSER);
    }

    DeleteRepositoryResponse() {
    }

@@ -49,4 +59,7 @@ public class DeleteRepositoryResponse extends AcknowledgedResponse {
        writeAcknowledged(out);
    }

    public static DeleteRepositoryResponse fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }
}

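The new ConstructingObjectParser makes the response parseable from a REST body such as {"acknowledged": true}. A hedged usage sketch; the JsonXContent/DeprecationHandler plumbing mirrors parser construction elsewhere in the codebase and is an assumption here, as is the sketch class itself:

import java.io.IOException;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

class DeleteRepositoryResponseParsingSketch {
    // Parse an acknowledged delete-repository response from raw JSON,
    // e.g. parse("{\"acknowledged\":true}").isAcknowledged() == true.
    static DeleteRepositoryResponse parse(String json) throws IOException {
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            return DeleteRepositoryResponse.fromXContent(parser);
        }
    }
}
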
@@ -449,6 +449,10 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapperParser {
        // index
        for (Map.Entry<String, CompletionInputMetaData> completionInput : inputMap.entrySet()) {
            String input = completionInput.getKey();
            if (input.trim().isEmpty()) {
                context.addIgnoredField(fieldType.name());
                continue;
            }
            // truncate input
            if (input.length() > maxInputLength) {
                int len = Math.min(maxInputLength, input.length());

@@ -49,7 +49,6 @@ public class RestDeleteRepositoryAction extends BaseRestHandler {
    @Override
    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
        DeleteRepositoryRequest deleteRepositoryRequest = deleteRepositoryRequest(request.param("repository"));
        deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout()));
        deleteRepositoryRequest.timeout(request.paramAsTime("timeout", deleteRepositoryRequest.timeout()));
        deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout()));
        return channel -> client.admin().cluster().deleteRepository(deleteRepositoryRequest, new RestToXContentListener<>(channel));

@@ -19,6 +19,7 @@

package org.elasticsearch.rest.action.admin.indices;

import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
import org.elasticsearch.action.support.ActiveShardCount;

@@ -47,6 +48,8 @@ public abstract class RestResizeHandler extends BaseRestHandler {
    public final RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
        final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index"));
        resizeRequest.setResizeType(getResizeType());
        // copy_settings should be removed in Elasticsearch 8.0.0; cf. https://github.com/elastic/elasticsearch/issues/28347
        assert Version.CURRENT.major < 8;
        final String rawCopySettings = request.param("copy_settings");
        final Boolean copySettings;
        if (rawCopySettings == null) {

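The hunk above ends mid-branch, right after rawCopySettings is read. One plausible completion of the tri-state handling (null = parameter absent, empty = bare flag, otherwise an explicit boolean) is sketched below; this is an illustration under those assumptions, not the commit's exact code:

// Hypothetical continuation; Booleans is org.elasticsearch.common.Booleans.
final Boolean copySettings;
if (rawCopySettings == null) {
    copySettings = null;                 // ?copy_settings not supplied at all
} else if (rawCopySettings.isEmpty()) {
    copySettings = true;                 // bare ?copy_settings treated as true
} else {
    copySettings = Booleans.parseBoolean(rawCopySettings);
}
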
@@ -107,6 +107,15 @@ public interface TcpChannel extends Releasable {
     */
    void sendMessage(BytesReference reference, ActionListener<Void> listener);

    /**
     * Closes the channel without blocking.
     *
     * @param channel to close
     */
    static <C extends TcpChannel> void closeChannel(C channel) {
        closeChannel(channel, false);
    }

    /**
     * Closes the channel.
     *

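The new no-argument overload is just a non-blocking convenience over closeChannel(channel, false), which the TcpTransport hunks below switch to. A small usage sketch (the closeQuietly helper is illustrative only):

class TcpChannelCloseSketch {
    // Fire-and-forget close on an error path; does not wait for the close to complete.
    static void closeQuietly(TcpChannel channel) {
        TcpChannel.closeChannel(channel);
    }
}
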
@@ -983,7 +983,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
     protected void onException(TcpChannel channel, Exception e) {
         if (!lifecycle.started()) {
             // just close and ignore - we are already stopped and just need to make sure we release all resources
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
             return;
         }

@@ -991,20 +991,20 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
             logger.trace(() -> new ParameterizedMessage(
                 "close connection exception caught on transport layer [{}], disconnecting from relevant node", channel), e);
             // close the channel, which will cause a node to be disconnected if relevant
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         } else if (isConnectException(e)) {
             logger.trace(() -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e);
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         } else if (e instanceof BindException) {
             logger.trace(() -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e);
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         } else if (e instanceof CancelledKeyException) {
             logger.trace(() -> new ParameterizedMessage(
                 "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", channel), e);
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         } else if (e instanceof TcpTransport.HttpOnTransportException) {
             // in case we are able to return data, serialize the exception content and sent it back to the client
             if (channel.isOpen()) {

@@ -1012,13 +1012,13 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
                 final SendMetricListener closeChannel = new SendMetricListener(message.length()) {
                     @Override
                     protected void innerInnerOnResponse(Void v) {
-                        TcpChannel.closeChannel(channel, false);
+                        TcpChannel.closeChannel(channel);
                     }

                     @Override
                     protected void innerOnFailure(Exception e) {
                         logger.debug("failed to send message to httpOnTransport channel", e);
-                        TcpChannel.closeChannel(channel, false);
+                        TcpChannel.closeChannel(channel);
                     }
                 };
                 internalSendMessage(channel, message, closeChannel);

@@ -1026,10 +1026,20 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
         } else {
             logger.warn(() -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e);
             // close the channel, which will cause a node to be disconnected if relevant
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         }
     }

+    /**
+     * Exception handler for exceptions that are not associated with a specific channel.
+     *
+     * @param exception the exception
+     */
+    protected void onNonChannelException(Exception exception) {
+        logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()),
+            exception);
+    }
+
     protected void serverAcceptedChannel(TcpChannel channel) {
         boolean addedOnThisCall = acceptedChannels.add(channel);
         assert addedOnThisCall : "Channel should only be added to accept channel set once";
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.cluster.repositories.delete;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+
+public class DeleteRepositoryResponseTests extends AbstractStreamableXContentTestCase<DeleteRepositoryResponse> {
+
+    @Override
+    protected DeleteRepositoryResponse doParseInstance(XContentParser parser) {
+        return DeleteRepositoryResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected DeleteRepositoryResponse createBlankInstance() {
+        return new DeleteRepositoryResponse();
+    }
+
+    @Override
+    protected DeleteRepositoryResponse createTestInstance() {
+        return new DeleteRepositoryResponse(randomBoolean());
+    }
+}
@@ -154,6 +154,7 @@ public class QueueResizingEsThreadPoolExecutorTests extends ESTestCase {
         context.close();
     }

+    @TestLogging("org.elasticsearch.common.util.concurrent:DEBUG")
     public void testAutoQueueSizingWithMax() throws Exception {
         ThreadContext context = new ThreadContext(Settings.EMPTY);
         ResizableBlockingQueue<Runnable> queue =
@@ -397,6 +397,19 @@ public class CompletionFieldMapperTests extends ESSingleNodeTestCase {
         assertThat(cause, instanceOf(IllegalArgumentException.class));
         assertThat(cause.getMessage(), containsString("[0x1e]"));
         }
+
+        // empty inputs are ignored
+        ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
+                .bytes(XContentFactory.jsonBuilder()
+                        .startObject()
+                        .array("completion", " ", "")
+                        .endObject()),
+                XContentType.JSON));
+        assertThat(doc.docs().size(), equalTo(1));
+        assertNull(doc.docs().get(0).get("completion"));
+        assertNotNull(doc.docs().get(0).getField("_ignored"));
+        IndexableField ignoredFields = doc.docs().get(0).getField("_ignored");
+        assertThat(ignoredFields.stringValue(), equalTo("completion"));
     }

     public void testPrefixQueryType() throws Exception {
@@ -19,6 +19,7 @@

 package org.elasticsearch.transport.nio;

+import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.common.bytes.BytesReference;

@@ -52,6 +53,7 @@ import java.nio.ByteBuffer;
 import java.nio.channels.ServerSocketChannel;
 import java.nio.channels.SocketChannel;
 import java.util.concurrent.ConcurrentMap;
+import java.util.function.Consumer;
 import java.util.function.Supplier;

 import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;

@@ -96,9 +98,10 @@ public class MockNioTransport extends TcpTransport {
         if (useNetworkServer) {
             acceptorCount = 1;
         }
-        nioGroup = new NioGroup(logger, daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount,
-            AcceptorEventHandler::new, daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX),
-            2, TestingSocketEventHandler::new);
+        nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount,
+            (s) -> new AcceptorEventHandler(s, this::onNonChannelException),
+            daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), 2,
+            () -> new TestingSocketEventHandler(this::onNonChannelException));

         ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default");
         clientChannelFactory = new MockTcpChannelFactory(clientProfileSettings, "client");

@@ -172,8 +175,10 @@ public class MockNioTransport extends TcpTransport {
         @Override
         public MockServerChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException {
             MockServerChannel nioServerChannel = new MockServerChannel(profileName, channel, this, selector);
+            Consumer<Exception> exceptionHandler = (e) -> logger.error(() ->
+                new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e);
             ServerChannelContext context = new ServerChannelContext(nioServerChannel, this, selector, MockNioTransport.this::acceptChannel,
-                (e) -> {});
+                exceptionHandler);
             nioServerChannel.setContext(context);
             return nioServerChannel;
         }
@@ -19,7 +19,6 @@

 package org.elasticsearch.transport.nio;

-import org.apache.logging.log4j.Logger;
 import org.elasticsearch.nio.SocketChannelContext;
 import org.elasticsearch.nio.SocketEventHandler;


@@ -27,15 +26,16 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.Set;
 import java.util.WeakHashMap;
+import java.util.function.Consumer;

 public class TestingSocketEventHandler extends SocketEventHandler {

-    public TestingSocketEventHandler(Logger logger) {
-        super(logger);
-    }
-
     private Set<SocketChannelContext> hasConnectedMap = Collections.newSetFromMap(new WeakHashMap<>());

+    public TestingSocketEventHandler(Consumer<Exception> exceptionHandler) {
+        super(exceptionHandler);
+    }
+
     public void handleConnect(SocketChannelContext context) throws IOException {
         assert hasConnectedMap.contains(context) == false : "handleConnect should only be called is a channel is not yet connected";
         super.handleConnect(context);
@@ -9,6 +9,7 @@ import org.elasticsearch.ResourceAlreadyExistsException;
 import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.AbstractDiffable;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.DiffableUtils;
 import org.elasticsearch.cluster.NamedDiff;

@@ -467,6 +468,14 @@ public class MlMetadata implements MetaData.Custom {
         }
     }

+    public static MlMetadata getMlMetadata(ClusterState state) {
+        MlMetadata mlMetadata = (state == null) ? null : state.getMetaData().custom(MLMetadataField.TYPE);
+        if (mlMetadata == null) {
+            return EMPTY_METADATA;
+        }
+        return mlMetadata;
+    }
+
     public static class JobAlreadyMarkedAsDeletedException extends RuntimeException {
     }
 }
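Most of the ML hunks that follow are call sites of this new accessor. The recurring pattern being deleted, and its one-line replacement, assembled from lines that appear in this diff:

    // Before: every caller re-implemented the null check against cluster state.
    MlMetadata mlMetadata = state.getMetaData().custom(MLMetadataField.TYPE);
    if (mlMetadata == null) {
        mlMetadata = MlMetadata.EMPTY_METADATA;
    }

    // After: the accessor centralizes the EMPTY_METADATA fallback.
    MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);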
@@ -6,7 +6,6 @@
 package org.elasticsearch.xpack.core.ml.job.persistence;

 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;

 /**

@@ -47,8 +46,7 @@ public final class AnomalyDetectorsIndex {
      * @return The index name
      */
     public static String getPhysicalIndexFromState(ClusterState state, String jobId) {
-        MlMetadata meta = state.getMetaData().custom(MLMetadataField.TYPE);
-        return meta.getJobs().get(jobId).getResultsIndexName();
+        return MlMetadata.getMlMetadata(state).getJobs().get(jobId).getResultsIndexName();
     }

     /**
@@ -111,7 +111,7 @@ public class SecurityNetty4Transport extends Netty4Transport {
     protected void onException(TcpChannel channel, Exception e) {
         if (!lifecycle.started()) {
             // just close and ignore - we are already stopped and just need to make sure we release all resources
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         } else if (SSLExceptionHelper.isNotSslRecordException(e)) {
             if (logger.isTraceEnabled()) {
                 logger.trace(

@@ -119,21 +119,21 @@ public class SecurityNetty4Transport extends Netty4Transport {
             } else {
                 logger.warn("received plaintext traffic on an encrypted channel, closing connection {}", channel);
             }
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         } else if (SSLExceptionHelper.isCloseDuringHandshakeException(e)) {
             if (logger.isTraceEnabled()) {
                 logger.trace(new ParameterizedMessage("connection {} closed during ssl handshake", channel), e);
             } else {
                 logger.warn("connection {} closed during handshake", channel);
             }
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         } else if (SSLExceptionHelper.isReceivedCertificateUnknownException(e)) {
             if (logger.isTraceEnabled()) {
                 logger.trace(new ParameterizedMessage("client did not trust server's certificate, closing connection {}", channel), e);
             } else {
                 logger.warn("client did not trust this server's certificate, closing connection {}", channel);
             }
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         } else {
             super.onException(channel, e);
         }
@@ -23,7 +23,6 @@ import org.elasticsearch.xpack.core.XPackFeatureSet;
 import org.elasticsearch.xpack.core.XPackPlugin;
 import org.elasticsearch.xpack.core.XPackSettings;
 import org.elasticsearch.xpack.core.XPackField;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction;

@@ -132,15 +131,7 @@ public class MachineLearningFeatureSet implements XPackFeatureSet {
     @Override
     public void usage(ActionListener<XPackFeatureSet.Usage> listener) {
         ClusterState state = clusterService.state();
-        MlMetadata mlMetadata = state.getMetaData().custom(MLMetadataField.TYPE);
-
-        // Handle case when usage is called but MlMetadata has not been installed yet
-        if (mlMetadata == null) {
-            listener.onResponse(new MachineLearningFeatureSetUsage(available(), enabled,
-                Collections.emptyMap(), Collections.emptyMap()));
-        } else {
-            new Retriever(client, mlMetadata, available(), enabled()).execute(listener);
-        }
+        new Retriever(client, MlMetadata.getMlMetadata(state), available(), enabled()).execute(listener);
     }

     public static class Retriever {
@@ -13,7 +13,6 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.OpenJobAction;
 import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction;

@@ -90,8 +89,7 @@ public class MlAssignmentNotifier extends AbstractComponent implements ClusterSt
                 }
             } else if (StartDatafeedAction.TASK_NAME.equals(currentTask.getTaskName())) {
                 String datafeedId = ((StartDatafeedAction.DatafeedParams) currentTask.getParams()).getDatafeedId();
-                MlMetadata mlMetadata = event.state().getMetaData().custom(MLMetadataField.TYPE);
-                DatafeedConfig datafeedConfig = mlMetadata.getDatafeed(datafeedId);
+                DatafeedConfig datafeedConfig = MlMetadata.getMlMetadata(event.state()).getDatafeed(datafeedId);
                 if (currentAssignment.getExecutorNode() == null) {
                     String msg = "No node found to start datafeed [" + datafeedId +"]. Reasons [" +
                             currentAssignment.getExplanation() + "]";
@@ -7,20 +7,13 @@ package org.elasticsearch.xpack.ml;

 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterChangedEvent;
-import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateListener;
-import org.elasticsearch.cluster.ClusterStateUpdateTask;
-import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.component.LifecycleListener;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
-import org.elasticsearch.xpack.core.ml.MlMetadata;
-
-import java.util.concurrent.atomic.AtomicBoolean;

 class MlInitializationService extends AbstractComponent implements ClusterStateListener {

@@ -28,8 +21,6 @@ class MlInitializationService extends AbstractComponent implements ClusterStateL
     private final ClusterService clusterService;
     private final Client client;

-    private final AtomicBoolean installMlMetadataCheck = new AtomicBoolean(false);
-
     private volatile MlDailyMaintenanceService mlDailyMaintenanceService;

     MlInitializationService(Settings settings, ThreadPool threadPool, ClusterService clusterService, Client client) {

@@ -48,45 +39,12 @@ class MlInitializationService extends AbstractComponent implements ClusterStateL
         }

         if (event.localNodeMaster()) {
-            MetaData metaData = event.state().metaData();
-            installMlMetadata(metaData);
             installDailyMaintenanceService();
         } else {
             uninstallDailyMaintenanceService();
         }
     }

-    private void installMlMetadata(MetaData metaData) {
-        if (metaData.custom(MLMetadataField.TYPE) == null) {
-            if (installMlMetadataCheck.compareAndSet(false, true)) {
-                threadPool.executor(ThreadPool.Names.GENERIC).execute(() ->
-                    clusterService.submitStateUpdateTask("install-ml-metadata", new ClusterStateUpdateTask() {
-                        @Override
-                        public ClusterState execute(ClusterState currentState) throws Exception {
-                            // If the metadata has been added already don't try to update
-                            if (currentState.metaData().custom(MLMetadataField.TYPE) != null) {
-                                return currentState;
-                            }
-                            ClusterState.Builder builder = new ClusterState.Builder(currentState);
-                            MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData());
-                            metadataBuilder.putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA);
-                            builder.metaData(metadataBuilder.build());
-                            return builder.build();
-                        }
-
-                        @Override
-                        public void onFailure(String source, Exception e) {
-                            installMlMetadataCheck.set(false);
-                            logger.error("unable to install ml metadata", e);
-                        }
-                    })
-                );
-            }
-        } else {
-            installMlMetadataCheck.set(false);
-        }
-    }
-
     private void installDailyMaintenanceService() {
         if (mlDailyMaintenanceService == null) {
             mlDailyMaintenanceService = new MlDailyMaintenanceService(clusterService.getClusterName(), threadPool, client);
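Editorial note: this removal leans on the null-safe MlMetadata.getMlMetadata accessor introduced earlier in the diff. Once every reader falls back to EMPTY_METADATA on its own, nothing appears to require the install-ml-metadata update to have run, so the eager installation and its AtomicBoolean guard can go. The reading pattern that replaces it, as used in later hunks of this diff:

    // Null-safe read: yields MlMetadata.EMPTY_METADATA on a cluster where the
    // ML custom metadata was never installed (same lines as in UnusedStateRemover below).
    MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterService.state());
    Set<String> jobIds = mlMetadata.getJobs().keySet();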
@@ -27,7 +27,6 @@ import org.elasticsearch.discovery.MasterNotDiscoveredException;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.CloseJobAction;
 import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction;

@@ -92,8 +91,7 @@ public class TransportCloseJobAction extends TransportTasksAction<TransportOpenJ
     static void resolveAndValidateJobId(CloseJobAction.Request request, ClusterState state, List<String> openJobIds,
                                         List<String> closingJobIds) {
         PersistentTasksCustomMetaData tasksMetaData = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
-        MlMetadata maybeNull = state.metaData().custom(MLMetadataField.TYPE);
-        final MlMetadata mlMetadata = (maybeNull == null) ? MlMetadata.EMPTY_METADATA : maybeNull;
+        final MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);

         List<String> failedJobs = new ArrayList<>();

@@ -107,7 +105,7 @@ public class TransportCloseJobAction extends TransportTasksAction<TransportOpenJ
         };

         Set<String> expandedJobIds = mlMetadata.expandJobIds(request.getJobId(), request.allowNoJobs());
-        expandedJobIds.stream().forEach(jobIdProcessor::accept);
+        expandedJobIds.forEach(jobIdProcessor::accept);
         if (request.isForce() == false && failedJobs.size() > 0) {
             if (expandedJobIds.size() == 1) {
                 throw ExceptionsHelper.conflictStatusException("cannot close job [{}] because it failed, use force close",
@@ -119,8 +119,8 @@ public class TransportDeleteDatafeedAction extends TransportMasterNodeAction<Del
             }

             @Override
-            public ClusterState execute(ClusterState currentState) throws Exception {
-                MlMetadata currentMetadata = currentState.getMetaData().custom(MLMetadataField.TYPE);
+            public ClusterState execute(ClusterState currentState) {
+                MlMetadata currentMetadata = MlMetadata.getMlMetadata(currentState);
                 PersistentTasksCustomMetaData persistentTasks =
                         currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
                 MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata)
@@ -24,7 +24,6 @@ import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetaIndex;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.job.config.Detector;

@@ -60,8 +59,7 @@ public class TransportDeleteFilterAction extends HandledTransportAction<DeleteFi

         final String filterId = request.getFilterId();
         ClusterState state = clusterService.state();
-        MlMetadata currentMlMetadata = state.metaData().custom(MLMetadataField.TYPE);
-        Map<String, Job> jobs = currentMlMetadata.getJobs();
+        Map<String, Job> jobs = MlMetadata.getMlMetadata(state).getJobs();
         List<String> currentlyUsedBy = new ArrayList<>();
         for (Job job : jobs.values()) {
             List<Detector> detectors = job.getAnalysisConfig().getDetectors();
@@ -200,10 +200,9 @@ public class TransportDeleteJobAction extends TransportMasterNodeAction<DeleteJo
     void markJobAsDeleting(String jobId, ActionListener<Boolean> listener, boolean force) {
         clusterService.submitStateUpdateTask("mark-job-as-deleted", new ClusterStateUpdateTask() {
             @Override
-            public ClusterState execute(ClusterState currentState) throws Exception {
-                MlMetadata currentMlMetadata = currentState.metaData().custom(MLMetadataField.TYPE);
+            public ClusterState execute(ClusterState currentState) {
                 PersistentTasksCustomMetaData tasks = currentState.metaData().custom(PersistentTasksCustomMetaData.TYPE);
-                MlMetadata.Builder builder = new MlMetadata.Builder(currentMlMetadata);
+                MlMetadata.Builder builder = new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState));
                 builder.markJobAsDeleted(jobId, tasks, force);
                 return buildNewClusterState(currentState, builder);
             }

@@ -248,11 +247,7 @@ public class TransportDeleteJobAction extends TransportMasterNodeAction<DeleteJo
     }

     static boolean jobIsDeletedFromState(String jobId, ClusterState clusterState) {
-        MlMetadata metadata = clusterState.metaData().custom(MLMetadataField.TYPE);
-        if (metadata == null) {
-            return true;
-        }
-        return !metadata.getJobs().containsKey(jobId);
+        return !MlMetadata.getMlMetadata(clusterState).getJobs().containsKey(jobId);
     }

     private static ClusterState buildNewClusterState(ClusterState currentState, MlMetadata.Builder builder) {
@@ -56,8 +56,8 @@ public class TransportFinalizeJobExecutionAction extends TransportMasterNodeActi
         logger.debug("finalizing jobs [{}]", jobIdString);
         clusterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() {
             @Override
-            public ClusterState execute(ClusterState currentState) throws Exception {
-                MlMetadata mlMetadata = currentState.metaData().custom(MLMetadataField.TYPE);
+            public ClusterState execute(ClusterState currentState) {
+                MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState);
                 MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(mlMetadata);
                 Date finishedTime = new Date();

@@ -15,7 +15,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.GetCalendarEventsAction;
 import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction;

@@ -70,7 +69,7 @@ public class TransportGetCalendarEventsAction extends HandledTransportAction<Get

         if (request.getJobId() != null) {
             ClusterState state = clusterService.state();
-            MlMetadata currentMlMetadata = state.metaData().custom(MLMetadataField.TYPE);
+            MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(state);

             List<String> jobGroups;
             String requestId = request.getJobId();
@@ -18,7 +18,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;

@@ -52,10 +51,7 @@ public class TransportGetDatafeedsAction extends TransportMasterNodeReadAction<G
                                    ActionListener<GetDatafeedsAction.Response> listener) throws Exception {
         logger.debug("Get datafeed '{}'", request.getDatafeedId());

-        MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE);
-        if (mlMetadata == null) {
-            mlMetadata = MlMetadata.EMPTY_METADATA;
-        }
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
         Set<String> expandedDatafeedIds = mlMetadata.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds());
         List<DatafeedConfig> datafeedConfigs = new ArrayList<>();
         for (String expandedDatafeedId : expandedDatafeedIds) {
@@ -18,7 +18,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction;
 import org.elasticsearch.xpack.core.ml.action.util.QueryPage;

@@ -56,11 +55,7 @@ public class TransportGetDatafeedsStatsAction extends TransportMasterNodeReadAct
                                    ActionListener<GetDatafeedsStatsAction.Response> listener) throws Exception {
         logger.debug("Get stats for datafeed '{}'", request.getDatafeedId());

-        MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE);
-        if (mlMetadata == null) {
-            mlMetadata = MlMetadata.EMPTY_METADATA;
-        }
-
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
         Set<String> expandedDatafeedIds = mlMetadata.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds());

         PersistentTasksCustomMetaData tasksInProgress = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
@@ -23,7 +23,6 @@ import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction;
 import org.elasticsearch.xpack.core.ml.action.util.QueryPage;

@@ -69,8 +68,7 @@ public class TransportGetJobsStatsAction extends TransportTasksAction<TransportO

     @Override
     protected void doExecute(Task task, GetJobsStatsAction.Request request, ActionListener<GetJobsStatsAction.Response> listener) {
-        MlMetadata clusterMlMetadata = clusterService.state().metaData().custom(MLMetadataField.TYPE);
-        MlMetadata mlMetadata = (clusterMlMetadata == null) ? MlMetadata.EMPTY_METADATA : clusterMlMetadata;
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterService.state());
         request.setExpandedJobsIds(new ArrayList<>(mlMetadata.expandJobIds(request.getJobId(), request.allowNoJobs())));
         ActionListener<GetJobsStatsAction.Response> finalListener = listener;
         listener = ActionListener.wrap(response -> gatherStatsForClosedJobs(mlMetadata,
@@ -49,7 +49,6 @@ import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.XPackField;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetaIndex;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.OpenJobAction;

@@ -163,7 +162,7 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct
                 continue;
             }

-            MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
+            MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState);
             Job job = mlMetadata.getJobs().get(jobId);
             Set<String> compatibleJobTypes = Job.getCompatibleJobTypes(node.getVersion());
             if (compatibleJobTypes.contains(job.getJobType()) == false) {

@@ -474,8 +473,7 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct
         // Step 3. Update established model memory for pre-6.1 jobs that haven't had it set
         ActionListener<Boolean> missingMappingsListener = ActionListener.wrap(
                 response -> {
-                    MlMetadata mlMetadata = clusterService.state().getMetaData().custom(MLMetadataField.TYPE);
-                    Job job = mlMetadata.getJobs().get(jobParams.getJobId());
+                    Job job = MlMetadata.getMlMetadata(clusterService.state()).getJobs().get(jobParams.getJobId());
                     if (job != null) {
                         Version jobVersion = job.getJobVersion();
                         Long jobEstablishedModelMemory = job.getEstablishedModelMemory();

@@ -650,8 +648,7 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct
         public void validate(OpenJobAction.JobParams params, ClusterState clusterState) {
             // If we already know that we can't find an ml node because all ml nodes are running at capacity or
             // simply because there are no ml nodes in the cluster then we fail quickly here:
-            MlMetadata mlMetadata = clusterState.metaData().custom(MLMetadataField.TYPE);
-            TransportOpenJobAction.validate(params.getJobId(), mlMetadata);
+            TransportOpenJobAction.validate(params.getJobId(), MlMetadata.getMlMetadata(clusterState));
             PersistentTasksCustomMetaData.Assignment assignment = selectLeastLoadedMlNode(params.getJobId(), clusterState,
                     maxConcurrentJobAllocations, fallbackMaxNumberOfOpenJobs, maxMachineMemoryPercent, logger);
             if (assignment.getExecutorNode() == null) {
@@ -17,7 +17,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.ClientHelper;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction;
 import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig;

@@ -52,7 +51,7 @@ public class TransportPreviewDatafeedAction extends HandledTransportAction<Previ

     @Override
     protected void doExecute(PreviewDatafeedAction.Request request, ActionListener<PreviewDatafeedAction.Response> listener) {
-        MlMetadata mlMetadata = clusterService.state().getMetaData().custom(MLMetadataField.TYPE);
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterService.state());
         DatafeedConfig datafeed = mlMetadata.getDatafeed(request.getDatafeedId());
         if (datafeed == null) {
             throw ExceptionsHelper.missingDatafeedException(request.getDatafeedId());
@@ -14,9 +14,7 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.client.Client;
-import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ToXContent;

@@ -26,16 +24,12 @@ import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.ml.action.PutCalendarAction;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetaIndex;
-import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.calendars.Calendar;
 import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;

 import java.io.IOException;
 import java.util.Collections;
-import java.util.List;
-import java.util.function.Consumer;

 import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
 import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;

@@ -43,17 +37,15 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
 public class TransportPutCalendarAction extends HandledTransportAction<PutCalendarAction.Request, PutCalendarAction.Response> {

     private final Client client;
-    private final ClusterService clusterService;

     @Inject
     public TransportPutCalendarAction(Settings settings, ThreadPool threadPool,
                                       TransportService transportService, ActionFilters actionFilters,
                                       IndexNameExpressionResolver indexNameExpressionResolver,
-                                      Client client, ClusterService clusterService) {
+                                      Client client) {
         super(settings, PutCalendarAction.NAME, threadPool, transportService, actionFilters,
             indexNameExpressionResolver, PutCalendarAction.Request::new);
         this.client = client;
-        this.clusterService = clusterService;
     }

     @Override
@@ -141,7 +141,7 @@ public class TransportPutDatafeedAction extends TransportMasterNodeAction<PutDat
     }

     private ClusterState putDatafeed(PutDatafeedAction.Request request, ClusterState clusterState) {
-        MlMetadata currentMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
+        MlMetadata currentMetadata = MlMetadata.getMlMetadata(clusterState);
         MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata)
                 .putDatafeed(request.getDatafeed(), threadPool.getThreadContext()).build();
         return ClusterState.builder(clusterState).metaData(
@@ -130,7 +130,7 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction<Star
         };

         // Verify data extractor factory can be created, then start persistent task
-        MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE);
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
         PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
         validate(params.getDatafeedId(), mlMetadata, tasks);
         DatafeedConfig datafeed = mlMetadata.getDatafeed(params.getDatafeedId());

@@ -221,9 +221,8 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction<Star

         @Override
         public void validate(StartDatafeedAction.DatafeedParams params, ClusterState clusterState) {
-            MlMetadata mlMetadata = clusterState.metaData().custom(MLMetadataField.TYPE);
             PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
-            TransportStartDatafeedAction.validate(params.getDatafeedId(), mlMetadata, tasks);
+            TransportStartDatafeedAction.validate(params.getDatafeedId(), MlMetadata.getMlMetadata(clusterState), tasks);
             new DatafeedNodeSelector(clusterState, resolver, params.getDatafeedId()).checkDatafeedTaskCanBeCreated();
         }

@@ -130,7 +130,7 @@ public class TransportStopDatafeedAction extends TransportTasksAction<TransportS
                         new ActionListenerResponseHandler<>(listener, StopDatafeedAction.Response::new));
             }
         } else {
-            MlMetadata mlMetadata = state.getMetaData().custom(MLMetadataField.TYPE);
+            MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
             PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);

             List<String> startedDatafeeds = new ArrayList<>();
@@ -16,7 +16,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.PutCalendarAction;
 import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction;

@@ -29,7 +28,6 @@ import java.util.stream.Collectors;

 public class TransportUpdateCalendarJobAction extends HandledTransportAction<UpdateCalendarJobAction.Request, PutCalendarAction.Response> {

-    private final ClusterService clusterService;
     private final JobProvider jobProvider;
     private final JobManager jobManager;

@@ -37,28 +35,21 @@ public class TransportUpdateCalendarJobAction extends HandledTransportAction<Upd
     public TransportUpdateCalendarJobAction(Settings settings, ThreadPool threadPool,
                                             TransportService transportService, ActionFilters actionFilters,
                                             IndexNameExpressionResolver indexNameExpressionResolver,
-                                            ClusterService clusterService, JobProvider jobProvider, JobManager jobManager) {
+                                            JobProvider jobProvider, JobManager jobManager) {
         super(settings, UpdateCalendarJobAction.NAME, threadPool, transportService, actionFilters,
             indexNameExpressionResolver, UpdateCalendarJobAction.Request::new);
-        this.clusterService = clusterService;
         this.jobProvider = jobProvider;
         this.jobManager = jobManager;
     }

     @Override
     protected void doExecute(UpdateCalendarJobAction.Request request, ActionListener<PutCalendarAction.Response> listener) {
-        ClusterState clusterState = clusterService.state();
-        MlMetadata maybeNullMetaData = clusterState.getMetaData().custom(MLMetadataField.TYPE);
-        final MlMetadata mlMetadata = maybeNullMetaData == null ? MlMetadata.EMPTY_METADATA : maybeNullMetaData;
-
         Set<String> jobIdsToAdd = Strings.tokenizeByCommaToSet(request.getJobIdsToAddExpression());
         Set<String> jobIdsToRemove = Strings.tokenizeByCommaToSet(request.getJobIdsToRemoveExpression());

-        jobProvider.updateCalendar(request.getCalendarId(), jobIdsToAdd, jobIdsToRemove, mlMetadata,
+        jobProvider.updateCalendar(request.getCalendarId(), jobIdsToAdd, jobIdsToRemove,
                 c -> {
-                    List<String> existingJobsOrGroups =
-                            c.getJobIds().stream().filter(mlMetadata::isGroupOrJob).collect(Collectors.toList());
-                    jobManager.updateProcessOnCalendarChanged(existingJobsOrGroups);
+                    jobManager.updateProcessOnCalendarChanged(c.getJobIds());
                     listener.onResponse(new PutCalendarAction.Response(c));
                 }, listener::onFailure);
     }
@@ -63,9 +63,9 @@ public class TransportUpdateDatafeedAction extends TransportMasterNodeAction<Upd
             }

             @Override
-            public ClusterState execute(ClusterState currentState) throws Exception {
+            public ClusterState execute(ClusterState currentState) {
                 DatafeedUpdate update = request.getUpdate();
-                MlMetadata currentMetadata = currentState.getMetaData().custom(MLMetadataField.TYPE);
+                MlMetadata currentMetadata = MlMetadata.getMlMetadata(currentState);
                 PersistentTasksCustomMetaData persistentTasks =
                         currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
                 MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata)
@@ -20,7 +20,6 @@ import org.elasticsearch.common.util.concurrent.FutureUtils;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.CloseJobAction;
 import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction;

@@ -80,10 +79,7 @@ public class DatafeedManager extends AbstractComponent {
     public void run(TransportStartDatafeedAction.DatafeedTask task, Consumer<Exception> taskHandler) {
         String datafeedId = task.getDatafeedId();
         ClusterState state = clusterService.state();
-        MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE);
-        if (mlMetadata == null) {
-            mlMetadata = MlMetadata.EMPTY_METADATA;
-        }
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);

         DatafeedConfig datafeed = mlMetadata.getDatafeed(datafeedId);
         Job job = mlMetadata.getJobs().get(datafeed.getJobId());
@@ -12,7 +12,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
 import org.elasticsearch.xpack.core.ml.job.config.JobState;

@@ -33,7 +32,7 @@ public class DatafeedNodeSelector {
     private final IndexNameExpressionResolver resolver;

     public DatafeedNodeSelector(ClusterState clusterState, IndexNameExpressionResolver resolver, String datafeedId) {
-        MlMetadata mlMetadata = Objects.requireNonNull(clusterState.metaData().custom(MLMetadataField.TYPE));
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState);
         PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
         this.datafeed = mlMetadata.getDatafeed(datafeedId);
         this.jobTask = MlMetadata.getJobTask(datafeed.getJobId(), tasks);
@@ -67,6 +67,7 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Consumer;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;

 /**
  * Allows interactions with jobs. The managed interactions include:

@@ -133,8 +134,7 @@ public class JobManager extends AbstractComponent {
      * @throws ResourceNotFoundException if no job matches {@code jobId}
      */
     public static Job getJobOrThrowIfUnknown(String jobId, ClusterState clusterState) {
-        MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
-        Job job = (mlMetadata == null) ? null : mlMetadata.getJobs().get(jobId);
+        Job job = MlMetadata.getMlMetadata(clusterState).getJobs().get(jobId);
         if (job == null) {
             throw ExceptionsHelper.missingJobException(jobId);
         }

@@ -142,11 +142,7 @@ public class JobManager extends AbstractComponent {
     }

     private Set<String> expandJobIds(String expression, boolean allowNoJobs, ClusterState clusterState) {
-        MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
-        if (mlMetadata == null) {
-            mlMetadata = MlMetadata.EMPTY_METADATA;
-        }
-        return mlMetadata.expandJobIds(expression, allowNoJobs);
+        return MlMetadata.getMlMetadata(clusterState).expandJobIds(expression, allowNoJobs);
     }

     /**

@@ -160,7 +156,7 @@ public class JobManager extends AbstractComponent {
      */
     public QueryPage<Job> expandJobs(String expression, boolean allowNoJobs, ClusterState clusterState) {
         Set<String> expandedJobIds = expandJobIds(expression, allowNoJobs, clusterState);
-        MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState);
         List<Job> jobs = new ArrayList<>();
         for (String expandedJobId : expandedJobIds) {
             jobs.add(mlMetadata.getJobs().get(expandedJobId));

@@ -188,8 +184,8 @@ public class JobManager extends AbstractComponent {
             DEPRECATION_LOGGER.deprecated("Creating jobs with delimited data format is deprecated. Please use xcontent instead.");
         }

-        MlMetadata currentMlMetadata = state.metaData().custom(MLMetadataField.TYPE);
-        if (currentMlMetadata != null && currentMlMetadata.getJobs().containsKey(job.getId())) {
+        MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(state);
+        if (currentMlMetadata.getJobs().containsKey(job.getId())) {
             actionListener.onFailure(ExceptionsHelper.jobAlreadyExists(job.getId()));
             return;
         }

@@ -425,8 +421,13 @@ public class JobManager extends AbstractComponent {

     public void updateProcessOnCalendarChanged(List<String> calendarJobIds) {
         ClusterState clusterState = clusterService.state();
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState);
+
+        List<String> existingJobsOrGroups =
+                calendarJobIds.stream().filter(mlMetadata::isGroupOrJob).collect(Collectors.toList());
+
         Set<String> expandedJobIds = new HashSet<>();
-        calendarJobIds.forEach(jobId -> expandedJobIds.addAll(expandJobIds(jobId, true, clusterState)));
+        existingJobsOrGroups.forEach(jobId -> expandedJobIds.addAll(expandJobIds(jobId, true, clusterState)));
         for (String jobId : expandedJobIds) {
             if (isJobOpen(clusterState, jobId)) {
                 updateJobProcessNotifier.submitJobUpdate(UpdateParams.scheduledEventsUpdate(jobId), ActionListener.wrap(

@@ -469,8 +470,8 @@ public class JobManager extends AbstractComponent {
             }

             @Override
-            public ClusterState execute(ClusterState currentState) throws Exception {
-                MlMetadata currentMlMetadata = currentState.metaData().custom(MLMetadataField.TYPE);
+            public ClusterState execute(ClusterState currentState) {
+                MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(currentState);
                 if (currentMlMetadata.getJobs().containsKey(jobId) == false) {
                     // We wouldn't have got here if the job never existed so
                     // the Job must have been deleted by another action.

@@ -560,8 +561,7 @@ public class JobManager extends AbstractComponent {
     }

     private static MlMetadata.Builder createMlMetadataBuilder(ClusterState currentState) {
-        MlMetadata currentMlMetadata = currentState.metaData().custom(MLMetadataField.TYPE);
-        return new MlMetadata.Builder(currentMlMetadata);
+        return new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState));
    }

     private static ClusterState buildNewClusterState(ClusterState currentState, MlMetadata.Builder builder) {
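The updateProcessOnCalendarChanged hunk above absorbs the filtering previously done in TransportUpdateCalendarJobAction. Condensed, the new flow reads as follows; the code lines come from the hunk itself, while the motivating comment is an editorial gloss rather than something stated in the source:

    // Calendars may reference job IDs that no longer exist; filtering through
    // isGroupOrJob first means expandJobIds only sees known jobs and groups.
    MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState);
    List<String> existingJobsOrGroups =
            calendarJobIds.stream().filter(mlMetadata::isGroupOrJob).collect(Collectors.toList());
    Set<String> expandedJobIds = new HashSet<>();
    existingJobsOrGroups.forEach(jobId -> expandedJobIds.addAll(expandJobIds(jobId, true, clusterState)));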
@@ -1114,7 +1114,7 @@ public class JobProvider {
                 result -> handler.accept(result.result), errorHandler, () -> null);
     }

-    public void updateCalendar(String calendarId, Set<String> jobIdsToAdd, Set<String> jobIdsToRemove, MlMetadata mlMetadata,
+    public void updateCalendar(String calendarId, Set<String> jobIdsToAdd, Set<String> jobIdsToRemove,
                                Consumer<Calendar> handler, Consumer<Exception> errorHandler) {

         ActionListener<Calendar> getCalendarListener = ActionListener.wrap(
@@ -11,7 +11,6 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.job.config.Job;
 import org.elasticsearch.xpack.core.ml.job.results.Result;

@@ -61,12 +60,8 @@ abstract class AbstractExpiredJobDataRemover implements MlDataRemover {
     }

     private Iterator<Job> newJobIterator() {
-        List<Job> jobs = new ArrayList<>();
         ClusterState clusterState = clusterService.state();
-        MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
-        if (mlMetadata != null) {
-            jobs.addAll(mlMetadata.getJobs().values());
-        }
+        List<Job> jobs = new ArrayList<>(MlMetadata.getMlMetadata(clusterState).getJobs().values());
         return createVolatileCursorIterator(jobs);
     }

@@ -11,10 +11,8 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.client.Client;
-import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
 import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings;

@@ -24,7 +22,6 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles;
 import org.elasticsearch.xpack.ml.job.persistence.BatchedStateDocIdsIterator;

 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Deque;
 import java.util.List;
 import java.util.Objects;

@@ -84,12 +81,7 @@ public class UnusedStateRemover implements MlDataRemover {
     }

     private Set<String> getJobIds() {
-        ClusterState clusterState = clusterService.state();
-        MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
-        if (mlMetadata != null) {
-            return mlMetadata.getJobs().keySet();
-        }
-        return Collections.emptySet();
+        return MlMetadata.getMlMetadata(clusterService.state()).getJobs().keySet();
     }

     private void executeDeleteUnusedStateDocs(BulkRequestBuilder deleteUnusedStateRequestBuilder, ActionListener<Boolean> listener) {
@@ -65,7 +65,7 @@ public class MachineLearningFeatureSetTests extends ESTestCase {
     private XPackLicenseState licenseState;

     @Before
-    public void init() throws Exception {
+    public void init() {
         commonSettings = Settings.builder()
                 .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath())
                 .put(MachineLearningField.AUTODETECT_PROCESS.getKey(), false)

@@ -232,9 +232,28 @@ public class MachineLearningFeatureSetTests extends ESTestCase {
         try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
             usage.toXContent(builder, ToXContent.EMPTY_PARAMS);
             source = new XContentSource(builder);
-            assertThat(source.getValue("jobs"), equalTo(Collections.emptyMap()));
-            assertThat(source.getValue("datafeeds"), equalTo(Collections.emptyMap()));
         }
+
+        assertThat(source.getValue("jobs._all.count"), equalTo(0));
+        assertThat(source.getValue("jobs._all.detectors.min"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.detectors.max"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.detectors.total"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.detectors.avg"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.model_size.min"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.model_size.max"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.model_size.total"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.model_size.avg"), equalTo(0.0));
+
+        assertThat(source.getValue("jobs.opening"), is(nullValue()));
+        assertThat(source.getValue("jobs.opened"), is(nullValue()));
+        assertThat(source.getValue("jobs.closing"), is(nullValue()));
+        assertThat(source.getValue("jobs.closed"), is(nullValue()));
+        assertThat(source.getValue("jobs.failed"), is(nullValue()));
+
+        assertThat(source.getValue("datafeeds._all.count"), equalTo(0));
+
+        assertThat(source.getValue("datafeeds.started"), is(nullValue()));
+        assertThat(source.getValue("datafeeds.stopped"), is(nullValue()));
     }

     private void givenJobs(List<Job> jobs, List<GetJobsStatsAction.Response.JobStats> jobsStats) {
@@ -10,7 +10,6 @@ import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -22,20 +21,15 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.junit.Before;
-import org.mockito.Mockito;
 
 import java.net.InetAddress;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
 
 import static org.elasticsearch.mock.orig.Mockito.doAnswer;
 import static org.elasticsearch.mock.orig.Mockito.times;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
@@ -68,7 +62,7 @@ public class MlInitializationServiceTests extends ESTestCase {
         when(clusterService.getClusterName()).thenReturn(CLUSTER_NAME);
     }
 
-    public void testInitialize() throws Exception {
+    public void testInitialize() {
         MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client);
 
         ClusterState cs = ClusterState.builder(new ClusterName("_name"))
@@ -80,11 +74,10 @@ public class MlInitializationServiceTests extends ESTestCase {
                 .build();
         initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
 
         verify(clusterService, times(1)).submitStateUpdateTask(eq("install-ml-metadata"), any());
         assertThat(initializationService.getDailyMaintenanceService().isStarted(), is(true));
     }
 
-    public void testInitialize_noMasterNode() throws Exception {
+    public void testInitialize_noMasterNode() {
         MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client);
 
         ClusterState cs = ClusterState.builder(new ClusterName("_name"))
@@ -94,11 +87,10 @@ public class MlInitializationServiceTests extends ESTestCase {
                 .build();
         initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
 
         verify(clusterService, times(0)).submitStateUpdateTask(eq("install-ml-metadata"), any());
         assertThat(initializationService.getDailyMaintenanceService(), is(nullValue()));
     }
 
-    public void testInitialize_alreadyInitialized() throws Exception {
+    public void testInitialize_alreadyInitialized() {
         MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client);
 
         ClusterState cs = ClusterState.builder(new ClusterName("_name"))
@@ -113,67 +105,10 @@ public class MlInitializationServiceTests extends ESTestCase {
         initializationService.setDailyMaintenanceService(initialDailyMaintenanceService);
         initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
 
         verify(clusterService, times(0)).submitStateUpdateTask(eq("install-ml-metadata"), any());
         assertSame(initialDailyMaintenanceService, initializationService.getDailyMaintenanceService());
     }
 
-    public void testInitialize_onlyOnce() throws Exception {
-        MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client);
-
-        ClusterState cs = ClusterState.builder(new ClusterName("_name"))
-                .nodes(DiscoveryNodes.builder()
-                        .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT))
-                        .localNodeId("_node_id")
-                        .masterNodeId("_node_id"))
-                .metaData(MetaData.builder())
-                .build();
-        initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
-        initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
-
-        verify(clusterService, times(1)).submitStateUpdateTask(eq("install-ml-metadata"), any());
-    }
-
-    public void testInitialize_reintialiseAfterFailure() throws Exception {
-        MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client);
-
-        // Fail the first cluster state update
-        AtomicBoolean onFailureCalled = new AtomicBoolean(false);
-        Mockito.doAnswer(invocation -> {
-            ClusterStateUpdateTask task = (ClusterStateUpdateTask) invocation.getArguments()[1];
-            task.onFailure("mock a failure", new IllegalStateException());
-            onFailureCalled.set(true);
-            return null;
-        }).when(clusterService).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class));
-
-        ClusterState cs = ClusterState.builder(new ClusterName("_name"))
-                .nodes(DiscoveryNodes.builder()
-                        .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT))
-                        .localNodeId("_node_id")
-                        .masterNodeId("_node_id"))
-                .metaData(MetaData.builder())
-                .build();
-        initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
-        assertTrue("Something went wrong mocking the cluster update task", onFailureCalled.get());
-        verify(clusterService, times(1)).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class));
-
-        // 2nd update succeeds
-        AtomicReference<ClusterState> clusterStateHolder = new AtomicReference<>();
-        Mockito.doAnswer(invocation -> {
-            ClusterStateUpdateTask task = (ClusterStateUpdateTask) invocation.getArguments()[1];
-            clusterStateHolder.set(task.execute(cs));
-            return null;
-        }).when(clusterService).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class));
-
-        initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
-        assertTrue("Something went wrong mocking the successful cluster update task", clusterStateHolder.get() != null);
-        verify(clusterService, times(2)).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class));
-
-        // 3rd update won't be called as ML Metadata has been installed
-        initializationService.clusterChanged(new ClusterChangedEvent("_source", clusterStateHolder.get(), clusterStateHolder.get()));
-        verify(clusterService, times(2)).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class));
-    }
-
-    public void testNodeGoesFromMasterToNonMasterAndBack() throws Exception {
+    public void testNodeGoesFromMasterToNonMasterAndBack() {
         MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client);
         MlDailyMaintenanceService initialDailyMaintenanceService = mock(MlDailyMaintenanceService.class);
         initializationService.setDailyMaintenanceService(initialDailyMaintenanceService);
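
The two tests deleted above relied on a Mockito idiom worth noting when reading the surviving tests: stub ClusterService.submitStateUpdateTask and drive the submitted ClusterStateUpdateTask by hand, so failure and success paths can be exercised without a real master node. Condensed from the deleted lines (swapping the answer body to task.execute(cs) drives the success path instead):

    // Make the next submitted "install-ml-metadata" update task fail
    // immediately, then assert on the service's reaction.
    Mockito.doAnswer(invocation -> {
        ClusterStateUpdateTask task = (ClusterStateUpdateTask) invocation.getArguments()[1];
        task.onFailure("mock a failure", new IllegalStateException());
        return null;
    }).when(clusterService).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class));
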
@@ -251,11 +251,11 @@ public class DatafeedManagerTests extends ESTestCase {
     }
 
     public void testDatafeedTaskWaitsUntilJobIsOpened() {
         PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
         addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder);
         ClusterState.Builder cs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
         when(clusterService.state()).thenReturn(cs.build());
 
         Consumer<Exception> handler = mockConsumer();
@@ -269,8 +269,8 @@ public class DatafeedManagerTests extends ESTestCase {
         addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder);
         addJobTask("another_job", "node_id", JobState.OPENED, tasksBuilder);
         ClusterState.Builder anotherJobCs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
 
         capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", anotherJobCs.build(), cs.build()));
 
@@ -280,8 +280,8 @@ public class DatafeedManagerTests extends ESTestCase {
         tasksBuilder = PersistentTasksCustomMetaData.builder();
         addJobTask("job_id", "node_id", JobState.OPENED, tasksBuilder);
         ClusterState.Builder jobOpenedCs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
 
         capturedClusterStateListener.getValue().clusterChanged(
                 new ClusterChangedEvent("_source", jobOpenedCs.build(), anotherJobCs.build()));
@@ -294,8 +294,8 @@ public class DatafeedManagerTests extends ESTestCase {
         PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
         addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder);
         ClusterState.Builder cs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
         when(clusterService.state()).thenReturn(cs.build());
 
         Consumer<Exception> handler = mockConsumer();
@@ -308,8 +308,8 @@ public class DatafeedManagerTests extends ESTestCase {
         tasksBuilder = PersistentTasksCustomMetaData.builder();
         addJobTask("job_id", "node_id", JobState.FAILED, tasksBuilder);
         ClusterState.Builder updatedCs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
 
         capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", updatedCs.build(), cs.build()));
 
@@ -322,8 +322,8 @@ public class DatafeedManagerTests extends ESTestCase {
         PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
         addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder);
         ClusterState.Builder cs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
         when(clusterService.state()).thenReturn(cs.build());
 
         Consumer<Exception> handler = mockConsumer();
@@ -340,8 +340,8 @@ public class DatafeedManagerTests extends ESTestCase {
         tasksBuilder = PersistentTasksCustomMetaData.builder();
         addJobTask("job_id", "node_id", JobState.OPENED, tasksBuilder);
         ClusterState.Builder updatedCs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
 
         capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", cs.build(), updatedCs.build()));
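
Every hunk in this test class makes the same substitution: MlMetadata is read through MlMetadata.getMlMetadata(ClusterState) instead of a raw metaData().custom(MLMetadataField.TYPE) lookup. Judging from the call sites in this diff (JobManagerTests below stops seeding MlMetadata.EMPTY_METADATA into test states, while DeleteJobIT still asserts the result is non-null), the accessor presumably falls back to the empty metadata when the custom entry is absent. A minimal sketch under that assumption:

    // Presumed shape of the null-safe accessor; inferred from the call
    // sites in this diff, not copied from the commit.
    public static MlMetadata getMlMetadata(ClusterState state) {
        MlMetadata mlMetadata = (state == null) ? null
                : state.getMetaData().custom(MLMetadataField.TYPE);
        return (mlMetadata == null) ? MlMetadata.EMPTY_METADATA : mlMetadata;
    }
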
@@ -103,7 +103,7 @@ public class DeleteJobIT extends BaseMlIntegTestCase {
     }
 
     private ClusterState markJobAsDeleted(String jobId, ClusterState currentState) {
-        MlMetadata mlMetadata = currentState.metaData().custom(MLMetadataField.TYPE);
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState);
         assertNotNull(mlMetadata);
 
         MlMetadata.Builder builder = new MlMetadata.Builder(mlMetadata);
@@ -116,7 +116,7 @@ public class DeleteJobIT extends BaseMlIntegTestCase {
     }
 
     private ClusterState removeJobFromClusterState(String jobId, ClusterState currentState) {
-        MlMetadata.Builder builder = new MlMetadata.Builder(currentState.metaData().custom(MLMetadataField.TYPE));
+        MlMetadata.Builder builder = new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState));
         builder.deleteJob(jobId, currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE));
 
         ClusterState.Builder newState = ClusterState.builder(currentState);
@@ -244,7 +244,7 @@ public class JobProviderIT extends MlSingleNodeTestCase {
             throws Exception {
         CountDownLatch latch = new CountDownLatch(1);
         AtomicReference<Exception> exceptionHolder = new AtomicReference<>();
-        jobProvider.updateCalendar(calendarId, idsToAdd, idsToRemove, mlMetadata,
+        jobProvider.updateCalendar(calendarId, idsToAdd, idsToRemove,
                 r -> latch.countDown(),
                 e -> {
                     exceptionHolder.set(e);
@@ -326,7 +326,7 @@ public class JobManagerTests extends ESTestCase {
 
     private ClusterState createClusterState() {
         ClusterState.Builder builder = ClusterState.builder(new ClusterName("_name"));
-        builder.metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA));
+        builder.metaData(MetaData.builder());
         return builder.build();
     }
 }
@@ -39,8 +39,6 @@ import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
-import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.job.config.Job;
 import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
@@ -93,7 +91,7 @@ public class JobProviderTests extends ESTestCase {
         AtomicReference<Boolean> resultHolder = new AtomicReference<>();
 
         ClusterState cs = ClusterState.builder(new ClusterName("_name"))
-                .metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA).indices(ImmutableOpenMap.of()))
+                .metaData(MetaData.builder().indices(ImmutableOpenMap.of()))
                 .build();
 
         ClusterService clusterService = mock(ClusterService.class);
@@ -157,7 +155,7 @@ public class JobProviderTests extends ESTestCase {
                 .fPut(AnomalyDetectorsIndex.jobResultsAliasedName("foo"), indexMetaData).build();
 
         ClusterState cs2 = ClusterState.builder(new ClusterName("_name"))
-                .metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA).indices(indexMap)).build();
+                .metaData(MetaData.builder().indices(indexMap)).build();
 
         ClusterService clusterService = mock(ClusterService.class);
 
@@ -209,7 +207,7 @@ public class JobProviderTests extends ESTestCase {
         ImmutableOpenMap<String, IndexMetaData> indexMap = ImmutableOpenMap.<String, IndexMetaData>builder().build();
 
         ClusterState cs = ClusterState.builder(new ClusterName("_name"))
-                .metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA).indices(indexMap)).build();
+                .metaData(MetaData.builder().indices(indexMap)).build();
 
         ClusterService clusterService = mock(ClusterService.class);
@@ -28,7 +28,6 @@ import org.elasticsearch.test.MockHttpTransport;
 import org.elasticsearch.test.discovery.TestZenDiscovery;
 import org.elasticsearch.xpack.core.XPackSettings;
 import org.elasticsearch.xpack.ml.LocalStateMachineLearning;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.ml.MachineLearning;
 import org.elasticsearch.xpack.core.ml.MachineLearningField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
@@ -272,8 +271,7 @@ public abstract class BaseMlIntegTestCase extends ESIntegTestCase {
     }
 
     public static void deleteAllDatafeeds(Logger logger, Client client) throws Exception {
-        MetaData metaData = client.admin().cluster().prepareState().get().getState().getMetaData();
-        MlMetadata mlMetadata = metaData.custom(MLMetadataField.TYPE);
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(client.admin().cluster().prepareState().get().getState());
         try {
             logger.info("Closing all datafeeds (using _all)");
             StopDatafeedAction.Response stopResponse = client
@@ -312,8 +310,7 @@ public abstract class BaseMlIntegTestCase extends ESIntegTestCase {
     }
 
     public static void deleteAllJobs(Logger logger, Client client) throws Exception {
-        MetaData metaData = client.admin().cluster().prepareState().get().getState().getMetaData();
-        MlMetadata mlMetadata = metaData.custom(MLMetadataField.TYPE);
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(client.admin().cluster().prepareState().get().getState());
 
         try {
             CloseJobAction.Request closeRequest = new CloseJobAction.Request(MetaData.ALL);
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.
 
-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
-  org.elasticsearch.xpack.core.ssl.CertificateGenerateTool \
-  "$@"
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
+  org.elasticsearch.xpack.core.ssl.CertificateGenerateTool \
+  "$@"
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.
 
-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
-  org.elasticsearch.xpack.core.ssl.CertificateTool \
-  "$@"
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
+  org.elasticsearch.xpack.core.ssl.CertificateTool \
+  "$@"
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.
 
-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
-  org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool \
-  "$@"
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
+  org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool \
+  "$@"
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.
 
-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
-  org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand \
-  "$@"
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
+  org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand \
+  "$@"
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.
 
-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
-  org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool \
-  "$@"
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
+  org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool \
+  "$@"
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.
 
-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
-  org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool \
-  "$@"
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
+  org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool \
+  "$@"