Merge branch 'master' into index-lifecycle

Tal Levy 2018-07-19 12:33:10 -07:00
commit 1acdd37968
117 changed files with 2626 additions and 1269 deletions

View File

@@ -222,7 +222,7 @@ subprojects {
"org.elasticsearch.gradle:build-tools:${version}": ':build-tools',
"org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
"org.elasticsearch:elasticsearch:${version}": ':server',
"org.elasticsearch:elasticsearch-cli:${version}": ':libs:cli',
"org.elasticsearch:elasticsearch-cli:${version}": ':libs:elasticsearch-cli',
"org.elasticsearch:elasticsearch-core:${version}": ':libs:core',
"org.elasticsearch:elasticsearch-nio:${version}": ':libs:nio',
"org.elasticsearch:elasticsearch-x-content:${version}": ':libs:x-content',
@@ -622,6 +622,21 @@ gradle.projectsEvaluated {
}
}
}
// Having the same group and name for distinct projects causes Gradle to consider them equal when resolving
// dependencies, leading to hard-to-debug failures. Run a check across all projects to prevent this from happening.
// see: https://github.com/gradle/gradle/issues/847
Map coordsToProject = [:]
project.allprojects.forEach { p ->
String coords = "${p.group}:${p.name}"
// putIfAbsent returns the previous mapping (or null), never false, so a non-null result signals a duplicate
if (coordsToProject.putIfAbsent(coords, p) != null) {
throw new GradleException(
"Detected that two projects: ${p.path} and ${coordsToProject[coords].path} " +
"have the same name and group: ${coords}. " +
"This doesn't currently work correctly in Gradle, see: " +
"https://github.com/gradle/gradle/issues/847"
)
}
}
}
if (System.properties.get("build.compare") != null) {

View File

@@ -75,10 +75,10 @@ public class PluginBuildPlugin extends BuildPlugin {
// and generate a different pom for the zip
addClientJarPomGeneration(project)
addClientJarTask(project)
} else {
// no client plugin, so use the pom file from nebula, without jar, for the zip
project.ext.set("nebulaPublish.maven.jar", false)
}
// while the jar isn't normally published, we still at least build a pom of deps
// in case it is published, for instance when other plugins extend this plugin
configureJarPom(project)
project.integTestCluster.dependsOn(project.bundlePlugin)
project.tasks.run.dependsOn(project.bundlePlugin)
@@ -94,7 +94,6 @@ public class PluginBuildPlugin extends BuildPlugin {
}
if (isModule == false || isXPackModule) {
addZipPomGeneration(project)
addNoticeGeneration(project)
}
@@ -239,36 +238,15 @@ public class PluginBuildPlugin extends BuildPlugin {
}
}
/** Adds a task to generate a pom file for the zip distribution. */
public static void addZipPomGeneration(Project project) {
/** Configure the pom for the main jar of this plugin */
protected static void configureJarPom(Project project) {
project.plugins.apply(ScmInfoPlugin.class)
project.plugins.apply(MavenPublishPlugin.class)
project.publishing {
publications {
zip(MavenPublication) {
artifact project.bundlePlugin
}
/* HUGE HACK: the underlying maven publication library refuses to deploy any attached artifacts
* when the packaging type is set to 'pom'. But Sonatype's OSS repositories require source files
* for artifacts that are of type 'zip'. We already publish the source and javadoc for Elasticsearch
* under the various other subprojects. So here we create another publication using the same
* name that has the "real" pom, and rely on the fact that gradle will execute the publish tasks
* in alphabetical order. This lets us publish the zip file even though the pom says the
* type is 'pom' instead of 'zip'. We cannot set up a dependency between the tasks because the
* publishing tasks are created *extremely* late in the configuration phase, so that we cannot get
* ahold of the actual task. Furthermore, this entire hack only exists so we can make publishing to
* maven local work, since we publish to maven central externally. */
zipReal(MavenPublication) {
artifactId = project.pluginProperties.extension.name
pom.withXml { XmlProvider xml ->
Node root = xml.asNode()
root.appendNode('name', project.pluginProperties.extension.name)
root.appendNode('description', project.pluginProperties.extension.description)
root.appendNode('url', urlFromOrigin(project.scminfo.origin))
Node scmNode = root.appendNode('scm')
scmNode.appendNode('url', project.scminfo.origin)
}
nebula(MavenPublication) {
artifactId project.pluginProperties.extension.name
}
}
}

View File

@@ -142,6 +142,8 @@ class ClusterConfiguration {
// there are cases when the value depends on a task that has not yet executed at configuration time
Map<String, Object> systemProperties = new HashMap<>()
Map<String, Object> environmentVariables = new HashMap<>()
Map<String, Object> settings = new HashMap<>()
Map<String, String> keystoreSettings = new HashMap<>()
@@ -164,6 +166,11 @@ class ClusterConfiguration {
systemProperties.put(property, value)
}
@Input
void environment(String variable, Object value) {
environmentVariables.put(variable, value)
}
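// lets build scripts pass environment variables to test cluster nodes, e.g. the repository-s3
// build below uses: clusterConfig.environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', ...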
@Input
void setting(String name, Object value) {
settings.put(name, value)

View File

@@ -181,6 +181,7 @@ class NodeInfo {
args.addAll("-E", "node.portsfile=true")
env = [:]
env.putAll(config.environmentVariables)
for (Map.Entry<String, String> property : System.properties.entrySet()) {
if (property.key.startsWith('tests.es.')) {
args.add("-E")

View File

@@ -70,3 +70,7 @@ forbiddenApisMain {
signaturesURLs += [PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
signaturesURLs += [file('src/main/resources/forbidden/rest-high-level-signatures.txt').toURI().toURL()]
}
integTestCluster {
setting 'xpack.license.self_generated.type', 'trial'
}

View File

@@ -106,6 +106,7 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.protocol.xpack.XPackInfoRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.protocol.xpack.XPackUsageRequest;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.script.mustache.MultiSearchTemplateRequest;
@@ -1097,6 +1098,25 @@ final class RequestConverters {
return request;
}
static Request xPackWatcherPutWatch(PutWatchRequest putWatchRequest) {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("watcher")
.addPathPartAsIs("watch")
.addPathPart(putWatchRequest.getId())
.build();
Request request = new Request(HttpPut.METHOD_NAME, endpoint);
Params params = new Params(request).withVersion(putWatchRequest.getVersion());
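// withVersion only adds a "version" request parameter when a version was explicitly set on the request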
if (putWatchRequest.isActive() == false) {
params.putParam("active", "false");
}
ContentType contentType = createContentType(putWatchRequest.xContentType());
BytesReference source = putWatchRequest.getSource();
request.setEntity(new ByteArrayEntity(source.toBytesRef().bytes, 0, source.length(), contentType));
return request;
}
static Request xpackUsage(XPackUsageRequest usageRequest) {
Request request = new Request(HttpGet.METHOD_NAME, "/_xpack/usage");
Params parameters = new Params(request);

View File

@@ -0,0 +1,64 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import java.io.IOException;
import static java.util.Collections.emptySet;
public final class WatcherClient {
private final RestHighLevelClient restHighLevelClient;
WatcherClient(RestHighLevelClient restHighLevelClient) {
this.restHighLevelClient = restHighLevelClient;
}
/**
* Put a watch into the cluster
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html">
* the docs</a> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public PutWatchResponse putWatch(PutWatchRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::xPackWatcherPutWatch, options,
PutWatchResponse::fromXContent, emptySet());
}
/**
* Asynchronously put a watch into the cluster
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html">
* the docs</a> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
*/
public void putWatchAsync(PutWatchRequest request, RequestOptions options,
ActionListener<PutWatchResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xPackWatcherPutWatch, options,
PutWatchResponse::fromXContent, listener, emptySet());
}
}
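For orientation, a minimal synchronous usage sketch, assuming a RestHighLevelClient named
"client" is already constructed (the watch id and source are illustrative and mirror the
WatcherIT test further down):
// sketch only: BytesArray, BytesReference and XContentType come from the same
// org.elasticsearch.common packages imported by WatcherIT below
BytesReference source = new BytesArray(
    "{ \"trigger\": { \"schedule\": { \"interval\": \"10h\" } }," +
    " \"input\": { \"none\": {} }," +
    " \"actions\": { \"logme\": { \"logging\": { \"text\": \"{{ctx.payload}}\" } } } }");
PutWatchRequest request = new PutWatchRequest("my_watch_id", source, XContentType.JSON);
PutWatchResponse response = client.xpack().watcher().putWatch(request, RequestOptions.DEFAULT);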

View File

@@ -39,10 +39,17 @@ import static java.util.Collections.emptySet;
* X-Pack APIs on elastic.co</a> for more information.
*/
public final class XPackClient {
private final RestHighLevelClient restHighLevelClient;
private final WatcherClient watcherClient;
XPackClient(RestHighLevelClient restHighLevelClient) {
this.restHighLevelClient = restHighLevelClient;
this.watcherClient = new WatcherClient(restHighLevelClient);
}
public WatcherClient watcher() {
return watcherClient;
}
/**

View File

@@ -66,13 +66,13 @@ public class PingAndInfoIT extends ESRestHighLevelClientTestCase {
assertEquals(mainResponse.getBuild().shortHash(), info.getBuildInfo().getHash());
assertEquals("basic", info.getLicenseInfo().getType());
assertEquals("basic", info.getLicenseInfo().getMode());
assertEquals("trial", info.getLicenseInfo().getType());
assertEquals("trial", info.getLicenseInfo().getMode());
assertEquals(LicenseStatus.ACTIVE, info.getLicenseInfo().getStatus());
FeatureSet graph = info.getFeatureSetsInfo().getFeatureSets().get("graph");
assertNotNull(graph.description());
assertFalse(graph.available());
assertTrue(graph.available());
assertTrue(graph.enabled());
assertNull(graph.nativeCodeInfo());
FeatureSet monitoring = info.getFeatureSetsInfo().getFeatureSets().get("monitoring");
@@ -82,7 +82,7 @@ public class PingAndInfoIT extends ESRestHighLevelClientTestCase {
assertNull(monitoring.nativeCodeInfo());
FeatureSet ml = info.getFeatureSetsInfo().getFeatureSets().get("ml");
assertNotNull(ml.description());
assertFalse(ml.available());
assertTrue(ml.available());
assertTrue(ml.enabled());
assertEquals(mainResponse.getVersion().toString(),
ml.nativeCodeInfo().get("version").toString().replace("-SNAPSHOT", ""));

View File

@@ -41,9 +41,9 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest;
import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest;
import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
@@ -125,6 +125,7 @@ import org.elasticsearch.index.rankeval.RankEvalSpec;
import org.elasticsearch.index.rankeval.RatedRequest;
import org.elasticsearch.index.rankeval.RestRankEvalAction;
import org.elasticsearch.protocol.xpack.XPackInfoRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.script.ScriptType;
@@ -145,6 +146,7 @@ import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.RandomObjects;
import org.hamcrest.CoreMatchers;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
@@ -2523,6 +2525,35 @@ public class RequestConvertersTests extends ESTestCase {
assertEquals(expectedParams, request.getParameters());
}
public void testXPackPutWatch() throws Exception {
PutWatchRequest putWatchRequest = new PutWatchRequest();
String watchId = randomAlphaOfLength(10);
putWatchRequest.setId(watchId);
String body = randomAlphaOfLength(20);
putWatchRequest.setSource(new BytesArray(body), XContentType.JSON);
Map<String, String> expectedParams = new HashMap<>();
if (randomBoolean()) {
putWatchRequest.setActive(false);
expectedParams.put("active", "false");
}
if (randomBoolean()) {
long version = randomLongBetween(10, 100);
putWatchRequest.setVersion(version);
expectedParams.put("version", String.valueOf(version));
}
Request request = RequestConverters.xPackWatcherPutWatch(putWatchRequest);
assertEquals(HttpPut.METHOD_NAME, request.getMethod());
assertEquals("/_xpack/watcher/watch/" + watchId, request.getEndpoint());
assertEquals(expectedParams, request.getParameters());
assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters()));
ByteArrayOutputStream bos = new ByteArrayOutputStream();
request.getEntity().writeTo(bos);
assertThat(bos.toString("UTF-8"), is(body));
}
/**
* Randomize the {@link FetchSourceContext} request parameters.
*/

View File

@@ -767,7 +767,9 @@ public class RestHighLevelClientTests extends ESTestCase {
private static Stream<Tuple<String, Method>> getSubClientMethods(String namespace, Class<?> clientClass) {
return Arrays.stream(clientClass.getMethods()).filter(method -> method.getDeclaringClass().equals(clientClass))
.map(method -> Tuple.tuple(namespace + "." + toSnakeCase(method.getName()), method));
.map(method -> Tuple.tuple(namespace + "." + toSnakeCase(method.getName()), method))
.flatMap(tuple -> tuple.v2().getReturnType().getName().endsWith("Client")
? getSubClientMethods(tuple.v1(), tuple.v2().getReturnType()) : Stream.of(tuple));
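// sub-client getters (return types whose name ends in "Client") are expanded recursively,
// yielding entries like "xpack.watcher.put_watch"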
}
private static String toSnakeCase(String camelCase) {

View File

@@ -0,0 +1,46 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import static org.hamcrest.Matchers.is;
public class WatcherIT extends ESRestHighLevelClientTestCase {
public void testPutWatch() throws Exception {
String watchId = randomAlphaOfLength(10);
String json = "{ \n" +
" \"trigger\": { \"schedule\": { \"interval\": \"10h\" } },\n" +
" \"input\": { \"none\": {} },\n" +
" \"actions\": { \"logme\": { \"logging\": { \"text\": \"{{ctx.payload}}\" } } }\n" +
"}";
BytesReference bytesReference = new BytesArray(json);
PutWatchRequest putWatchRequest = new PutWatchRequest(watchId, bytesReference, XContentType.JSON);
PutWatchResponse putWatchResponse = highLevelClient().xpack().watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);
assertThat(putWatchResponse.isCreated(), is(true));
assertThat(putWatchResponse.getId(), is(watchId));
assertThat(putWatchResponse.getVersion(), is(1L));
}
}

View File

@@ -39,11 +39,13 @@ import org.elasticsearch.protocol.xpack.XPackUsageRequest;
import org.elasticsearch.protocol.xpack.XPackUsageResponse;
import java.io.IOException;
import java.time.Instant;
import java.util.EnumSet;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.is;
/**
@@ -97,8 +99,7 @@ public class MiscellaneousDocumentationIT extends ESRestHighLevelClientTestCase
//tag::x-pack-info-response
BuildInfo build = response.getBuildInfo(); // <1>
LicenseInfo license = response.getLicenseInfo(); // <2>
assertEquals(XPackInfoResponse.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS,
license.getExpiryDate()); // <3>
assertThat(license.getExpiryDate(), is(greaterThan(Instant.now().toEpochMilli()))); // <3>
FeatureSetsInfo features = response.getFeatureSetsInfo(); // <4>
//end::x-pack-info-response

View File

@@ -0,0 +1,92 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.documentation;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
public void testPutWatch() throws Exception {
RestHighLevelClient client = highLevelClient();
{
//tag::x-pack-put-watch-execute
// you can also use the WatchSourceBuilder from org.elasticsearch.plugin:x-pack-core to create a watch programmatically
BytesReference watch = new BytesArray("{ \n" +
" \"trigger\": { \"schedule\": { \"interval\": \"10h\" } },\n" +
" \"input\": { \"simple\": { \"foo\" : \"bar\" } },\n" +
" \"actions\": { \"logme\": { \"logging\": { \"text\": \"{{ctx.payload}}\" } } }\n" +
"}");
PutWatchRequest request = new PutWatchRequest("my_watch_id", watch, XContentType.JSON);
request.setActive(false); // <1>
PutWatchResponse response = client.xpack().watcher().putWatch(request, RequestOptions.DEFAULT);
//end::x-pack-put-watch-execute
//tag::x-pack-put-watch-response
String watchId = response.getId(); // <1>
boolean isCreated = response.isCreated(); // <2>
long version = response.getVersion(); // <3>
//end::x-pack-put-watch-response
}
{
BytesReference watch = new BytesArray("{ \n" +
" \"trigger\": { \"schedule\": { \"interval\": \"10h\" } },\n" +
" \"input\": { \"simple\": { \"foo\" : \"bar\" } },\n" +
" \"actions\": { \"logme\": { \"logging\": { \"text\": \"{{ctx.payload}}\" } } }\n" +
"}");
PutWatchRequest request = new PutWatchRequest("my_other_watch_id", watch, XContentType.JSON);
// tag::x-pack-put-watch-execute-listener
ActionListener<PutWatchResponse> listener = new ActionListener<PutWatchResponse>() {
@Override
public void onResponse(PutWatchResponse response) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::x-pack-put-watch-execute-listener
// Replace the empty listener with a blocking listener for the test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::x-pack-put-watch-execute-async
client.xpack().watcher().putWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::x-pack-put-watch-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
}

View File

@@ -25,6 +25,8 @@ apply plugin: 'elasticsearch.build'
targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7
group = "${group}.client.test"
dependencies {
compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"

View File

@@ -57,7 +57,8 @@ The Java High Level REST Client supports the following Miscellaneous APIs:
include::miscellaneous/main.asciidoc[]
include::miscellaneous/ping.asciidoc[]
include::miscellaneous/x-pack-info.asciidoc[]
include::x-pack/x-pack-info.asciidoc[]
include::x-pack/watcher/put-watch.asciidoc[]
== Indices APIs

View File

@@ -0,0 +1,55 @@
[[java-rest-high-x-pack-watcher-put-watch]]
=== Put Watch API
[[java-rest-high-x-pack-watcher-put-watch-execution]]
==== Execution
A watch can be stored in the cluster using the `putWatch()` method, which is
reached through the `watcher()` method:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/WatcherDocumentationIT.java[x-pack-put-watch-execute]
--------------------------------------------------
<1> Allows storing the watch without triggering it. The active state defaults to `true`
[[java-rest-high-x-pack-watcher-put-watch-response]]
==== Response
The returned `PutWatchResponse` contains `created`, `id`,
and `version` information.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/WatcherDocumentationIT.java[x-pack-put-watch-response]
--------------------------------------------------
<1> `_id` contains the id of the watch
<2> `created` is a boolean indicating whether the watch was created for the first time
<3> `_version` returns the newly created version
[[java-rest-high-x-pack-watcher-put-watch-async]]
==== Asynchronous Execution
This request can be executed asynchronously:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/WatcherDocumentationIT.java[x-pack-put-watch-execute-async]
--------------------------------------------------
<1> The `PutWatchRequest` to execute and the `ActionListener` to use when
the execution completes
The asynchronous method does not block and returns immediately. Once the
request has completed, the `ActionListener` is called back using the
`onResponse` method if the execution completed successfully or using the
`onFailure` method if it failed.
A typical listener for `PutWatchResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/WatcherDocumentationIT.java[x-pack-put-watch-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument

View File

@@ -13,8 +13,8 @@ include::install_remove.asciidoc[]
==== Getting started with AWS
The plugin provides a repository type named `s3` which may be used when creating a repository.
The repository defaults to using
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[IAM Role]
The repository defaults to using https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html[ECS IAM Role] or
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[EC2 IAM Role]
credentials for authentication. The only mandatory setting is the bucket name:
[source,js]

View File

@@ -59,7 +59,8 @@ It also uses the common
[float]
==== TCP Transport Profiles
Elasticsearch allows you to bind to multiple ports on different interfaces by the use of transport profiles. See this example configuration
Elasticsearch allows you to bind to multiple ports on different interfaces by
the use of transport profiles. See this example configuration
[source,yaml]
--------------
@@ -71,10 +72,12 @@ transport.profiles.dmz.port: 9700-9800
transport.profiles.dmz.bind_host: 172.16.1.2
--------------
The `default` profile is a special. It is used as fallback for any other profiles, if those do not have a specific configuration setting set.
Note that the default profile is how other nodes in the cluster will connect to this node usually. In the future this feature will allow to enable node-to-node communication via multiple interfaces.
The `default` profile is special. It is used as a fallback for any other
profiles, if those do not have a specific configuration setting set, and is how
this node connects to other nodes in the cluster.
The following parameters can be configured like that
The following parameters can be configured on each transport profile, as in the
example above:
* `port`: The port to bind to
* `bind_host`: The host to bind

View File

@@ -24,16 +24,6 @@ apply plugin: 'nebula.optional-base'
apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'
publishing {
publications {
nebula {
artifactId 'elasticsearch-cli'
}
}
}
archivesBaseName = 'elasticsearch-cli'
dependencies {
compile 'net.sf.jopt-simple:jopt-simple:5.0.2'
compile "org.elasticsearch:elasticsearch-core:${version}"

View File

@@ -0,0 +1 @@
group = "${group}.plugins.discovery-ec2.qa"

View File

@@ -0,0 +1 @@
group = "${group}.plugins.repository-azure.qa"

View File

@@ -0,0 +1 @@
group = "${group}.plugins.repository-gcs.qa"

View File

@ -92,11 +92,15 @@ String s3TemporaryBasePath = System.getenv("amazon_s3_base_path_temporary")
String s3EC2Bucket = System.getenv("amazon_s3_bucket_ec2")
String s3EC2BasePath = System.getenv("amazon_s3_base_path_ec2")
String s3ECSBucket = System.getenv("amazon_s3_bucket_ecs")
String s3ECSBasePath = System.getenv("amazon_s3_base_path_ecs")
// If all these variables are missing then we are testing against the internal fixture instead, which has the following
// credentials hard-coded in.
if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3PermanentBasePath
&& !s3EC2Bucket && !s3EC2BasePath) {
&& !s3EC2Bucket && !s3EC2BasePath
&& !s3ECSBucket && !s3ECSBasePath) {
s3PermanentAccessKey = 's3_integration_test_permanent_access_key'
s3PermanentSecretKey = 's3_integration_test_permanent_secret_key'
s3PermanentBucket = 'permanent-bucket-test'
@ -105,10 +109,14 @@ if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3P
s3EC2Bucket = 'ec2-bucket-test'
s3EC2BasePath = 'integration_test'
s3ECSBucket = 'ecs-bucket-test'
s3ECSBasePath = 'integration_test'
useFixture = true
} else if (!s3PermanentAccessKey || !s3PermanentSecretKey || !s3PermanentBucket || !s3PermanentBasePath
|| !s3EC2Bucket || !s3EC2BasePath) {
|| !s3EC2Bucket || !s3EC2BasePath
|| !s3ECSBucket || !s3ECSBasePath) {
throw new IllegalArgumentException("not all options specified to run against external S3 service")
}
@ -284,7 +292,8 @@ if (useFixture && minioDistribution) {
// Minio only supports a single access key, see https://github.com/minio/minio/pull/5968
integTestMinioRunner.systemProperty 'tests.rest.blacklist', [
'repository_s3/30_repository_temporary_credentials/*',
'repository_s3/40_repository_ec2_credentials/*'
'repository_s3/40_repository_ec2_credentials/*',
'repository_s3/50_repository_ecs_credentials/*'
].join(",")
project.check.dependsOn(integTestMinio)
@@ -302,7 +311,8 @@ task s3FixtureProperties {
"s3Fixture.temporary_bucket_name" : s3TemporaryBucket,
"s3Fixture.temporary_key" : s3TemporaryAccessKey,
"s3Fixture.temporary_session_token": s3TemporarySessionToken,
"s3Fixture.ec2_bucket_name" : s3EC2Bucket
"s3Fixture.ec2_bucket_name" : s3EC2Bucket,
"s3Fixture.ecs_bucket_name" : s3ECSBucket
]
doLast {
@ -327,7 +337,9 @@ Map<String, Object> expansions = [
'temporary_bucket': s3TemporaryBucket,
'temporary_base_path': s3TemporaryBasePath,
'ec2_bucket': s3EC2Bucket,
'ec2_base_path': s3EC2BasePath
'ec2_base_path': s3EC2BasePath,
'ecs_bucket': s3ECSBucket,
'ecs_base_path': s3ECSBasePath
]
processTestResources {
@@ -364,6 +376,34 @@ integTestCluster {
}
}
integTestRunner.systemProperty 'tests.rest.blacklist', 'repository_s3/50_repository_ecs_credentials/*'
///
RestIntegTestTask integTestECS = project.tasks.create('integTestECS', RestIntegTestTask.class) {
description = "Runs tests using the ECS repository."
}
// The following closure must execute before the afterEvaluate block in the constructor of the following integrationTest tasks:
project.afterEvaluate {
ClusterConfiguration cluster = project.extensions.getByName('integTestECSCluster') as ClusterConfiguration
cluster.dependsOn(project.s3Fixture)
cluster.setting 's3.client.integration_test_ecs.endpoint', "http://${-> s3Fixture.addressAndPort}"
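// the "${-> ...}" lazy GString defers resolving the fixture's address and port until the
// value is actually used, i.e. after the fixture has started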
Task integTestECSTask = project.tasks.getByName('integTestECS')
integTestECSTask.clusterConfig.plugin(project.path)
integTestECSTask.clusterConfig.environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI',
"http://${-> s3Fixture.addressAndPort}/ecs_credentials_endpoint"
integTestECSRunner.systemProperty 'tests.rest.blacklist', [
'repository_s3/10_basic/*',
'repository_s3/20_repository_permanent_credentials/*',
'repository_s3/30_repository_temporary_credentials/*',
'repository_s3/40_repository_ec2_credentials/*'
].join(",")
}
project.check.dependsOn(integTestECS)
///
thirdPartyAudit.excludes = [
// classes are missing
'javax.servlet.ServletContextEvent',

View File

@@ -22,7 +22,7 @@ package org.elasticsearch.repositories.s3;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper;
import com.amazonaws.http.IdleConnectionReaper;
import com.amazonaws.internal.StaticCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
@@ -156,10 +156,11 @@ class S3Service extends AbstractComponent implements Closeable {
}
static class PrivilegedInstanceProfileCredentialsProvider implements AWSCredentialsProvider {
private final InstanceProfileCredentialsProvider credentials;
private final AWSCredentialsProvider credentials;
private PrivilegedInstanceProfileCredentialsProvider() {
this.credentials = new InstanceProfileCredentialsProvider();
// InstanceProfileCredentialsProvider as last item of chain
this.credentials = new EC2ContainerCredentialsProviderWrapper();
}
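// For context, a rough sketch of the fallback the wrapper performs (an illustration of the
// documented behavior, not the AWS SDK source): container (ECS) credentials are preferred
// when the AWS_CONTAINER_CREDENTIALS_* environment variables are set, otherwise the EC2
// instance profile is used.
//
//     AWSCredentialsProvider provider =
//             System.getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") != null
//                     || System.getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI") != null
//                 ? new ContainerCredentialsProvider()          // ECS task-role endpoint
//                 : new InstanceProfileCredentialsProvider();   // EC2 instance metadata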
@Override

View File

@@ -88,7 +88,10 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
final Bucket ec2Bucket = new Bucket("s3Fixture.ec2",
randomAsciiAlphanumOfLength(random, 10), randomAsciiAlphanumOfLength(random, 10));
this.handlers = defaultHandlers(buckets, ec2Bucket);
final Bucket ecsBucket = new Bucket("s3Fixture.ecs",
randomAsciiAlphanumOfLength(random, 10), randomAsciiAlphanumOfLength(random, 10));
this.handlers = defaultHandlers(buckets, ec2Bucket, ecsBucket);
}
private static String nonAuthPath(Request request) {
@@ -174,7 +177,7 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
}
/** Builds the default request handlers **/
private PathTrie<RequestHandler> defaultHandlers(final Map<String, Bucket> buckets, final Bucket ec2Bucket) {
private PathTrie<RequestHandler> defaultHandlers(final Map<String, Bucket> buckets, final Bucket ec2Bucket, final Bucket ecsBucket) {
final PathTrie<RequestHandler> handlers = new PathTrie<>(RestUtils.REST_DECODER);
// HEAD Object
@@ -400,11 +403,18 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
handlers.insert(nonAuthPath(HttpGet.METHOD_NAME, "/latest/meta-data/iam/security-credentials/{profileName}"), (request) -> {
final String profileName = request.getParam("profileName");
if (EC2_PROFILE.equals(profileName) == false) {
return new Response(RestStatus.NOT_FOUND.getStatus(), new HashMap<>(), "unknown credentials".getBytes(UTF_8));
return new Response(RestStatus.NOT_FOUND.getStatus(), new HashMap<>(), "unknown profile".getBytes(UTF_8));
}
return credentialResponseFunction.apply(profileName, ec2Bucket.key, ec2Bucket.token);
});
// GET
//
// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html
handlers.insert(nonAuthPath(HttpGet.METHOD_NAME, "/ecs_credentials_endpoint"),
(request) -> credentialResponseFunction.apply("CPV_ECS", ecsBucket.key, ecsBucket.token));
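// unlike the EC2 metadata handler above, the ECS endpoint is a fixed path with no
// profile name to validate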
return handlers;
}

View File

@@ -0,0 +1,243 @@
# Integration tests for repository-s3
---
setup:
# Register repository with ecs credentials
- do:
snapshot.create_repository:
repository: repository_ecs
body:
type: s3
settings:
bucket: ${ecs_bucket}
client: integration_test_ecs
base_path: ${ecs_base_path}
canned_acl: private
storage_class: standard
---
"Snapshot and Restore with repository-s3 using ecs credentials":
# Get repository
- do:
snapshot.get_repository:
repository: repository_ecs
- match: { repository_ecs.settings.bucket : ${ecs_bucket} }
- match: { repository_ecs.settings.client : "integration_test_ecs" }
- match: { repository_ecs.settings.base_path : ${ecs_base_path} }
- match: { repository_ecs.settings.canned_acl : "private" }
- match: { repository_ecs.settings.storage_class : "standard" }
- is_false: repository_ecs.settings.access_key
- is_false: repository_ecs.settings.secret_key
- is_false: repository_ecs.settings.session_token
# Index documents
- do:
bulk:
refresh: true
body:
- index:
_index: docs
_type: doc
_id: 1
- snapshot: one
- index:
_index: docs
_type: doc
_id: 2
- snapshot: one
- index:
_index: docs
_type: doc
_id: 3
- snapshot: one
- do:
count:
index: docs
- match: {count: 3}
# Create a first snapshot
- do:
snapshot.create:
repository: repository_ecs
snapshot: snapshot-one
wait_for_completion: true
- match: { snapshot.snapshot: snapshot-one }
- match: { snapshot.state : SUCCESS }
- match: { snapshot.include_global_state: true }
- match: { snapshot.shards.failed : 0 }
- do:
snapshot.status:
repository: repository_ecs
snapshot: snapshot-one
- is_true: snapshots
- match: { snapshots.0.snapshot: snapshot-one }
- match: { snapshots.0.state : SUCCESS }
# Index more documents
- do:
bulk:
refresh: true
body:
- index:
_index: docs
_type: doc
_id: 4
- snapshot: two
- index:
_index: docs
_type: doc
_id: 5
- snapshot: two
- index:
_index: docs
_type: doc
_id: 6
- snapshot: two
- index:
_index: docs
_type: doc
_id: 7
- snapshot: two
- do:
count:
index: docs
- match: {count: 7}
# Create a second snapshot
- do:
snapshot.create:
repository: repository_ecs
snapshot: snapshot-two
wait_for_completion: true
- match: { snapshot.snapshot: snapshot-two }
- match: { snapshot.state : SUCCESS }
- match: { snapshot.shards.failed : 0 }
- do:
snapshot.get:
repository: repository_ecs
snapshot: snapshot-one,snapshot-two
- is_true: snapshots
- match: { snapshots.0.state : SUCCESS }
- match: { snapshots.1.state : SUCCESS }
# Delete the index
- do:
indices.delete:
index: docs
# Restore the second snapshot
- do:
snapshot.restore:
repository: repository_ecs
snapshot: snapshot-two
wait_for_completion: true
- do:
count:
index: docs
- match: {count: 7}
# Delete the index again
- do:
indices.delete:
index: docs
# Restore the first snapshot
- do:
snapshot.restore:
repository: repository_ecs
snapshot: snapshot-one
wait_for_completion: true
- do:
count:
index: docs
- match: {count: 3}
# Remove the snapshots
- do:
snapshot.delete:
repository: repository_ecs
snapshot: snapshot-two
- do:
snapshot.delete:
repository: repository_ecs
snapshot: snapshot-one
---
"Register a repository with a non existing bucket":
- do:
catch: /repository_exception/
snapshot.create_repository:
repository: repository_ecs
body:
type: s3
settings:
bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE
client: integration_test_temporary
---
"Register a repository with a non existing client":
- do:
catch: /repository_exception/
snapshot.create_repository:
repository: repository_ecs
body:
type: s3
settings:
bucket: repository_ecs
client: unknown
---
"Get a non existing snapshot":
- do:
catch: /snapshot_missing_exception/
snapshot.get:
repository: repository_ecs
snapshot: missing
---
"Delete a non existing snapshot":
- do:
catch: /snapshot_missing_exception/
snapshot.delete:
repository: repository_ecs
snapshot: missing
---
"Restore a non existing snapshot":
- do:
catch: /snapshot_restore_exception/
snapshot.restore:
repository: repository_ecs
snapshot: missing
wait_for_completion: true
---
teardown:
# Remove our repository
- do:
snapshot.delete_repository:
repository: repository_ecs

View File

@@ -28,7 +28,8 @@ plugins {
dependencies {
compile "junit:junit:${versions.junit}"
compile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
compile "org.hamcrest:hamcrest-core:${versions.hamcrest}"
compile "org.hamcrest:hamcrest-library:${versions.hamcrest}"
compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
@@ -91,13 +92,5 @@ tasks.thirdPartyAudit.excludes = [
'org.apache.log4j.Priority',
// commons-logging provided dependencies
'javax.servlet.ServletContextEvent',
'javax.servlet.ServletContextListener',
// from randomized testing
'org.apache.tools.ant.BuildException',
'org.apache.tools.ant.DirectoryScanner',
'org.apache.tools.ant.Task',
'org.apache.tools.ant.types.FileSet',
'org.easymock.EasyMock',
'org.easymock.IArgumentMatcher',
'org.jmock.core.Constraint'
'javax.servlet.ServletContextListener'
]

View File

@@ -19,10 +19,19 @@
package org.elasticsearch.packaging;
import org.elasticsearch.packaging.test.DefaultDebPreservationTests;
import org.elasticsearch.packaging.test.DefaultDebBasicTests;
import org.elasticsearch.packaging.test.DefaultRpmPreservationTests;
import org.elasticsearch.packaging.test.DefaultRpmBasicTests;
import org.elasticsearch.packaging.test.OssDebPreservationTests;
import org.elasticsearch.packaging.test.OssDebBasicTests;
import org.elasticsearch.packaging.test.OssRpmPreservationTests;
import org.elasticsearch.packaging.test.OssRpmBasicTests;
import org.elasticsearch.packaging.test.OssTarTests;
import org.elasticsearch.packaging.test.OssZipTests;
import org.elasticsearch.packaging.test.DefaultTarTests;
import org.elasticsearch.packaging.test.DefaultZipTests;
import org.elasticsearch.packaging.test.PackageDependenciesTests;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
@@ -31,8 +40,17 @@ import org.junit.runners.Suite.SuiteClasses;
@RunWith(Suite.class)
@SuiteClasses({
DefaultTarTests.class,
DefaultZipTests.class,
OssTarTests.class,
OssZipTests.class
DefaultZipTests.class,
OssZipTests.class,
PackageDependenciesTests.class,
DefaultRpmBasicTests.class,
OssRpmBasicTests.class,
DefaultDebBasicTests.class,
OssDebBasicTests.class,
DefaultDebPreservationTests.class,
OssDebPreservationTests.class,
DefaultRpmPreservationTests.class,
OssRpmPreservationTests.class
})
public class PackagingTests {}

View File

@@ -0,0 +1,127 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering;
import org.elasticsearch.packaging.util.Distribution;
import org.elasticsearch.packaging.util.Installation;
import org.elasticsearch.packaging.util.Shell;
import org.junit.Before;
import org.junit.BeforeClass;
import java.nio.file.Files;
import java.nio.file.Paths;
import static org.elasticsearch.packaging.util.Cleanup.cleanEverything;
import static org.elasticsearch.packaging.util.FileUtils.assertPathsDontExist;
import static org.elasticsearch.packaging.util.FileUtils.assertPathsExist;
import static org.elasticsearch.packaging.util.Packages.SYSVINIT_SCRIPT;
import static org.elasticsearch.packaging.util.Packages.assertInstalled;
import static org.elasticsearch.packaging.util.Packages.assertRemoved;
import static org.elasticsearch.packaging.util.Packages.install;
import static org.elasticsearch.packaging.util.Packages.remove;
import static org.elasticsearch.packaging.util.Packages.packageStatus;
import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation;
import static org.elasticsearch.packaging.util.Platforms.isDPKG;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeThat;
import static org.junit.Assume.assumeTrue;
@TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class)
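// test methods are named test10..., test20..., test30... so that alphabetical ordering
// runs them in the intended sequence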
public abstract class DebPreservationTestCase extends PackagingTestCase {
private static Installation installation;
protected abstract Distribution distribution();
@BeforeClass
public static void cleanup() {
installation = null;
cleanEverything();
}
@Before
public void onlyCompatibleDistributions() {
assumeTrue("only dpkg platforms", isDPKG());
assumeTrue("only compatible distributions", distribution().packaging.compatible);
}
public void test10Install() {
assertRemoved(distribution());
installation = install(distribution());
assertInstalled(distribution());
verifyPackageInstallation(installation, distribution());
}
public void test20Remove() {
assumeThat(installation, is(notNullValue()));
remove(distribution());
// some config files were not removed
assertPathsExist(
installation.config,
installation.config("elasticsearch.yml"),
installation.config("jvm.options"),
installation.config("log4j2.properties")
);
// keystore was removed
assertPathsDontExist(
installation.config("elasticsearch.keystore"),
installation.config(".elasticsearch.keystore.initial_md5sum")
);
// doc files were removed
assertPathsDontExist(
Paths.get("/usr/share/doc/" + distribution().flavor.name),
Paths.get("/usr/share/doc/" + distribution().flavor.name + "/copyright")
);
// sysvinit service file was not removed
assertTrue(Files.exists(SYSVINIT_SCRIPT));
// defaults file was not removed
assertTrue(Files.exists(installation.envFile));
}
public void test30Purge() {
assumeThat(installation, is(notNullValue()));
final Shell sh = new Shell();
sh.run("dpkg --purge " + distribution().flavor.name);
assertRemoved(distribution());
assertPathsDontExist(
installation.config,
installation.envFile,
SYSVINIT_SCRIPT
);
assertThat(packageStatus(distribution()).exitCode, is(1));
}
}

View File

@@ -0,0 +1,31 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import org.elasticsearch.packaging.util.Distribution;
public class DefaultDebBasicTests extends PackageTestCase {
@Override
protected Distribution distribution() {
return Distribution.DEFAULT_DEB;
}
}

View File

@@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import org.elasticsearch.packaging.util.Distribution;
public class DefaultDebPreservationTests extends DebPreservationTestCase {
@Override
protected Distribution distribution() {
return Distribution.DEFAULT_DEB;
}
}

View File

@@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import org.elasticsearch.packaging.util.Distribution;
public class DefaultRpmBasicTests extends PackageTestCase {
@Override
protected Distribution distribution() {
return Distribution.DEFAULT_RPM;
}
}

View File

@@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import org.elasticsearch.packaging.util.Distribution;
public class DefaultRpmPreservationTests extends RpmPreservationTestCase {
@Override
protected Distribution distribution() {
return Distribution.DEFAULT_RPM;
}
}

View File

@@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import org.elasticsearch.packaging.util.Distribution;
public class OssDebBasicTests extends PackageTestCase {
@Override
protected Distribution distribution() {
return Distribution.OSS_DEB;
}
}

View File

@@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import org.elasticsearch.packaging.util.Distribution;
public class OssDebPreservationTests extends DebPreservationTestCase {
@Override
protected Distribution distribution() {
return Distribution.OSS_DEB;
}
}

View File

@@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import org.elasticsearch.packaging.util.Distribution;
public class OssRpmBasicTests extends PackageTestCase {
@Override
protected Distribution distribution() {
return Distribution.OSS_RPM;
}
}

View File

@@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import org.elasticsearch.packaging.util.Distribution;
public class OssRpmPreservationTests extends RpmPreservationTestCase {
@Override
protected Distribution distribution() {
return Distribution.OSS_RPM;
}
}

View File

@@ -0,0 +1,73 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import org.elasticsearch.packaging.util.Platforms;
import org.elasticsearch.packaging.util.Shell;
import org.elasticsearch.packaging.util.Shell.Result;
import java.util.regex.Pattern;
import static junit.framework.TestCase.assertTrue;
import static org.elasticsearch.packaging.util.Distribution.DEFAULT_DEB;
import static org.elasticsearch.packaging.util.Distribution.DEFAULT_RPM;
import static org.elasticsearch.packaging.util.Distribution.OSS_DEB;
import static org.elasticsearch.packaging.util.Distribution.OSS_RPM;
import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile;
import static org.junit.Assume.assumeTrue;
/**
* Tests that linux packages correctly declare their dependencies and their conflicts
*/
public class PackageDependenciesTests extends PackagingTestCase {
public void testDebDependencies() {
assumeTrue(Platforms.isDPKG());
final Shell sh = new Shell();
final Result defaultResult = sh.run("dpkg -I " + getDistributionFile(DEFAULT_DEB));
final Result ossResult = sh.run("dpkg -I " + getDistributionFile(OSS_DEB));
assertTrue(Pattern.compile("(?m)^ Depends:.*bash.*").matcher(defaultResult.stdout).find());
assertTrue(Pattern.compile("(?m)^ Depends:.*bash.*").matcher(ossResult.stdout).find());
assertTrue(Pattern.compile("(?m)^ Conflicts: elasticsearch-oss$").matcher(defaultResult.stdout).find());
assertTrue(Pattern.compile("(?m)^ Conflicts: elasticsearch$").matcher(ossResult.stdout).find());
}
public void testRpmDependencies() {
assumeTrue(Platforms.isRPM());
final Shell sh = new Shell();
final Result defaultDeps = sh.run("rpm -qpR " + getDistributionFile(DEFAULT_RPM));
final Result ossDeps = sh.run("rpm -qpR " + getDistributionFile(OSS_RPM));
assertTrue(Pattern.compile("(?m)^/bin/bash\\s*$").matcher(defaultDeps.stdout).find());
assertTrue(Pattern.compile("(?m)^/bin/bash\\s*$").matcher(ossDeps.stdout).find());
final Result defaultConflicts = sh.run("rpm -qp --conflicts " + getDistributionFile(DEFAULT_RPM));
final Result ossConflicts = sh.run("rpm -qp --conflicts " + getDistributionFile(OSS_RPM));
assertTrue(Pattern.compile("(?m)^elasticsearch-oss\\s*$").matcher(defaultConflicts.stdout).find());
assertTrue(Pattern.compile("(?m)^elasticsearch\\s*$").matcher(ossConflicts.stdout).find());
}
}

View File

@@ -0,0 +1,168 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering;
import org.elasticsearch.packaging.util.Distribution;
import org.elasticsearch.packaging.util.Installation;
import org.elasticsearch.packaging.util.Shell;
import org.elasticsearch.packaging.util.Shell.Result;
import org.junit.Before;
import org.junit.BeforeClass;
import java.io.IOException;
import java.nio.file.Files;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.elasticsearch.packaging.util.Cleanup.cleanEverything;
import static org.elasticsearch.packaging.util.FileUtils.assertPathsDontExist;
import static org.elasticsearch.packaging.util.Packages.SYSTEMD_SERVICE;
import static org.elasticsearch.packaging.util.Packages.assertInstalled;
import static org.elasticsearch.packaging.util.Packages.assertRemoved;
import static org.elasticsearch.packaging.util.Packages.install;
import static org.elasticsearch.packaging.util.Packages.remove;
import static org.elasticsearch.packaging.util.Packages.startElasticsearch;
import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation;
import static org.elasticsearch.packaging.util.Platforms.getOsRelease;
import static org.elasticsearch.packaging.util.Platforms.isSystemd;
import static org.elasticsearch.packaging.util.ServerUtils.runElasticsearchTests;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.isEmptyString;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertFalse;
import static org.junit.Assume.assumeThat;
import static org.junit.Assume.assumeTrue;
@TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class)
public abstract class PackageTestCase extends PackagingTestCase {
private static Installation installation;
protected abstract Distribution distribution();
@BeforeClass
public static void cleanup() {
installation = null;
cleanEverything();
}
@Before
public void onlyCompatibleDistributions() {
assumeTrue("only compatible distributions", distribution().packaging.compatible);
}
public void test10InstallPackage() {
assertRemoved(distribution());
installation = install(distribution());
assertInstalled(distribution());
verifyPackageInstallation(installation, distribution());
}
public void test20PluginsCommandWhenNoPlugins() {
assumeThat(installation, is(notNullValue()));
final Shell sh = new Shell();
assertThat(sh.run(installation.bin("elasticsearch-plugin") + " list").stdout, isEmptyString());
}
public void test30InstallDoesNotStartServer() {
assumeThat(installation, is(notNullValue()));
final Shell sh = new Shell();
assertThat(sh.run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch")));
}
public void test40StartServer() throws IOException {
assumeThat(installation, is(notNullValue()));
startElasticsearch();
runElasticsearchTests();
verifyPackageInstallation(installation, distribution()); // check startup script didn't change permissions
}
public void test50Remove() {
assumeThat(installation, is(notNullValue()));
remove(distribution());
// removing must stop the service
final Shell sh = new Shell();
assertThat(sh.run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch")));
if (isSystemd()) {
final int statusExitCode;
// Before version 231, systemctl returned exit code 3 both for services that were stopped and for
// nonexistent services [1]. In version 231 and later it returns exit code 4 for nonexistent services.
//
// The exception is CentOS 7 and OEL 7, whose systemd reports a version earlier than 231 but still
// returns exit code 4 for nonexistent services. CentOS 6 does not have an /etc/os-release, but that's
// fine because it also doesn't use systemd.
//
// [1] https://github.com/systemd/systemd/pull/3385
if (getOsRelease().contains("ID=\"centos\"") || getOsRelease().contains("ID=\"ol\"")) {
statusExitCode = 4;
} else {
final Result versionResult = sh.run("systemctl --version");
final Matcher matcher = Pattern.compile("^systemd (\\d+)\n").matcher(versionResult.stdout);
matcher.find();
final int version = Integer.parseInt(matcher.group(1));
statusExitCode = version < 231
? 3
: 4;
}
assertThat(sh.runIgnoreExitCode("systemctl status elasticsearch.service").exitCode, is(statusExitCode));
assertThat(sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").exitCode, is(1));
}
assertPathsDontExist(
installation.bin,
installation.lib,
installation.modules,
installation.plugins,
installation.logs,
installation.pidDir
);
assertFalse(Files.exists(SYSTEMD_SERVICE));
}
public void test60Reinstall() {
assumeThat(installation, is(notNullValue()));
installation = install(distribution());
assertInstalled(distribution());
verifyPackageInstallation(installation, distribution());
remove(distribution());
assertRemoved(distribution());
}
}
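
The branch in test50Remove reduces to parsing the version out of the first line of `systemctl --version` and mapping it to the expected exit code. A condensed sketch of just that mapping, using made-up version strings as input:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SystemdExitCodeSketch {
    // Exit code systemctl is expected to return for a unit that does not exist.
    static int expectedStatusExitCode(String systemctlVersionOutput) {
        Matcher matcher = Pattern.compile("^systemd (\\d+)").matcher(systemctlVersionOutput);
        if (matcher.find() == false) {
            throw new IllegalArgumentException("unrecognized systemctl --version output");
        }
        int version = Integer.parseInt(matcher.group(1));
        return version < 231 ? 3 : 4; // behavior changed by https://github.com/systemd/systemd/pull/3385
    }

    public static void main(String[] args) {
        System.out.println(expectedStatusExitCode("systemd 229")); // 3
        System.out.println(expectedStatusExitCode("systemd 239")); // 4
    }
}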

View File

@ -0,0 +1,141 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering;
import org.elasticsearch.packaging.util.Distribution;
import org.elasticsearch.packaging.util.Installation;
import org.elasticsearch.packaging.util.Shell;
import org.junit.Before;
import org.junit.BeforeClass;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Stream;
import static org.elasticsearch.packaging.util.Cleanup.cleanEverything;
import static org.elasticsearch.packaging.util.FileUtils.append;
import static org.elasticsearch.packaging.util.FileUtils.assertPathsDontExist;
import static org.elasticsearch.packaging.util.Packages.SYSTEMD_SERVICE;
import static org.elasticsearch.packaging.util.Packages.SYSVINIT_SCRIPT;
import static org.elasticsearch.packaging.util.Packages.assertInstalled;
import static org.elasticsearch.packaging.util.Packages.assertRemoved;
import static org.elasticsearch.packaging.util.Packages.install;
import static org.elasticsearch.packaging.util.Packages.remove;
import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation;
import static org.elasticsearch.packaging.util.Platforms.isRPM;
import static org.elasticsearch.packaging.util.Platforms.isSystemd;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeThat;
import static org.junit.Assume.assumeTrue;
@TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class)
public abstract class RpmPreservationTestCase extends PackagingTestCase {
private static Installation installation;
protected abstract Distribution distribution();
@BeforeClass
public static void cleanup() {
installation = null;
cleanEverything();
}
@Before
public void onlyCompatibleDistributions() {
assumeTrue("only rpm platforms", isRPM());
assumeTrue("only compatible distributions", distribution().packaging.compatible);
}
public void test10Install() {
assertRemoved(distribution());
installation = install(distribution());
assertInstalled(distribution());
verifyPackageInstallation(installation, distribution());
}
public void test20Remove() {
assumeThat(installation, is(notNullValue()));
remove(distribution());
// config was removed
assertFalse(Files.exists(installation.config));
// sysvinit service file was removed
assertFalse(Files.exists(SYSVINIT_SCRIPT));
// defaults file was removed
assertFalse(Files.exists(installation.envFile));
}
public void test30PreserveConfig() {
final Shell sh = new Shell();
installation = install(distribution());
assertInstalled(distribution());
verifyPackageInstallation(installation, distribution());
sh.run("echo foobar | " + installation.executables().elasticsearchKeystore + " add --stdin foo.bar");
Stream.of(
installation.config("elasticsearch.yml"),
installation.config("jvm.options"),
installation.config("log4j2.properties")
).forEach(path -> append(path, "# foo"));
remove(distribution());
assertRemoved(distribution());
if (isSystemd()) {
assertThat(sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").exitCode, is(1));
}
assertPathsDontExist(
installation.bin,
installation.lib,
installation.modules,
installation.plugins,
installation.logs,
installation.pidDir,
installation.envFile,
SYSVINIT_SCRIPT,
SYSTEMD_SERVICE
);
assertTrue(Files.exists(installation.config));
assertTrue(Files.exists(installation.config("elasticsearch.keystore")));
Stream.of(
"elasticsearch.yml",
"jvm.options",
"log4j2.properties"
).forEach(configFile -> {
final Path original = installation.config(configFile);
final Path saved = installation.config(configFile + ".rpmsave");
assertFalse(original + " should not exist", Files.exists(original));
assertTrue(saved + " should exist", Files.exists(saved));
});
}
}
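
test30PreserveConfig leans on FileUtils.append, which is referenced by the imports but not shown in this diff. A plausible minimal implementation, assuming it simply appends a line and wraps the checked exception so the helper can be used inside lambdas:

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class AppendSketch {
    // Hypothetical shape of FileUtils.append: append text plus a newline, creating the file if needed.
    static void append(Path path, String text) {
        try {
            Files.write(path, (text + System.lineSeparator()).getBytes(StandardCharsets.UTF_8),
                StandardOpenOption.CREATE, StandardOpenOption.APPEND);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}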

View File

@ -35,7 +35,7 @@ import static org.elasticsearch.packaging.util.FileMatcher.p660;
import static org.elasticsearch.packaging.util.FileMatcher.p755;
import static org.elasticsearch.packaging.util.FileUtils.getCurrentVersion;
import static org.elasticsearch.packaging.util.FileUtils.getDefaultArchiveInstallPath;
import static org.elasticsearch.packaging.util.FileUtils.getPackagingArchivesDir;
import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile;
import static org.elasticsearch.packaging.util.FileUtils.lsGlob;
import static org.elasticsearch.packaging.util.FileUtils.mv;
@ -66,7 +66,7 @@ public class Archives {
public static Installation installArchive(Distribution distribution, Path fullInstallPath, String version) {
final Shell sh = new Shell();
final Path distributionFile = getPackagingArchivesDir().resolve(distribution.filename(version));
final Path distributionFile = getDistributionFile(distribution);
final Path baseInstallPath = fullInstallPath.getParent();
final Path extractedPath = baseInstallPath.resolve("elasticsearch-" + version);
@ -106,7 +106,7 @@ public class Archives {
Platforms.onLinux(() -> setupArchiveUsersLinux(fullInstallPath));
Platforms.onWindows(() -> setupArchiveUsersWindows(fullInstallPath));
return new Installation(fullInstallPath);
return Installation.ofArchive(fullInstallPath);
}
private static void setupArchiveUsersLinux(Path installPath) {
@ -176,7 +176,6 @@ public class Archives {
).forEach(dir -> assertThat(dir, file(Directory, owner, owner, p755)));
assertThat(Files.exists(es.data), is(false));
assertThat(Files.exists(es.scripts), is(false));
assertThat(es.bin, file(Directory, owner, owner, p755));
assertThat(es.lib, file(Directory, owner, owner, p755));
@ -209,7 +208,7 @@ public class Archives {
"elasticsearch.yml",
"jvm.options",
"log4j2.properties"
).forEach(config -> assertThat(es.config(config), file(File, owner, owner, p660)));
).forEach(configFile -> assertThat(es.config(configFile), file(File, owner, owner, p660)));
Stream.of(
"NOTICE.txt",
@ -252,7 +251,7 @@ public class Archives {
"roles.yml",
"role_mapping.yml",
"log4j2.properties"
).forEach(config -> assertThat(es.config(config), file(File, owner, owner, p660)));
).forEach(configFile -> assertThat(es.config(configFile), file(File, owner, owner, p660)));
}
public static void runElasticsearch(Installation installation) throws IOException {

View File

@ -27,11 +27,9 @@ import java.util.List;
import static org.elasticsearch.packaging.util.FileUtils.getTempDir;
import static org.elasticsearch.packaging.util.FileUtils.lsGlob;
import static org.elasticsearch.packaging.util.Platforms.isAptGet;
import static org.elasticsearch.packaging.util.Platforms.isDPKG;
import static org.elasticsearch.packaging.util.Platforms.isRPM;
import static org.elasticsearch.packaging.util.Platforms.isSystemd;
import static org.elasticsearch.packaging.util.Platforms.isYUM;
public class Cleanup {
@ -100,19 +98,14 @@ public class Cleanup {
final Shell sh = new Shell();
if (isRPM()) {
sh.runIgnoreExitCode("rpm --quiet -e elasticsearch elasticsearch-oss");
}
if (isYUM()) {
sh.runIgnoreExitCode("yum remove -y elasticsearch elasticsearch-oss");
// Erasing both packages in one rpm command removes neither, because the two packages can never be
// installed at the same time. Erasing them separately may leave behind config files in
// /etc/elasticsearch, but a later step in this cleanup will get them
sh.runIgnoreExitCode("rpm --quiet -e elasticsearch");
sh.runIgnoreExitCode("rpm --quiet -e elasticsearch-oss");
}
if (isDPKG()) {
sh.runIgnoreExitCode("dpkg --purge elasticsearch elasticsearch-oss");
}
if (isAptGet()) {
sh.runIgnoreExitCode("apt-get --quiet --yes purge elasticsearch elasticsearch-oss");
}
}
}

View File

@ -47,6 +47,7 @@ public class FileMatcher extends TypeSafeMatcher<Path> {
public enum Fileness { File, Directory }
public static final Set<PosixFilePermission> p755 = fromString("rwxr-xr-x");
public static final Set<PosixFilePermission> p750 = fromString("rwxr-x---");
public static final Set<PosixFilePermission> p660 = fromString("rw-rw----");
public static final Set<PosixFilePermission> p644 = fromString("rw-r--r--");

View File

@ -33,11 +33,14 @@ import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.FileOwnerAttributeView;
import java.nio.file.attribute.PosixFileAttributes;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.IsNot.not;
import static org.hamcrest.text.IsEmptyString.isEmptyOrNullString;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
* Wrappers and convenience methods for common filesystem operations
@ -160,4 +163,20 @@ public class FileUtils {
assertThat(fromEnv, not(isEmptyOrNullString()));
return Paths.get(fromEnv);
}
public static Path getDistributionFile(Distribution distribution) {
return getDistributionFile(distribution, getCurrentVersion());
}
public static Path getDistributionFile(Distribution distribution, String version) {
return getPackagingArchivesDir().resolve(distribution.filename(version));
}
public static void assertPathsExist(Path... paths) {
Arrays.stream(paths).forEach(path -> assertTrue(path + " should exist", Files.exists(path)));
}
public static void assertPathsDontExist(Path... paths) {
Arrays.stream(paths).forEach(path -> assertFalse(path + " should not exist", Files.exists(path)));
}
}
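
A hedged usage sketch of the helpers added above, as they might appear inside a packaging test; the explicit version number is invented for illustration:

import java.nio.file.Path;
import java.nio.file.Paths;

import static org.elasticsearch.packaging.util.Distribution.OSS_RPM;
import static org.elasticsearch.packaging.util.FileUtils.assertPathsDontExist;
import static org.elasticsearch.packaging.util.FileUtils.assertPathsExist;
import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile;

public class FileUtilsUsageSketch {
    void checkDistributionFiles() {
        Path rpm = getDistributionFile(OSS_RPM);            // resolves against the current build version
        Path older = getDistributionFile(OSS_RPM, "6.3.0"); // explicit version (made-up number)
        assertPathsExist(rpm);
        assertPathsDontExist(older, Paths.get("/tmp/does-not-exist"));
    }
}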

View File

@ -20,6 +20,7 @@
package org.elasticsearch.packaging.util;
import java.nio.file.Path;
import java.nio.file.Paths;
/**
* Represents an installation of Elasticsearch
@ -34,9 +35,10 @@ public class Installation {
public final Path logs;
public final Path plugins;
public final Path modules;
public final Path scripts;
public final Path pidDir;
public final Path envFile;
public Installation(Path home, Path config, Path data, Path logs, Path plugins, Path modules, Path scripts) {
public Installation(Path home, Path config, Path data, Path logs, Path plugins, Path modules, Path pidDir, Path envFile) {
this.home = home;
this.bin = home.resolve("bin");
this.lib = home.resolve("lib");
@ -46,18 +48,38 @@ public class Installation {
this.logs = logs;
this.plugins = plugins;
this.modules = modules;
this.scripts = scripts;
this.pidDir = pidDir;
this.envFile = envFile;
}
public Installation(Path home) {
this(
public static Installation ofArchive(Path home) {
return new Installation(
home,
home.resolve("config"),
home.resolve("data"),
home.resolve("logs"),
home.resolve("plugins"),
home.resolve("modules"),
home.resolve("scripts")
null,
null
);
}
public static Installation ofPackage(Distribution.Packaging packaging) {
final Path envFile = (packaging == Distribution.Packaging.RPM)
? Paths.get("/etc/sysconfig/elasticsearch")
: Paths.get("/etc/default/elasticsearch");
return new Installation(
Paths.get("/usr/share/elasticsearch"),
Paths.get("/etc/elasticsearch"),
Paths.get("/var/lib/elasticsearch"),
Paths.get("/var/log/elasticsearch"),
Paths.get("/usr/share/elasticsearch/plugins"),
Paths.get("/usr/share/elasticsearch/modules"),
Paths.get("/var/run/elasticsearch"),
envFile
);
}
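
The two factories encode the layout difference between archive and package installs: archives keep everything under the extracted home, while packages follow the Linux filesystem hierarchy and differ only in where the environment file lives. A usage sketch, with an invented archive path:

import java.nio.file.Path;
import java.nio.file.Paths;

import org.elasticsearch.packaging.util.Distribution;
import org.elasticsearch.packaging.util.Installation;

public class InstallationLayoutSketch {
    void layouts() {
        // Archive install: config/data/logs all live under the extracted home directory.
        Installation archive = Installation.ofArchive(Paths.get("/opt/elasticsearch-6.4.0"));
        Path archiveConfig = archive.config; // /opt/elasticsearch-6.4.0/config

        // Package install: the env file location is the only RPM/DEB difference modeled here.
        Installation pkg = Installation.ofPackage(Distribution.Packaging.RPM);
        Path envFile = pkg.envFile; // /etc/sysconfig/elasticsearch
    }
}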

View File

@ -0,0 +1,259 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.util;
import org.elasticsearch.packaging.util.Shell.Result;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.regex.Pattern;
import java.util.stream.Stream;
import static org.elasticsearch.packaging.util.FileMatcher.Fileness.Directory;
import static org.elasticsearch.packaging.util.FileMatcher.Fileness.File;
import static org.elasticsearch.packaging.util.FileMatcher.file;
import static org.elasticsearch.packaging.util.FileMatcher.p644;
import static org.elasticsearch.packaging.util.FileMatcher.p660;
import static org.elasticsearch.packaging.util.FileMatcher.p750;
import static org.elasticsearch.packaging.util.FileMatcher.p755;
import static org.elasticsearch.packaging.util.FileUtils.getCurrentVersion;
import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile;
import static org.elasticsearch.packaging.util.Platforms.isSysVInit;
import static org.elasticsearch.packaging.util.Platforms.isSystemd;
import static org.elasticsearch.packaging.util.ServerUtils.waitForElasticsearch;
import static org.hamcrest.CoreMatchers.anyOf;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class Packages {
public static final Path SYSVINIT_SCRIPT = Paths.get("/etc/init.d/elasticsearch");
public static final Path SYSTEMD_SERVICE = Paths.get("/usr/lib/systemd/system/elasticsearch.service");
public static void assertInstalled(Distribution distribution) {
final Result status = packageStatus(distribution);
assertThat(status.exitCode, is(0));
Platforms.onDPKG(() -> assertFalse(Pattern.compile("(?m)^Status:.+deinstall ok").matcher(status.stdout).find()));
}
public static void assertRemoved(Distribution distribution) {
final Result status = packageStatus(distribution);
Platforms.onRPM(() -> assertThat(status.exitCode, is(1)));
Platforms.onDPKG(() -> {
assertThat(status.exitCode, anyOf(is(0), is(1)));
if (status.exitCode == 0) {
assertTrue(Pattern.compile("(?m)^Status:.+deinstall ok").matcher(status.stdout).find());
}
});
}
public static Result packageStatus(Distribution distribution) {
final Shell sh = new Shell();
final Result result;
if (distribution.packaging == Distribution.Packaging.RPM) {
result = sh.runIgnoreExitCode("rpm -qe " + distribution.flavor.name);
} else {
result = sh.runIgnoreExitCode("dpkg -s " + distribution.flavor.name);
}
return result;
}
public static Installation install(Distribution distribution) {
return install(distribution, getCurrentVersion());
}
public static Installation install(Distribution distribution, String version) {
final Shell sh = new Shell();
final Path distributionFile = getDistributionFile(distribution, version);
Platforms.onRPM(() -> sh.run("rpm -i " + distributionFile));
Platforms.onDPKG(() -> sh.run("dpkg -i " + distributionFile));
return Installation.ofPackage(distribution.packaging);
}
public static void remove(Distribution distribution) {
final Shell sh = new Shell();
Platforms.onRPM(() -> {
sh.run("rpm -e " + distribution.flavor.name);
final Result status = packageStatus(distribution);
assertThat(status.exitCode, is(1));
});
Platforms.onDPKG(() -> {
sh.run("dpkg -r " + distribution.flavor.name);
final Result status = packageStatus(distribution);
assertThat(status.exitCode, is(0));
assertTrue(Pattern.compile("(?m)^Status:.+deinstall ok").matcher(status.stdout).find());
});
}
public static void verifyPackageInstallation(Installation installation, Distribution distribution) {
verifyOssInstallation(installation, distribution);
if (distribution.flavor == Distribution.Flavor.DEFAULT) {
verifyDefaultInstallation(installation);
}
}
private static void verifyOssInstallation(Installation es, Distribution distribution) {
final Shell sh = new Shell();
sh.run("id elasticsearch");
sh.run("getent group elasticsearch");
final Result passwdResult = sh.run("getent passwd elasticsearch");
final Path homeDir = Paths.get(passwdResult.stdout.trim().split(":")[5]);
assertFalse("elasticsearch user home directory must not exist", Files.exists(homeDir));
Stream.of(
es.home,
es.plugins,
es.modules
).forEach(dir -> assertThat(dir, file(Directory, "root", "root", p755)));
assertThat(es.pidDir, file(Directory, "elasticsearch", "elasticsearch", p755));
Stream.of(
es.data,
es.logs
).forEach(dir -> assertThat(dir, file(Directory, "elasticsearch", "elasticsearch", p750)));
// we shell out here because java's posix file permission view doesn't support special modes
assertThat(es.config, file(Directory, "root", "elasticsearch", p750));
assertThat(sh.run("find \"" + es.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750"));
Stream.of(
"elasticsearch.keystore",
"elasticsearch.yml",
"jvm.options",
"log4j2.properties"
).forEach(configFile -> assertThat(es.config(configFile), file(File, "root", "elasticsearch", p660)));
assertThat(es.config(".elasticsearch.keystore.initial_md5sum"), file(File, "root", "elasticsearch", p644));
assertThat(sh.run("sudo -u elasticsearch " + es.bin("elasticsearch-keystore") + " list").stdout, containsString("keystore.seed"));
Stream.of(
es.bin,
es.lib
).forEach(dir -> assertThat(dir, file(Directory, "root", "root", p755)));
Stream.of(
"elasticsearch",
"elasticsearch-plugin",
"elasticsearch-keystore",
"elasticsearch-translog"
).forEach(executable -> assertThat(es.bin(executable), file(File, "root", "root", p755)));
Stream.of(
"NOTICE.txt",
"README.textile"
).forEach(doc -> assertThat(es.home.resolve(doc), file(File, "root", "root", p644)));
assertThat(es.envFile, file(File, "root", "elasticsearch", p660));
if (distribution.packaging == Distribution.Packaging.RPM) {
assertThat(es.home.resolve("LICENSE.txt"), file(File, "root", "root", p644));
} else {
Path copyrightDir = Paths.get(sh.run("readlink -f /usr/share/doc/" + distribution.flavor.name).stdout.trim());
assertThat(copyrightDir, file(Directory, "root", "root", p755));
assertThat(copyrightDir.resolve("copyright"), file(File, "root", "root", p644));
}
if (isSystemd()) {
Stream.of(
SYSTEMD_SERVICE,
Paths.get("/usr/lib/tmpfiles.d/elasticsearch.conf"),
Paths.get("/usr/lib/sysctl.d/elasticsearch.conf")
).forEach(confFile -> assertThat(confFile, file(File, "root", "root", p644)));
final String sysctlExecutable = (distribution.packaging == Distribution.Packaging.RPM)
? "/usr/sbin/sysctl"
: "/sbin/sysctl";
assertThat(sh.run(sysctlExecutable + " vm.max_map_count").stdout, containsString("vm.max_map_count = 262144"));
}
if (isSysVInit()) {
assertThat(SYSVINIT_SCRIPT, file(File, "root", "root", p750));
}
}
private static void verifyDefaultInstallation(Installation es) {
Stream.of(
"elasticsearch-certgen",
"elasticsearch-certutil",
"elasticsearch-croneval",
"elasticsearch-migrate",
"elasticsearch-saml-metadata",
"elasticsearch-setup-passwords",
"elasticsearch-sql-cli",
"elasticsearch-syskeygen",
"elasticsearch-users",
"x-pack-env",
"x-pack-security-env",
"x-pack-watcher-env"
).forEach(executable -> assertThat(es.bin(executable), file(File, "root", "root", p755)));
// at this time we only install the current version of archive distributions, but if that changes we'll need to pass
// the version through here
assertThat(es.bin("elasticsearch-sql-cli-" + getCurrentVersion() + ".jar"), file(File, "root", "root", p755));
Stream.of(
"users",
"users_roles",
"roles.yml",
"role_mapping.yml",
"log4j2.properties"
).forEach(configFile -> assertThat(es.config(configFile), file(File, "root", "elasticsearch", p660)));
}
public static void startElasticsearch() throws IOException {
final Shell sh = new Shell();
if (isSystemd()) {
sh.run("systemctl daemon-reload");
sh.run("systemctl enable elasticsearch.service");
sh.run("systemctl is-enabled elasticsearch.service");
sh.run("systemctl start elasticsearch.service");
} else {
sh.run("service elasticsearch start");
}
waitForElasticsearch();
if (isSystemd()) {
sh.run("systemctl is-active elasticsearch.service");
sh.run("systemctl status elasticsearch.service");
} else {
sh.run("service elasticsearch status");
}
}
}
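
Taken together, the helpers above compose into the install/verify/start/remove flow that the package test cases follow. A sketch of that flow, assuming the same static methods shown in this file:

import java.io.IOException;

import org.elasticsearch.packaging.util.Distribution;
import org.elasticsearch.packaging.util.Installation;
import org.elasticsearch.packaging.util.Packages;

public class PackageLifecycleSketch {
    void lifecycle(Distribution distribution) throws IOException {
        Packages.assertRemoved(distribution);                       // precondition: nothing installed
        Installation installation = Packages.install(distribution); // rpm -i / dpkg -i
        Packages.verifyPackageInstallation(installation, distribution);
        Packages.startElasticsearch();                              // systemd or SysV init
        Packages.remove(distribution);                              // rpm -e / dpkg -r
        Packages.assertRemoved(distribution);
    }
}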

View File

@ -19,11 +19,23 @@
package org.elasticsearch.packaging.util;
import java.nio.file.Paths;
import static org.elasticsearch.packaging.util.FileUtils.slurp;
public class Platforms {
public static final String OS_NAME = System.getProperty("os.name");
public static final boolean LINUX = OS_NAME.startsWith("Linux");
public static final boolean WINDOWS = OS_NAME.startsWith("Windows");
public static String getOsRelease() {
if (LINUX) {
return slurp(Paths.get("/etc/os-release"));
} else {
throw new RuntimeException("os-release is only supported on linux");
}
}
public static boolean isDPKG() {
if (WINDOWS) {
return false;
@ -31,13 +43,6 @@ public class Platforms {
return new Shell().runIgnoreExitCode("which dpkg").isSuccess();
}
public static boolean isAptGet() {
if (WINDOWS) {
return false;
}
return new Shell().runIgnoreExitCode("which apt-get").isSuccess();
}
public static boolean isRPM() {
if (WINDOWS) {
return false;
@ -45,13 +50,6 @@ public class Platforms {
return new Shell().runIgnoreExitCode("which rpm").isSuccess();
}
public static boolean isYUM() {
if (WINDOWS) {
return false;
}
return new Shell().runIgnoreExitCode("which yum").isSuccess();
}
public static boolean isSystemd() {
if (WINDOWS) {
return false;
@ -78,6 +76,18 @@ public class Platforms {
}
}
public static void onRPM(PlatformAction action) {
if (isRPM()) {
action.run();
}
}
public static void onDPKG(PlatformAction action) {
if (isDPKG()) {
action.run();
}
}
/**
* Essentially a Runnable, but we make the distinction so it's more clear that these are synchronous
*/
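
A usage sketch of PlatformAction with the new onRPM/onDPKG guards; each lambda runs synchronously, and only on hosts where the matching package manager exists:

import org.elasticsearch.packaging.util.Platforms;
import org.elasticsearch.packaging.util.Shell;

public class PlatformActionSketch {
    void cleanPackages() {
        Shell sh = new Shell();
        // Each lambda is a PlatformAction; nothing runs on hosts without the matching tool.
        Platforms.onRPM(() -> sh.runIgnoreExitCode("rpm --quiet -e elasticsearch"));
        Platforms.onDPKG(() -> sh.runIgnoreExitCode("dpkg --purge elasticsearch"));
    }
}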

View File

@ -72,7 +72,7 @@ public class ServerUtils {
} catch (HttpHostConnectException e) {
// we want to retry if the connection is refused
LOG.info("Got connection refused when waiting for cluster health", e);
LOG.debug("Got connection refused when waiting for cluster health", e);
}
timeElapsed = System.currentTimeMillis() - startTime;

View File

@ -1,233 +0,0 @@
#!/usr/bin/env bats
# This file is used to test the installation and removal
# of a Debian package.
# WARNING: This testing file must be executed as root and can
# dramatically change your system. It should only be executed
# in a throw-away VM like those made by the Vagrantfile at
# the root of the Elasticsearch source code. This should
# cause the script to fail if it is executed any other way:
[ -f /etc/is_vagrant_vm ] || {
>&2 echo "must be run on a vagrant VM"
exit 1
}
# The test case can be executed with the Bash Automated
# Testing System tool available at https://github.com/sstephenson/bats
# Thanks to Sam Stephenson!
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Load test utilities
load $BATS_UTILS/utils.bash
load $BATS_UTILS/packages.bash
load $BATS_UTILS/plugins.bash
# Cleans everything for the 1st execution
setup() {
skip_not_dpkg
export_elasticsearch_paths
}
@test "[DEB] package depends on bash" {
dpkg -I elasticsearch-oss-$(cat version).deb | grep "Depends:.*bash.*"
}
@test "[DEB] package conflicts" {
dpkg -I elasticsearch-oss-$(cat version).deb | grep "^ Conflicts: elasticsearch$"
dpkg -I elasticsearch-$(cat version).deb | grep "^ Conflicts: elasticsearch-oss$"
}
##################################
# Install DEB package
##################################
@test "[DEB] dpkg command is available" {
clean_before_test
dpkg --version
}
@test "[DEB] package is available" {
count=$(ls elasticsearch-oss-$(cat version).deb | wc -l)
[ "$count" -eq 1 ]
}
@test "[DEB] package is not installed" {
run dpkg -s 'elasticsearch-oss'
[ "$status" -eq 1 ]
}
@test "[DEB] temporarily remove java and ensure the install fails" {
move_java
run dpkg -i elasticsearch-oss-$(cat version).deb
output=$status
unmove_java
[ "$output" -eq 1 ]
}
@test "[DEB] install package" {
dpkg -i elasticsearch-oss-$(cat version).deb
}
@test "[DEB] package is installed" {
dpkg -s 'elasticsearch-oss'
}
@test "[DEB] verify package installation" {
verify_package_installation
}
@test "[DEB] verify elasticsearch-plugin list runs without any plugins installed" {
local plugins_list=`$ESHOME/bin/elasticsearch-plugin list`
[[ -z $plugins_list ]]
}
@test "[DEB] elasticsearch isn't started by package install" {
# Wait a second to give Elasticsearch a chance to start if it is going to.
# This isn't perfect by any means but it's something.
sleep 1
! ps aux | grep elasticsearch | grep java
# You might be tempted to use jps instead of the above but that'd have to
# look like:
# ! sudo -u elasticsearch jps | grep -i elasticsearch
# which isn't really easier to read than the above.
}
@test "[DEB] test elasticsearch" {
start_elasticsearch_service
run_elasticsearch_tests
}
@test "[DEB] verify package installation after start" {
# Checks that the startup scripts didn't change the permissions
verify_package_installation
}
##################################
# Uninstall DEB package
##################################
@test "[DEB] remove package" {
dpkg -r 'elasticsearch-oss'
}
@test "[DEB] package has been removed" {
run dpkg -s 'elasticsearch-oss'
[ "$status" -eq 0 ]
echo "$output" | grep -i "status" | grep -i "deinstall ok"
}
@test "[DEB] verify package removal" {
# The removal must stop the service
count=$(ps | grep Elasticsearch | wc -l)
[ "$count" -eq 0 ]
# The removal must disable the service
# see prerm file
if is_systemd; then
missing_exit_code=4
if [ $(systemctl --version | head -1 | awk '{print $2}') -lt 231 ]; then
# systemd before version 231 used exit code 3 when the service did not exist
missing_exit_code=3
fi
run systemctl status elasticsearch.service
[ "$status" -eq $missing_exit_code ]
run systemctl is-enabled elasticsearch.service
[ "$status" -eq 1 ]
fi
# Those directories are deleted when removing the package
# see postrm file
assert_file_not_exist "/var/log/elasticsearch"
assert_file_not_exist "/usr/share/elasticsearch/plugins"
assert_file_not_exist "/usr/share/elasticsearch/modules"
assert_file_not_exist "/var/run/elasticsearch"
# Those directories are removed by the package manager
assert_file_not_exist "/usr/share/elasticsearch/bin"
assert_file_not_exist "/usr/share/elasticsearch/lib"
assert_file_not_exist "/usr/share/elasticsearch/modules"
assert_file_not_exist "/usr/share/elasticsearch/modules/lang-painless"
# The configuration files are still here
assert_file_exist "/etc/elasticsearch"
# TODO: use ucf to handle these better for Debian-based systems
assert_file_not_exist "/etc/elasticsearch/elasticsearch.keystore"
assert_file_not_exist "/etc/elasticsearch/.elasticsearch.keystore.initial_md5sum"
assert_file_exist "/etc/elasticsearch/elasticsearch.yml"
assert_file_exist "/etc/elasticsearch/jvm.options"
assert_file_exist "/etc/elasticsearch/log4j2.properties"
# The env file is still here
assert_file_exist "/etc/default/elasticsearch"
# The service files are still here
assert_file_exist "/etc/init.d/elasticsearch"
}
@test "[DEB] purge package" {
# User installed scripts aren't removed so we'll just get them ourselves
rm -rf $ESSCRIPTS
dpkg --purge 'elasticsearch-oss'
}
@test "[DEB] verify package purge" {
# all remaining files are deleted by the purge
assert_file_not_exist "/etc/elasticsearch"
assert_file_not_exist "/etc/elasticsearch/elasticsearch.keystore"
assert_file_not_exist "/etc/elasticsearch/.elasticsearch.keystore.initial_md5sum"
assert_file_not_exist "/etc/elasticsearch/elasticsearch.yml"
assert_file_not_exist "/etc/elasticsearch/jvm.options"
assert_file_not_exist "/etc/elasticsearch/log4j2.properties"
assert_file_not_exist "/etc/default/elasticsearch"
assert_file_not_exist "/etc/init.d/elasticsearch"
assert_file_not_exist "/usr/lib/systemd/system/elasticsearch.service"
assert_file_not_exist "/usr/share/elasticsearch"
assert_file_not_exist "/usr/share/doc/elasticsearch-oss"
assert_file_not_exist "/usr/share/doc/elasticsearch-oss/copyright"
}
@test "[DEB] package has been completely removed" {
run dpkg -s 'elasticsearch-oss'
[ "$status" -eq 1 ]
}
@test "[DEB] reinstall package" {
dpkg -i elasticsearch-oss-$(cat version).deb
}
@test "[DEB] package is installed by reinstall" {
dpkg -s 'elasticsearch-oss'
}
@test "[DEB] verify package reinstallation" {
verify_package_installation
}
@test "[DEB] repurge package" {
dpkg --purge 'elasticsearch-oss'
}
@test "[DEB] package has been completely removed again" {
run dpkg -s 'elasticsearch-oss'
[ "$status" -eq 1 ]
}

View File

@ -1,220 +0,0 @@
#!/usr/bin/env bats
# This file is used to test the installation of a RPM package.
# WARNING: This testing file must be executed as root and can
# dramatically change your system. It should only be executed
# in a throw-away VM like those made by the Vagrantfile at
# the root of the Elasticsearch source code. This should
# cause the script to fail if it is executed any other way:
[ -f /etc/is_vagrant_vm ] || {
>&2 echo "must be run on a vagrant VM"
exit 1
}
# The test case can be executed with the Bash Automated
# Testing System tool available at https://github.com/sstephenson/bats
# Thanks to Sam Stephenson!
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Load test utilities
load $BATS_UTILS/utils.bash
load $BATS_UTILS/packages.bash
load $BATS_UTILS/plugins.bash
# Cleans everything for the 1st execution
setup() {
skip_not_rpm
export_elasticsearch_paths
}
@test "[RPM] package depends on bash" {
rpm -qpR elasticsearch-oss-$(cat version).rpm | grep '/bin/bash'
}
@test "[RPM] package conflicts" {
rpm -qp --conflicts elasticsearch-oss-$(cat version).rpm | grep "^elasticsearch\s*$"
rpm -qp --conflicts elasticsearch-$(cat version).rpm | grep "^elasticsearch-oss\s*$"
}
##################################
# Install RPM package
##################################
@test "[RPM] rpm command is available" {
clean_before_test
rpm --version
}
@test "[RPM] package is available" {
count=$(ls elasticsearch-oss-$(cat version).rpm | wc -l)
[ "$count" -eq 1 ]
}
@test "[RPM] package is not installed" {
run rpm -qe 'elasticsearch-oss'
[ "$status" -eq 1 ]
}
@test "[RPM] temporarily remove java and ensure the install fails" {
move_java
run rpm -i elasticsearch-oss-$(cat version).rpm
output=$status
unmove_java
[ "$output" -eq 1 ]
}
@test "[RPM] install package" {
rpm -i elasticsearch-oss-$(cat version).rpm
}
@test "[RPM] package is installed" {
rpm -qe 'elasticsearch-oss'
}
@test "[RPM] verify package installation" {
verify_package_installation
}
@test "[RPM] verify elasticsearch-plugin list runs without any plugins installed" {
local plugins_list=`$ESHOME/bin/elasticsearch-plugin list`
[[ -z $plugins_list ]]
}
@test "[RPM] elasticsearch isn't started by package install" {
# Wait a second to give Elasticsearch a chance to start if it is going to.
# This isn't perfect by any means but it's something.
sleep 1
! ps aux | grep elasticsearch | grep java
}
@test "[RPM] test elasticsearch" {
start_elasticsearch_service
run_elasticsearch_tests
}
@test "[RPM] verify package installation after start" {
# Checks that the startup scripts didn't change the permissions
verify_package_installation
}
@test "[RPM] remove package" {
# User installed scripts aren't removed so we'll just get them ourselves
rm -rf $ESSCRIPTS
rpm -e 'elasticsearch-oss'
}
@test "[RPM] package has been removed" {
run rpm -qe 'elasticsearch-oss'
[ "$status" -eq 1 ]
}
@test "[RPM] verify package removal" {
# The removal must stop the service
count=$(ps | grep Elasticsearch | wc -l)
[ "$count" -eq 0 ]
# The removal must disable the service
# see prerm file
if is_systemd; then
run systemctl is-enabled elasticsearch.service
[ "$status" -eq 1 ]
fi
# Those directories are deleted when removing the package
# see postrm file
assert_file_not_exist "/var/log/elasticsearch"
assert_file_not_exist "/usr/share/elasticsearch/plugins"
assert_file_not_exist "/var/run/elasticsearch"
# Those directories are removed by the package manager
assert_file_not_exist "/usr/share/elasticsearch/bin"
assert_file_not_exist "/usr/share/elasticsearch/lib"
assert_file_not_exist "/usr/share/elasticsearch/modules"
assert_file_not_exist "/etc/elasticsearch"
assert_file_not_exist "/etc/init.d/elasticsearch"
assert_file_not_exist "/usr/lib/systemd/system/elasticsearch.service"
assert_file_not_exist "/etc/sysconfig/elasticsearch"
}
@test "[RPM] reinstall package" {
rpm -i elasticsearch-oss-$(cat version).rpm
}
@test "[RPM] package is installed by reinstall" {
rpm -qe 'elasticsearch-oss'
}
@test "[RPM] verify package reinstallation" {
verify_package_installation
}
@test "[RPM] reremove package" {
echo foobar | "$ESHOME/bin/elasticsearch-keystore" add --stdin foo.bar
echo "# ping" >> "/etc/elasticsearch/elasticsearch.yml"
echo "# ping" >> "/etc/elasticsearch/jvm.options"
echo "# ping" >> "/etc/elasticsearch/log4j2.properties"
rpm -e 'elasticsearch-oss'
}
@test "[RPM] verify preservation" {
# The removal must disable the service
# see prerm file
if is_systemd; then
run systemctl is-enabled elasticsearch.service
[ "$status" -eq 1 ]
fi
# Those directories are deleted when removing the package
# see postrm file
assert_file_not_exist "/var/log/elasticsearch"
assert_file_not_exist "/usr/share/elasticsearch/plugins"
assert_file_not_exist "/usr/share/elasticsearch/modules"
assert_file_not_exist "/var/run/elasticsearch"
assert_file_not_exist "/usr/share/elasticsearch/bin"
assert_file_not_exist "/usr/share/elasticsearch/lib"
assert_file_not_exist "/usr/share/elasticsearch/modules"
assert_file_not_exist "/usr/share/elasticsearch/modules/lang-painless"
assert_file_exist "/etc/elasticsearch/elasticsearch.keystore"
assert_file_not_exist "/etc/elasticsearch/elasticsearch.yml"
assert_file_exist "/etc/elasticsearch/elasticsearch.yml.rpmsave"
assert_file_not_exist "/etc/elasticsearch/jvm.options"
assert_file_exist "/etc/elasticsearch/jvm.options.rpmsave"
assert_file_not_exist "/etc/elasticsearch/log4j2.properties"
assert_file_exist "/etc/elasticsearch/log4j2.properties.rpmsave"
assert_file_not_exist "/etc/init.d/elasticsearch"
assert_file_not_exist "/usr/lib/systemd/system/elasticsearch.service"
assert_file_not_exist "/etc/sysconfig/elasticsearch"
}
@test "[RPM] finalize package removal" {
# cleanup
rm -rf /etc/elasticsearch
}
@test "[RPM] package has been removed again" {
run rpm -qe 'elasticsearch-oss'
[ "$status" -eq 1 ]
}

View File

@ -38,7 +38,8 @@ setup:
- gt: { snapshots.0.stats.total.file_count: 0 }
- gt: { snapshots.0.stats.total.size_in_bytes: 0 }
- is_true: snapshots.0.stats.start_time_in_millis
- is_true: snapshots.0.stats.time_in_millis
## fast in-memory snapshots can take less than one millisecond to complete.
- gte: { snapshots.0.stats.time_in_millis: 0 }
---
"Get missing snapshot status throws an exception":

View File

@ -295,7 +295,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest) docWriteRequest);
break;
case DELETE:
docWriteRequest.routing(metaData.resolveIndexRouting(docWriteRequest.routing(), docWriteRequest.index()));
docWriteRequest.routing(metaData.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index()));
// check if routing is required, if so, throw error if routing wasn't specified
if (docWriteRequest.routing() == null && metaData.routingRequired(concreteIndex.getName(), docWriteRequest.type())) {
throw new RoutingMissingException(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id());
@ -474,7 +474,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
Index resolveIfAbsent(DocWriteRequest<?> request) {
Index concreteIndex = indices.get(request.index());
if (concreteIndex == null) {
concreteIndex = indexNameExpressionResolver.concreteSingleIndex(state, request);
concreteIndex = indexNameExpressionResolver.concreteWriteIndex(state, request);
indices.put(request.index(), concreteIndex);
}
return concreteIndex;

View File

@ -496,7 +496,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
/* resolve the routing if needed */
public void resolveRouting(MetaData metaData) {
routing(metaData.resolveIndexRouting(routing, index));
routing(metaData.resolveWriteIndexRouting(routing, index));
}
@Override

View File

@ -104,7 +104,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
}
public static void resolveAndValidateRouting(MetaData metaData, String concreteIndex, UpdateRequest request) {
request.routing((metaData.resolveIndexRouting(request.routing(), request.index())));
request.routing((metaData.resolveWriteIndexRouting(request.routing(), request.index())));
// Fail fast on the node that received the request, rather than failing when translating on the index or delete request.
if (request.routing() == null && metaData.routingRequired(concreteIndex, request.type())) {
throw new RoutingMissingException(concreteIndex, request.type(), request.id());

View File

@ -42,7 +42,6 @@ import org.joda.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
@ -103,7 +102,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
return concreteIndexNames(context, indexExpressions);
}
/**
/**
* Translates the provided index expression into actual concrete indices, properly deduplicated.
*
* @param state the cluster state containing all the data to resolve to expressions to concrete indices
@ -117,7 +116,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
* indices options in the context don't allow such a case.
*/
public Index[] concreteIndices(ClusterState state, IndicesOptions options, String... indexExpressions) {
Context context = new Context(state, options);
Context context = new Context(state, options, false, false);
return concreteIndices(context, indexExpressions);
}
@ -193,30 +192,40 @@ public class IndexNameExpressionResolver extends AbstractComponent {
}
}
Collection<IndexMetaData> resolvedIndices = aliasOrIndex.getIndices();
if (resolvedIndices.size() > 1 && !options.allowAliasesToMultipleIndices()) {
String[] indexNames = new String[resolvedIndices.size()];
int i = 0;
for (IndexMetaData indexMetaData : resolvedIndices) {
indexNames[i++] = indexMetaData.getIndex().getName();
if (aliasOrIndex.isAlias() && context.isResolveToWriteIndex()) {
AliasOrIndex.Alias alias = (AliasOrIndex.Alias) aliasOrIndex;
IndexMetaData writeIndex = alias.getWriteIndex();
if (writeIndex == null) {
throw new IllegalArgumentException("no write index is defined for alias [" + alias.getAliasName() + "]." +
" The write index may be explicitly disabled using is_write_index=false or the alias points to multiple" +
" indices without one being designated as a write index");
}
throw new IllegalArgumentException("Alias [" + expression + "] has more than one indices associated with it [" +
Arrays.toString(indexNames) + "], can't execute a single index op");
}
for (IndexMetaData index : resolvedIndices) {
if (index.getState() == IndexMetaData.State.CLOSE) {
if (failClosed) {
throw new IndexClosedException(index.getIndex());
} else {
if (options.forbidClosedIndices() == false) {
concreteIndices.add(index.getIndex());
}
concreteIndices.add(writeIndex.getIndex());
} else {
if (aliasOrIndex.getIndices().size() > 1 && !options.allowAliasesToMultipleIndices()) {
String[] indexNames = new String[aliasOrIndex.getIndices().size()];
int i = 0;
for (IndexMetaData indexMetaData : aliasOrIndex.getIndices()) {
indexNames[i++] = indexMetaData.getIndex().getName();
}
throw new IllegalArgumentException("Alias [" + expression + "] has more than one indices associated with it [" +
Arrays.toString(indexNames) + "], can't execute a single index op");
}
for (IndexMetaData index : aliasOrIndex.getIndices()) {
if (index.getState() == IndexMetaData.State.CLOSE) {
if (failClosed) {
throw new IndexClosedException(index.getIndex());
} else {
if (options.forbidClosedIndices() == false) {
concreteIndices.add(index.getIndex());
}
}
} else if (index.getState() == IndexMetaData.State.OPEN) {
concreteIndices.add(index.getIndex());
} else {
throw new IllegalStateException("index state [" + index.getState() + "] not supported");
}
} else if (index.getState() == IndexMetaData.State.OPEN) {
concreteIndices.add(index.getIndex());
} else {
throw new IllegalStateException("index state [" + index.getState() + "] not supported");
}
}
}
@ -255,6 +264,28 @@ public class IndexNameExpressionResolver extends AbstractComponent {
return indices[0];
}
/**
* Utility method that resolves an index expression to its corresponding single write index.
*
* @param state the cluster state containing all the data needed to resolve the expression to a concrete index
* @param request The request that defines how an alias or an index needs to be resolved to a concrete index
* and the expression that can be resolved to an alias or an index name.
* @throws IllegalArgumentException if the index resolution does not lead to an index, or leads to more than one index
* @return the write index obtained as a result of the index resolution
*/
public Index concreteWriteIndex(ClusterState state, IndicesRequest request) {
if (request.indices() == null || request.indices().length != 1) {
throw new IllegalArgumentException("indices request must specify a single index expression");
}
Context context = new Context(state, request.indicesOptions(), false, true);
Index[] indices = concreteIndices(context, request.indices()[0]);
if (indices.length != 1) {
throw new IllegalArgumentException("The index expression [" + request.indices()[0] +
"] and options provided did not point to a single write-index");
}
return indices[0];
}
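
The rule implemented above: a concrete index resolves to itself, while an alias must designate exactly one write index or the request is rejected. A stripped-down model of that rule over plain collections; Target and the map layout are simplified stand-ins, not the real cluster-state classes:

import java.util.List;
import java.util.Map;

public class WriteIndexResolutionSketch {
    static class Target {
        final String indexName;
        final boolean isWriteIndex;
        Target(String indexName, boolean isWriteIndex) {
            this.indexName = indexName;
            this.isWriteIndex = isWriteIndex;
        }
    }

    // Resolve an expression to its single write index.
    static String resolveWriteIndex(Map<String, List<Target>> aliases, String expression) {
        List<Target> targets = aliases.get(expression);
        if (targets == null) {
            return expression; // a concrete index resolves to itself
        }
        String writeIndex = null;
        for (Target target : targets) {
            if (target.isWriteIndex) {
                if (writeIndex != null) {
                    throw new IllegalStateException("more than one write index for [" + expression + "]");
                }
                writeIndex = target.indexName;
            }
        }
        if (writeIndex == null) {
            throw new IllegalArgumentException("no write index is defined for alias [" + expression + "]");
        }
        return writeIndex;
    }
}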
/**
* @return whether the specified alias or index exists. If the alias or index contains datemath then that is resolved too.
*/
@ -292,7 +323,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
String... expressions) {
// expand the aliases wildcard
List<String> resolvedExpressions = expressions != null ? Arrays.asList(expressions) : Collections.emptyList();
Context context = new Context(state, IndicesOptions.lenientExpandOpen(), true);
Context context = new Context(state, IndicesOptions.lenientExpandOpen(), true, false);
for (ExpressionResolver expressionResolver : expressionResolvers) {
resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions);
}
@ -512,24 +543,26 @@ public class IndexNameExpressionResolver extends AbstractComponent {
private final IndicesOptions options;
private final long startTime;
private final boolean preserveAliases;
private final boolean resolveToWriteIndex;
Context(ClusterState state, IndicesOptions options) {
this(state, options, System.currentTimeMillis());
}
Context(ClusterState state, IndicesOptions options, boolean preserveAliases) {
this(state, options, System.currentTimeMillis(), preserveAliases);
Context(ClusterState state, IndicesOptions options, boolean preserveAliases, boolean resolveToWriteIndex) {
this(state, options, System.currentTimeMillis(), preserveAliases, resolveToWriteIndex);
}
Context(ClusterState state, IndicesOptions options, long startTime) {
this(state, options, startTime, false);
this(state, options, startTime, false, false);
}
Context(ClusterState state, IndicesOptions options, long startTime, boolean preserveAliases) {
Context(ClusterState state, IndicesOptions options, long startTime, boolean preserveAliases, boolean resolveToWriteIndex) {
this.state = state;
this.options = options;
this.startTime = startTime;
this.preserveAliases = preserveAliases;
this.resolveToWriteIndex = resolveToWriteIndex;
}
public ClusterState getState() {
@ -552,6 +585,14 @@ public class IndexNameExpressionResolver extends AbstractComponent {
boolean isPreserveAliases() {
return preserveAliases;
}
/**
* This is used to require that aliases resolve to their write-index. It is currently not used in conjunction
* with <code>preserveAliases</code>.
*/
boolean isResolveToWriteIndex() {
return resolveToWriteIndex;
}
}
private interface ExpressionResolver {

View File

@ -471,6 +471,42 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
return allClosedIndices;
}
/**
* Returns indexing routing for the given <code>aliasOrIndex</code>. Routing is resolved from the alias
* metadata on the write index.
*/
public String resolveWriteIndexRouting(@Nullable String routing, String aliasOrIndex) {
if (aliasOrIndex == null) {
return routing;
}
AliasOrIndex result = getAliasAndIndexLookup().get(aliasOrIndex);
if (result == null || result.isAlias() == false) {
return routing;
}
AliasOrIndex.Alias alias = (AliasOrIndex.Alias) result;
IndexMetaData writeIndex = alias.getWriteIndex();
if (writeIndex == null) {
throw new IllegalArgumentException("alias [" + aliasOrIndex + "] does not have a write index");
}
AliasMetaData aliasMd = writeIndex.getAliases().get(alias.getAliasName());
if (aliasMd.indexRouting() != null) {
if (aliasMd.indexRouting().indexOf(',') != -1) {
throw new IllegalArgumentException("index/alias [" + aliasOrIndex + "] provided with routing value ["
+ aliasMd.getIndexRouting() + "] that resolved to several routing values, rejecting operation");
}
if (routing != null) {
if (!routing.equals(aliasMd.indexRouting())) {
throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has index routing associated with it ["
+ aliasMd.indexRouting() + "], and was provided with routing value [" + routing + "], rejecting operation");
}
}
// Alias routing overrides the parent routing (if any).
return aliasMd.indexRouting();
}
return routing;
}
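
// Worked out from the branches above (a sketch, assuming a hypothetical alias "a1" whose write
// index declares indexRouting = "1", and an alias "a2" that has no write index):
//   resolveWriteIndexRouting(null, "a1")  -> "1"   alias routing applies
//   resolveWriteIndexRouting("1", "a1")   -> "1"   request routing agrees with the alias routing
//   resolveWriteIndexRouting("2", "a1")   -> IllegalArgumentException (conflicting routing values)
//   resolveWriteIndexRouting("2", "a2")   -> IllegalArgumentException (no write index for the alias)
//   resolveWriteIndexRouting("2", "idx")  -> "2"   concrete index: request routing passes through
//   resolveWriteIndexRouting(null, null)  -> null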
/**
* Returns indexing routing for the given index.
*/

View File

@ -20,13 +20,20 @@
package org.elasticsearch.action.bulk;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESIntegTestCase;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.Map;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.equalTo;
public class BulkIntegrationIT extends ESIntegTestCase {
public void testBulkIndexCreatesMapping() throws Exception {
@ -40,4 +47,38 @@ public class BulkIntegrationIT extends ESIntegTestCase {
assertTrue(mappingsResponse.getMappings().get("logstash-2014.03.30").containsKey("logs"));
});
}
/**
* This tests that the {@link TransportBulkAction} evaluates alias routing values correctly when dealing with
* an alias pointing to multiple indices while a write index exists.
*/
public void testBulkWithWriteIndexAndRouting() {
Map<String, Integer> twoShardsSettings = Collections.singletonMap(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2);
client().admin().indices().prepareCreate("index1")
.addAlias(new Alias("alias1").indexRouting("0")).setSettings(twoShardsSettings).get();
client().admin().indices().prepareCreate("index2")
.addAlias(new Alias("alias1").indexRouting("0").writeIndex(randomFrom(false, null)))
.setSettings(twoShardsSettings).get();
client().admin().indices().prepareCreate("index3")
.addAlias(new Alias("alias1").indexRouting("1").writeIndex(true)).setSettings(twoShardsSettings).get();
IndexRequest indexRequestWithAlias = new IndexRequest("alias1", "type", "id");
if (randomBoolean()) {
indexRequestWithAlias.routing("1");
}
indexRequestWithAlias.source(Collections.singletonMap("foo", "baz"));
BulkResponse bulkResponse = client().prepareBulk().add(indexRequestWithAlias).get();
assertThat(bulkResponse.getItems()[0].getResponse().getIndex(), equalTo("index3"));
assertThat(bulkResponse.getItems()[0].getResponse().getShardId().getId(), equalTo(0));
assertThat(bulkResponse.getItems()[0].getResponse().getVersion(), equalTo(1L));
assertThat(bulkResponse.getItems()[0].getResponse().status(), equalTo(RestStatus.CREATED));
assertThat(client().prepareGet("index3", "type", "id").setRouting("1").get().getSource().get("foo"), equalTo("baz"));
bulkResponse = client().prepareBulk().add(client().prepareUpdate("alias1", "type", "id").setDoc("foo", "updated")).get();
assertFalse(bulkResponse.hasFailures());
assertThat(client().prepareGet("index3", "type", "id").setRouting("1").get().getSource().get("foo"), equalTo("updated"));
bulkResponse = client().prepareBulk().add(client().prepareDelete("alias1", "type", "id")).get();
assertFalse(bulkResponse.hasFailures());
assertFalse(client().prepareGet("index3", "type", "id").setRouting("1").get().isExists());
}
}

View File

@ -25,6 +25,7 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasA
import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
@ -57,6 +58,7 @@ import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.client.Requests.deleteRequest;
import static org.elasticsearch.client.Requests.indexRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_METADATA_BLOCK;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_READ_ONLY_BLOCK;
@ -85,6 +87,17 @@ public class IndexAliasesIT extends ESIntegTestCase {
ensureGreen();
logger.info("--> aliasing index [test] with [alias1]");
assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1", false));
logger.info("--> indexing against [alias1], should fail now");
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> client().index(indexRequest("alias1").type("type1").id("1").source(source("2", "test"),
XContentType.JSON)).actionGet());
assertThat(exception.getMessage(), equalTo("no write index is defined for alias [alias1]." +
" The write index may be explicitly disabled using is_write_index=false or the alias points to multiple" +
" indices without one being designated as a write index"));
logger.info("--> aliasing index [test] with [alias1]");
assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1"));
@ -98,6 +111,44 @@ public class IndexAliasesIT extends ESIntegTestCase {
ensureGreen();
logger.info("--> add index [test_x] with [alias1]");
assertAcked(admin().indices().prepareAliases().addAlias("test_x", "alias1"));
logger.info("--> indexing against [alias1], should fail now");
exception = expectThrows(IllegalArgumentException.class,
() -> client().index(indexRequest("alias1").type("type1").id("1").source(source("2", "test"),
XContentType.JSON)).actionGet());
assertThat(exception.getMessage(), equalTo("no write index is defined for alias [alias1]." +
" The write index may be explicitly disabled using is_write_index=false or the alias points to multiple" +
" indices without one being designated as a write index"));
logger.info("--> deleting against [alias1], should fail now");
exception = expectThrows(IllegalArgumentException.class,
() -> client().delete(deleteRequest("alias1").type("type1").id("1")).actionGet());
assertThat(exception.getMessage(), equalTo("no write index is defined for alias [alias1]." +
" The write index may be explicitly disabled using is_write_index=false or the alias points to multiple" +
" indices without one being designated as a write index"));
logger.info("--> remove aliasing index [test_x] with [alias1]");
assertAcked(admin().indices().prepareAliases().removeAlias("test_x", "alias1"));
logger.info("--> indexing against [alias1], should work now");
indexResponse = client().index(indexRequest("alias1").type("type1").id("1")
.source(source("1", "test"), XContentType.JSON)).actionGet();
assertThat(indexResponse.getIndex(), equalTo("test"));
logger.info("--> add index [test_x] with [alias1] as write-index");
assertAcked(admin().indices().prepareAliases().addAlias("test_x", "alias1", true));
logger.info("--> indexing against [alias1], should work now");
indexResponse = client().index(indexRequest("alias1").type("type1").id("1")
.source(source("1", "test"), XContentType.JSON)).actionGet();
assertThat(indexResponse.getIndex(), equalTo("test_x"));
logger.info("--> deleting against [alias1], should fail now");
DeleteResponse deleteResponse = client().delete(deleteRequest("alias1").type("type1").id("1")).actionGet();
assertThat(deleteResponse.getIndex(), equalTo("test_x"));
logger.info("--> remove [alias1], Aliasing index [test_x] with [alias1]");
assertAcked(admin().indices().prepareAliases().removeAlias("test", "alias1").addAlias("test_x", "alias1"));
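The failure messages asserted above follow a simple resolution rule: an alias over a single index implicitly writes to that index unless the index is flagged is_write_index=false, while an alias over several indices needs exactly one index flagged true. A minimal standalone sketch of that rule in plain Java (a simplification for illustration, not the actual IndexNameExpressionResolver code):

import java.util.Map;

// indexToWriteFlag maps each backing index of the alias to its is_write_index
// value: Boolean.TRUE, Boolean.FALSE, or null when unset.
static String resolveWriteIndex(String alias, Map<String, Boolean> indexToWriteFlag) {
    for (Map.Entry<String, Boolean> e : indexToWriteFlag.entrySet()) {
        if (Boolean.TRUE.equals(e.getValue())) {
            return e.getKey(); // at most one index may be flagged true
        }
    }
    if (indexToWriteFlag.size() == 1) {
        Map.Entry<String, Boolean> only = indexToWriteFlag.entrySet().iterator().next();
        if (Boolean.FALSE.equals(only.getValue()) == false) {
            return only.getKey(); // a lone index defaults to being the write index
        }
    }
    throw new IllegalArgumentException("no write index is defined for alias [" + alias + "]");
}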

View File

@ -20,14 +20,20 @@
package org.elasticsearch.cluster.metadata;
import org.elasticsearch.Version;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData.State;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.indices.InvalidIndexNameException;
@ -37,6 +43,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.function.Function;
import static org.elasticsearch.common.util.set.Sets.newHashSet;
import static org.hamcrest.Matchers.arrayContaining;
@ -44,6 +51,7 @@ import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.emptyArray;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
@ -996,6 +1004,152 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
assertArrayEquals(new String[] {"test-alias-0", "test-alias-1", "test-alias-non-filtering"}, strings);
}
public void testConcreteWriteIndexSuccessful() {
boolean testZeroWriteIndex = randomBoolean();
MetaData.Builder mdBuilder = MetaData.builder()
.put(indexBuilder("test-0").state(State.OPEN)
.putAlias(AliasMetaData.builder("test-alias").writeIndex(testZeroWriteIndex ? true : null)));
ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
String[] strings = indexNameExpressionResolver
.indexAliases(state, "test-0", x -> true, true, "test-*");
Arrays.sort(strings);
assertArrayEquals(new String[] {"test-alias"}, strings);
IndicesRequest request = new IndicesRequest() {
@Override
public String[] indices() {
return new String[] { "test-alias" };
}
@Override
public IndicesOptions indicesOptions() {
return IndicesOptions.strictSingleIndexNoExpandForbidClosed();
}
};
Index writeIndex = indexNameExpressionResolver.concreteWriteIndex(state, request);
assertThat(writeIndex.getName(), equalTo("test-0"));
state = ClusterState.builder(state).metaData(MetaData.builder(state.metaData())
.put(indexBuilder("test-1").putAlias(AliasMetaData.builder("test-alias")
.writeIndex(testZeroWriteIndex ? randomFrom(false, null) : true)))).build();
writeIndex = indexNameExpressionResolver.concreteWriteIndex(state, request);
assertThat(writeIndex.getName(), equalTo(testZeroWriteIndex ? "test-0" : "test-1"));
}
public void testConcreteWriteIndexWithInvalidIndicesRequest() {
MetaData.Builder mdBuilder = MetaData.builder()
.put(indexBuilder("test-0").state(State.OPEN)
.putAlias(AliasMetaData.builder("test-alias")));
ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
Function<String[], IndicesRequest> requestGen = (indices) -> new IndicesRequest() {
@Override
public String[] indices() {
return indices;
}
@Override
public IndicesOptions indicesOptions() {
return IndicesOptions.strictSingleIndexNoExpandForbidClosed();
}
};
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> indexNameExpressionResolver.concreteWriteIndex(state, requestGen.apply(null)));
assertThat(exception.getMessage(), equalTo("indices request must specify a single index expression"));
exception = expectThrows(IllegalArgumentException.class,
() -> indexNameExpressionResolver.concreteWriteIndex(state, requestGen.apply(new String[] {"too", "many"})));
assertThat(exception.getMessage(), equalTo("indices request must specify a single index expression"));
}
public void testConcreteWriteIndexWithWildcardExpansion() {
boolean testZeroWriteIndex = randomBoolean();
MetaData.Builder mdBuilder = MetaData.builder()
.put(indexBuilder("test-1").state(State.OPEN)
.putAlias(AliasMetaData.builder("test-alias").writeIndex(testZeroWriteIndex ? true : null)))
.put(indexBuilder("test-0").state(State.OPEN)
.putAlias(AliasMetaData.builder("test-alias").writeIndex(testZeroWriteIndex ? randomFrom(false, null) : true)));
ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
String[] strings = indexNameExpressionResolver
.indexAliases(state, "test-0", x -> true, true, "test-*");
Arrays.sort(strings);
assertArrayEquals(new String[] {"test-alias"}, strings);
IndicesRequest request = new IndicesRequest() {
@Override
public String[] indices() {
return new String[] { "test-*"};
}
@Override
public IndicesOptions indicesOptions() {
return IndicesOptions.strictExpandOpenAndForbidClosed();
}
};
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> indexNameExpressionResolver.concreteWriteIndex(state, request));
assertThat(exception.getMessage(),
equalTo("The index expression [test-*] and options provided did not point to a single write-index"));
}
public void testConcreteWriteIndexWithNoWriteIndexWithSingleIndex() {
MetaData.Builder mdBuilder = MetaData.builder()
.put(indexBuilder("test-0").state(State.OPEN)
.putAlias(AliasMetaData.builder("test-alias").writeIndex(false)));
ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
String[] strings = indexNameExpressionResolver
.indexAliases(state, "test-0", x -> true, true, "test-*");
Arrays.sort(strings);
assertArrayEquals(new String[] {"test-alias"}, strings);
DocWriteRequest request = randomFrom(new IndexRequest("test-alias"),
new UpdateRequest("test-alias", "_type", "_id"), new DeleteRequest("test-alias"));
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> indexNameExpressionResolver.concreteWriteIndex(state, request));
assertThat(exception.getMessage(), equalTo("no write index is defined for alias [test-alias]." +
" The write index may be explicitly disabled using is_write_index=false or the alias points to multiple" +
" indices without one being designated as a write index"));
}
public void testConcreteWriteIndexWithNoWriteIndexWithMultipleIndices() {
MetaData.Builder mdBuilder = MetaData.builder()
.put(indexBuilder("test-0").state(State.OPEN)
.putAlias(AliasMetaData.builder("test-alias").writeIndex(randomFrom(false, null))))
.put(indexBuilder("test-1").state(State.OPEN)
.putAlias(AliasMetaData.builder("test-alias").writeIndex(randomFrom(false, null))));
ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
String[] strings = indexNameExpressionResolver
.indexAliases(state, "test-0", x -> true, true, "test-*");
Arrays.sort(strings);
assertArrayEquals(new String[] {"test-alias"}, strings);
DocWriteRequest request = randomFrom(new IndexRequest("test-alias"),
new UpdateRequest("test-alias", "_type", "_id"), new DeleteRequest("test-alias"));
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> indexNameExpressionResolver.concreteWriteIndex(state, request));
assertThat(exception.getMessage(), equalTo("no write index is defined for alias [test-alias]." +
" The write index may be explicitly disabled using is_write_index=false or the alias points to multiple" +
" indices without one being designated as a write index"));
}
public void testAliasResolutionNotAllowingMultipleIndices() {
boolean test0WriteIndex = randomBoolean();
MetaData.Builder mdBuilder = MetaData.builder()
.put(indexBuilder("test-0").state(State.OPEN)
.putAlias(AliasMetaData.builder("test-alias").writeIndex(randomFrom(test0WriteIndex, null))))
.put(indexBuilder("test-1").state(State.OPEN)
.putAlias(AliasMetaData.builder("test-alias").writeIndex(randomFrom(!test0WriteIndex, null))));
ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
String[] strings = indexNameExpressionResolver
.indexAliases(state, "test-0", x -> true, true, "test-*");
Arrays.sort(strings);
assertArrayEquals(new String[] {"test-alias"}, strings);
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed(),
"test-alias"));
assertThat(exception.getMessage(), endsWith(", can't execute a single index op"));
}
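Taken together, these tests pin down two distinct resolution paths: concreteWriteIndex follows the is_write_index flag, while concreteIndexNames with single-index options rejects any alias spanning multiple indices regardless of that flag. A hedged usage sketch against a state built as in the tests above:

// Follows the write-index flag; succeeds for "test-alias" as long as one
// backing index is, implicitly or explicitly, the write index.
Index writeIndex = indexNameExpressionResolver.concreteWriteIndex(state, new IndexRequest("test-alias"));

// Ignores the flag entirely and throws "... can't execute a single index op"
// whenever the alias points at more than one index.
String[] names = indexNameExpressionResolver.concreteIndexNames(
    state, IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "test-alias");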
public void testDeleteIndexIgnoresAliases() {
MetaData.Builder mdBuilder = MetaData.builder()
.put(indexBuilder("test-index").state(State.OPEN)

View File

@ -172,6 +172,87 @@ public class MetaDataTests extends ESTestCase {
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that resolved to several routing values, rejecting operation"));
}
IndexMetaData.Builder builder2 = IndexMetaData.builder("index2")
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1)
.numberOfReplicas(0)
.putAlias(AliasMetaData.builder("alias0").build());
MetaData metaDataTwoIndices = MetaData.builder(metaData).put(builder2).build();
// alias with multiple indices
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> metaDataTwoIndices.resolveIndexRouting("1", "alias0"));
assertThat(exception.getMessage(), startsWith("Alias [alias0] has more than one index associated with it"));
}
public void testResolveWriteIndexRouting() {
AliasMetaData.Builder aliasZeroBuilder = AliasMetaData.builder("alias0");
if (randomBoolean()) {
aliasZeroBuilder.writeIndex(true);
}
IndexMetaData.Builder builder = IndexMetaData.builder("index")
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1)
.numberOfReplicas(0)
.putAlias(aliasZeroBuilder.build())
.putAlias(AliasMetaData.builder("alias1").routing("1").build())
.putAlias(AliasMetaData.builder("alias2").routing("1,2").build())
.putAlias(AliasMetaData.builder("alias3").writeIndex(false).build())
.putAlias(AliasMetaData.builder("alias4").routing("1,2").writeIndex(true).build());
MetaData metaData = MetaData.builder().put(builder).build();
// no alias, no index
assertNull(metaData.resolveWriteIndexRouting(null, null));
assertEquals("0", metaData.resolveWriteIndexRouting("0", null));
// index, no alias
assertNull(metaData.resolveWriteIndexRouting(null, "index"));
assertEquals("0", metaData.resolveWriteIndexRouting("0", "index"));
// alias with no index routing
assertNull(metaData.resolveWriteIndexRouting(null, "alias0"));
assertEquals("0", metaData.resolveWriteIndexRouting("0", "alias0"));
// alias with index routing.
assertEquals(metaData.resolveWriteIndexRouting(null, "alias1"), "1");
Exception exception = expectThrows(IllegalArgumentException.class, () -> metaData.resolveWriteIndexRouting("0", "alias1"));
assertThat(exception.getMessage(),
is("Alias [alias1] has index routing associated with it [1], and was provided with routing value [0], rejecting operation"));
// alias with invalid index routing.
exception = expectThrows(IllegalArgumentException.class, () -> metaData.resolveWriteIndexRouting(null, "alias2"));
assertThat(exception.getMessage(),
is("index/alias [alias2] provided with routing value [1,2] that resolved to several routing values, rejecting operation"));
exception = expectThrows(IllegalArgumentException.class, () -> metaData.resolveWriteIndexRouting("1", "alias2"));
assertThat(exception.getMessage(),
is("index/alias [alias2] provided with routing value [1,2] that resolved to several routing values, rejecting operation"));
exception = expectThrows(IllegalArgumentException.class, () -> metaData.resolveWriteIndexRouting(randomFrom("1", null), "alias4"));
assertThat(exception.getMessage(),
is("index/alias [alias4] provided with routing value [1,2] that resolved to several routing values, rejecting operation"));
// alias with no write index
exception = expectThrows(IllegalArgumentException.class, () -> metaData.resolveWriteIndexRouting("1", "alias3"));
assertThat(exception.getMessage(),
is("alias [alias3] does not have a write index"));
// aliases with multiple indices
AliasMetaData.Builder aliasZeroBuilderTwo = AliasMetaData.builder("alias0");
if (randomBoolean()) {
aliasZeroBuilderTwo.writeIndex(false);
}
IndexMetaData.Builder builder2 = IndexMetaData.builder("index2")
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1)
.numberOfReplicas(0)
.putAlias(aliasZeroBuilderTwo.build())
.putAlias(AliasMetaData.builder("alias1").routing("0").writeIndex(true).build())
.putAlias(AliasMetaData.builder("alias2").writeIndex(true).build());
MetaData metaDataTwoIndices = MetaData.builder(metaData).put(builder2).build();
// verify that new write index is used
assertThat("0", equalTo(metaDataTwoIndices.resolveWriteIndexRouting("0", "alias1")));
}
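For reference, the routing behavior asserted above reduces to three cases; a hedged sketch over the metaData built in this test:

// 1) Alias without index routing: the request routing passes through.
String requestRouted = metaData.resolveWriteIndexRouting("0", "alias0"); // "0"

// 2) Alias with index routing: the alias routing wins, and a conflicting
//    request routing is rejected rather than silently overridden.
String aliasRouted = metaData.resolveWriteIndexRouting(null, "alias1"); // "1"
expectThrows(IllegalArgumentException.class, () -> metaData.resolveWriteIndexRouting("0", "alias1"));

// 3) Routing that expands to several values ("1,2") is always rejected for
//    single-document writes.
expectThrows(IllegalArgumentException.class, () -> metaData.resolveWriteIndexRouting(null, "alias2"));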
public void testUnknownFieldClusterMetaData() throws IOException {

View File

@ -29,6 +29,7 @@ import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.get.MultiGetRequestBuilder;
import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
@ -39,6 +40,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
@ -51,6 +53,7 @@ import java.util.Set;
import static java.util.Collections.singleton;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.instanceOf;
@ -70,7 +73,7 @@ public class GetActionIT extends ESIntegTestCase {
assertAcked(prepareCreate("test")
.addMapping("type1", "field1", "type=keyword,store=true", "field2", "type=keyword,store=true")
.setSettings(Settings.builder().put("index.refresh_interval", -1))
.addAlias(new Alias("alias")));
.addAlias(new Alias("alias").writeIndex(randomFrom(true, false, null))));
ensureGreen();
GetResponse response = client().prepareGet(indexOrAlias(), "type1", "1").get();
@ -192,12 +195,31 @@ public class GetActionIT extends ESIntegTestCase {
assertThat(response.isExists(), equalTo(false));
}
public void testGetWithAliasPointingToMultipleIndices() {
client().admin().indices().prepareCreate("index1")
.addAlias(new Alias("alias1").indexRouting("0")).get();
if (randomBoolean()) {
client().admin().indices().prepareCreate("index2")
.addAlias(new Alias("alias1").indexRouting("0").writeIndex(randomFrom(false, null))).get();
} else {
client().admin().indices().prepareCreate("index3")
.addAlias(new Alias("alias1").indexRouting("1").writeIndex(true)).get();
}
IndexResponse indexResponse = client().prepareIndex("index1", "type", "id")
.setSource(Collections.singletonMap("foo", "bar")).get();
assertThat(indexResponse.status().getStatus(), equalTo(RestStatus.CREATED.getStatus()));
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () ->
client().prepareGet("alias1", "type", "_alias_id").get());
assertThat(exception.getMessage(), endsWith("can't execute a single index op"));
}
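Note the asymmetry this test pins down: the write-index flag only disambiguates write operations, so a single-document GET through a multi-index alias still fails and must name a concrete index. A minimal sketch (the document id is illustrative):

// Concrete index: fine.
GetResponse ok = client().prepareGet("index1", "type", "id").get();

// Multi-index alias: throws IllegalArgumentException ending in
// "can't execute a single index op", even when a write index is designated.
client().prepareGet("alias1", "type", "id").get();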
private static String indexOrAlias() {
return randomBoolean() ? "test" : "alias";
}
public void testSimpleMultiGet() throws Exception {
assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
assertAcked(prepareCreate("test").addAlias(new Alias("alias").writeIndex(randomFrom(true, false, null)))
.addMapping("type1", "field", "type=keyword,store=true")
.setSettings(Settings.builder().put("index.refresh_interval", -1)));
ensureGreen();

View File

@ -91,7 +91,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
private final SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
/**
* For each provided field type, we also register an alias with name <field>-alias.
* For each provided field type, we also register an alias with name {@code <field>-alias}.
*/
@Override
protected Map<String, MappedFieldType> getFieldAliases(MappedFieldType... fieldTypes) {

View File

@ -59,7 +59,7 @@ public class ReverseNestedAggregatorTests extends AggregatorTestCase {
private static final String MAX_AGG_NAME = "maxAgg";
/**
* For each provided field type, we also register an alias with name <field>-alias.
* For each provided field type, we also register an alias with name {@code <field>-alias}.
*/
@Override
protected Map<String, MappedFieldType> getFieldAliases(MappedFieldType... fieldTypes) {

View File

@ -77,7 +77,7 @@ public class SignificantTermsAggregatorTests extends AggregatorTestCase {
}
/**
* For each provided field type, we also register an alias with name <field>-alias.
* For each provided field type, we also register an alias with name {@code <field>-alias}.
*/
@Override
protected Map<String, MappedFieldType> getFieldAliases(MappedFieldType... fieldTypes) {

View File

@ -53,7 +53,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.signific
public class SignificantTextAggregatorTests extends AggregatorTestCase {
/**
* For each provided field type, we also register an alias with name <field>-alias.
* For each provided field type, we also register an alias with name {@code <field>-alias}.
*/
@Override
protected Map<String, MappedFieldType> getFieldAliases(MappedFieldType... fieldTypes) {

View File

@ -140,8 +140,7 @@ public class UpdateIT extends ESIntegTestCase {
private void createTestIndex() throws Exception {
logger.info("--> creating index test");
assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
assertAcked(prepareCreate("test").addAlias(new Alias("alias").writeIndex(randomFrom(true, null))));
}
public void testUpsert() throws Exception {

View File

@ -131,3 +131,5 @@ if (extraProjects.exists()) {
// enable in preparation for Gradle 5.0
enableFeaturePreview('STABLE_PUBLISHING')
project(":libs:cli").name = 'elasticsearch-cli'

View File

@ -151,7 +151,6 @@ integTestCluster {
setting 'xpack.license.self_generated.type', 'trial'
keystoreSetting 'bootstrap.password', 'x-pack-test-password'
keystoreSetting 'xpack.security.transport.ssl.keystore.secure_password', 'keypass'
keystoreSetting 'xpack.security.ingest.hash.processor.key', 'hmackey'
distribution = 'zip' // this is important since we use the reindex module in ML
setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'x_pack_rest_user', '-p', 'x-pack-test-password', '-r', 'superuser'

View File

@ -6,6 +6,8 @@ import java.nio.file.Paths
import java.nio.file.StandardCopyOption
apply plugin: 'elasticsearch.esplugin'
apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'
archivesBaseName = 'x-pack-core'

View File

@ -8,6 +8,8 @@ package org.elasticsearch.xpack.core.watcher.client;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchAction;
import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchRequest;
import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchRequestBuilder;
@ -29,9 +31,7 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchReques
import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchRequestBuilder;
import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequestBuilder;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction;
import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceRequest;
import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceRequestBuilder;

View File

@ -6,6 +6,7 @@
package org.elasticsearch.xpack.core.watcher.transport.actions.put;
import org.elasticsearch.action.Action;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
/**
* This action puts a watch into the watch index and adds it to the scheduler

View File

@ -9,6 +9,8 @@ import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder;
public class PutWatchRequestBuilder extends ActionRequestBuilder<PutWatchRequest, PutWatchResponse> {
@ -43,7 +45,7 @@ public class PutWatchRequestBuilder extends ActionRequestBuilder<PutWatchRequest
* @param source the source of the watch to be created
*/
public PutWatchRequestBuilder setSource(WatchSourceBuilder source) {
request.setSource(source);
request.setSource(source.buildAsBytes(XContentType.JSON), XContentType.JSON);
return this;
}
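Because PutWatchRequest now lives in the shared protocol package and no longer knows about WatchSourceBuilder, the builder serializes the source itself. A hedged sketch of the equivalent direct call for callers already holding JSON bytes (watch body abbreviated; BytesArray is org.elasticsearch.common.bytes.BytesArray):

// Pass pre-built JSON plus its content type straight to the request.
request.setSource(new BytesArray("{\"trigger\":{\"schedule\":{\"interval\":\"10s\"}}}"),
    XContentType.JSON);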

View File

@ -1,59 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.watcher.transport.actions.put;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
/**
* The Response for a put watch action
*/
public class PutWatchResponse extends ActionResponse {
private String id;
private long version;
private boolean created;
public PutWatchResponse() {
}
public PutWatchResponse(String id, long version, boolean created) {
this.id = id;
this.version = version;
this.created = created;
}
public String getId() {
return id;
}
public long getVersion() {
return version;
}
public boolean isCreated() {
return created;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(id);
out.writeVLong(version);
out.writeBoolean(created);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
id = in.readString();
version = in.readVLong();
created = in.readBoolean();
}
}

View File

@ -39,6 +39,8 @@ import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.ingest.IngestMetadata;
import org.elasticsearch.ingest.PipelineConfiguration;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.xpack.core.XPackClient;
import org.elasticsearch.xpack.core.XPackSettings;
import org.elasticsearch.xpack.core.monitoring.MonitoredSystem;
@ -47,8 +49,6 @@ import org.elasticsearch.xpack.core.watcher.client.WatcherClient;
import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchRequest;
import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchRequest;
import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.core.watcher.watch.Watch;
import org.elasticsearch.xpack.monitoring.cleaner.CleanerService;
import org.elasticsearch.xpack.monitoring.exporter.ClusterAlertsUtil;

View File

@ -425,7 +425,8 @@ public abstract class RollupIndexer {
assert lowerBound <= maxBoundary;
final RangeQueryBuilder query = new RangeQueryBuilder(fieldName)
.gte(lowerBound)
.lt(maxBoundary);
.lt(maxBoundary)
.format("epoch_millis");
return query;
}
}
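Pinning the format here matters because the bounds are epoch-millisecond longs produced by the indexer; without an explicit format, the range query would parse them with the date field's own mapping format. A hedged sketch with assumed literal values:

// The bounds are raw epoch milliseconds, so the query must say so explicitly.
RangeQueryBuilder query = new RangeQueryBuilder("timestamp")
    .gte(1531958400000L)  // 2018-07-19T00:00:00Z
    .lt(1532044800000L)   // 2018-07-20T00:00:00Z
    .format("epoch_millis");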

View File

@ -29,6 +29,8 @@ import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchResponseSections;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexSettings;
@ -506,6 +508,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase {
private Map<String, MappedFieldType> createFieldTypes(RollupJobConfig job) {
Map<String, MappedFieldType> fieldTypes = new HashMap<>();
MappedFieldType fieldType = new DateFieldMapper.Builder(job.getGroupConfig().getDateHisto().getField())
.dateTimeFormatter(Joda.forPattern(randomFrom("basic_date", "date_optional_time", "epoch_second")))
.build(new Mapper.BuilderContext(settings.getSettings(), new ContentPath(0)))
.fieldType();
fieldTypes.put(fieldType.name(), fieldType);
@ -618,7 +621,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase {
RangeQueryBuilder range = (RangeQueryBuilder) request.source().query();
final DateTimeZone timeZone = range.timeZone() != null ? DateTimeZone.forID(range.timeZone()) : null;
Query query = timestampField.rangeQuery(range.from(), range.to(), range.includeLower(), range.includeUpper(),
null, timeZone, null, queryShardContext);
null, timeZone, new DateMathParser(Joda.forPattern(range.format())), queryShardContext);
// extract composite agg
assertThat(request.source().aggregations().getAggregatorFactories().size(), equalTo(1));
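The test mirrors that change by rebuilding the expected query with a DateMathParser for whatever format the request carried. A small hedged sketch of that parsing step (the literal value and the LongSupplier are illustrative; the DateMathParser constructor is assumed from this diff's own usage):

// Parse a bound the way the rewritten range query does, honoring the format.
DateMathParser parser = new DateMathParser(Joda.forPattern("epoch_millis"));
long lowerBound = parser.parse("1531958400000", () -> System.currentTimeMillis());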

View File

@ -175,7 +175,6 @@ import org.elasticsearch.xpack.security.authz.accesscontrol.OptOutQueryCache;
import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore;
import org.elasticsearch.xpack.security.authz.store.FileRolesStore;
import org.elasticsearch.xpack.security.authz.store.NativeRolesStore;
import org.elasticsearch.xpack.security.ingest.HashProcessor;
import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor;
import org.elasticsearch.xpack.security.rest.SecurityRestFilter;
import org.elasticsearch.xpack.security.rest.action.RestAuthenticateAction;
@ -580,10 +579,6 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
// hide settings
settingsList.add(Setting.listSetting(SecurityField.setting("hide_settings"), Collections.emptyList(), Function.identity(),
Property.NodeScope, Property.Filtered));
// ingest processor settings
settingsList.add(HashProcessor.HMAC_KEY_SETTING);
return settingsList;
}
@ -727,10 +722,7 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
@Override
public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) {
Map<String, Processor.Factory> processors = new HashMap<>();
processors.put(SetSecurityUserProcessor.TYPE, new SetSecurityUserProcessor.Factory(parameters.threadContext));
processors.put(HashProcessor.TYPE, new HashProcessor.Factory(parameters.env.settings()));
return processors;
return Collections.singletonMap(SetSecurityUserProcessor.TYPE, new SetSecurityUserProcessor.Factory(parameters.threadContext));
}

View File

@ -1,200 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.ingest;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.ingest.AbstractProcessor;
import org.elasticsearch.ingest.ConfigurationUtils;
import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.ingest.Processor;
import org.elasticsearch.xpack.core.security.SecurityField;
import javax.crypto.Mac;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.security.spec.InvalidKeySpecException;
import java.util.Arrays;
import java.util.Base64;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException;
/**
* A processor that hashes the contents of a field (or fields) using various hashing algorithms
*/
public final class HashProcessor extends AbstractProcessor {
public static final String TYPE = "hash";
public static final Setting.AffixSetting<SecureString> HMAC_KEY_SETTING = SecureSetting
.affixKeySetting(SecurityField.setting("ingest." + TYPE) + ".", "key",
(key) -> SecureSetting.secureString(key, null));
private final List<String> fields;
private final String targetField;
private final Method method;
private final Mac mac;
private final byte[] salt;
private final boolean ignoreMissing;
HashProcessor(String tag, List<String> fields, String targetField, byte[] salt, Method method, @Nullable Mac mac,
boolean ignoreMissing) {
super(tag);
this.fields = fields;
this.targetField = targetField;
this.method = method;
this.mac = mac;
this.salt = salt;
this.ignoreMissing = ignoreMissing;
}
List<String> getFields() {
return fields;
}
String getTargetField() {
return targetField;
}
byte[] getSalt() {
return salt;
}
@Override
public void execute(IngestDocument document) {
Map<String, String> hashedFieldValues = fields.stream().map(f -> {
String value = document.getFieldValue(f, String.class, ignoreMissing);
if (value == null && ignoreMissing) {
return new Tuple<String, String>(null, null);
}
try {
return new Tuple<>(f, method.hash(mac, salt, value));
} catch (Exception e) {
throw new IllegalArgumentException("field[" + f + "] could not be hashed", e);
}
}).filter(tuple -> Objects.nonNull(tuple.v1())).collect(Collectors.toMap(Tuple::v1, Tuple::v2));
if (fields.size() == 1) {
document.setFieldValue(targetField, hashedFieldValues.values().iterator().next());
} else {
document.setFieldValue(targetField, hashedFieldValues);
}
}
@Override
public String getType() {
return TYPE;
}
public static final class Factory implements Processor.Factory {
private final Settings settings;
private final Map<String, SecureString> secureKeys;
public Factory(Settings settings) {
this.settings = settings;
this.secureKeys = new HashMap<>();
HMAC_KEY_SETTING.getAllConcreteSettings(settings).forEach(k -> {
secureKeys.put(k.getKey(), k.get(settings));
});
}
private static Mac createMac(Method method, SecureString password, byte[] salt, int iterations) {
try {
SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance("PBKDF2With" + method.getAlgorithm());
PBEKeySpec keySpec = new PBEKeySpec(password.getChars(), salt, iterations, 128);
byte[] pbkdf2 = secretKeyFactory.generateSecret(keySpec).getEncoded();
Mac mac = Mac.getInstance(method.getAlgorithm());
mac.init(new SecretKeySpec(pbkdf2, method.getAlgorithm()));
return mac;
} catch (NoSuchAlgorithmException | InvalidKeySpecException | InvalidKeyException e) {
throw new IllegalArgumentException("invalid settings", e);
}
}
@Override
public HashProcessor create(Map<String, Processor.Factory> registry, String processorTag, Map<String, Object> config) {
boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false);
List<String> fields = ConfigurationUtils.readList(TYPE, processorTag, config, "fields");
if (fields.isEmpty()) {
throw ConfigurationUtils.newConfigurationException(TYPE, processorTag, "fields", "must specify at least one field");
} else if (fields.stream().anyMatch(Strings::isNullOrEmpty)) {
throw ConfigurationUtils.newConfigurationException(TYPE, processorTag, "fields",
"a field-name entry is either empty or null");
}
String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field");
String keySettingName = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "key_setting");
SecureString key = secureKeys.get(keySettingName);
if (key == null) {
throw ConfigurationUtils.newConfigurationException(TYPE, processorTag, "key_setting",
"key [" + keySettingName + "] must match [xpack.security.ingest.hash.*.key]. It is not set");
}
String saltString = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "salt");
byte[] salt = saltString.getBytes(StandardCharsets.UTF_8);
String methodProperty = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "method", "SHA256");
Method method = Method.fromString(processorTag, "method", methodProperty);
int iterations = ConfigurationUtils.readIntProperty(TYPE, processorTag, config, "iterations", 5);
Mac mac = createMac(method, key, salt, iterations);
return new HashProcessor(processorTag, fields, targetField, salt, method, mac, ignoreMissing);
}
}
enum Method {
SHA1("HmacSHA1"),
SHA256("HmacSHA256"),
SHA384("HmacSHA384"),
SHA512("HmacSHA512");
private final String algorithm;
Method(String algorithm) {
this.algorithm = algorithm;
}
public String getAlgorithm() {
return algorithm;
}
@Override
public String toString() {
return name().toLowerCase(Locale.ROOT);
}
public String hash(Mac mac, byte[] salt, String input) {
try {
byte[] encrypted = mac.doFinal(input.getBytes(StandardCharsets.UTF_8));
byte[] messageWithSalt = new byte[salt.length + encrypted.length];
System.arraycopy(salt, 0, messageWithSalt, 0, salt.length);
System.arraycopy(encrypted, 0, messageWithSalt, salt.length, encrypted.length);
return Base64.getEncoder().encodeToString(messageWithSalt);
} catch (IllegalStateException e) {
throw new ElasticsearchException("error hashing data", e);
}
}
public static Method fromString(String processorTag, String propertyName, String type) {
try {
return Method.valueOf(type.toUpperCase(Locale.ROOT));
} catch(IllegalArgumentException e) {
throw newConfigurationException(TYPE, processorTag, propertyName, "type [" + type +
"] not supported, cannot convert field. Valid hash methods: " + Arrays.toString(Method.values()));
}
}
}
}
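For the record, the removed processor's output format is Base64(salt || HMAC(PBKDF2(key, salt, iterations), value)), which is what the testStaticKeyAndSalt expectation further below encodes. A self-contained, JDK-only sketch of that computation, mirroring the deleted code for the SHA1 method:

import javax.crypto.Mac;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

static String hash(String key, byte[] salt, int iterations, String value) throws Exception {
    // Derive the HMAC key from the secure-setting key via PBKDF2.
    SecretKeyFactory factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1");
    byte[] derived = factory.generateSecret(
        new PBEKeySpec(key.toCharArray(), salt, iterations, 128)).getEncoded();
    Mac mac = Mac.getInstance("HmacSHA1");
    mac.init(new SecretKeySpec(derived, "HmacSHA1"));
    byte[] digest = mac.doFinal(value.getBytes(StandardCharsets.UTF_8));
    // Prefix the salt so consumers can see which salt produced the digest.
    byte[] out = new byte[salt.length + digest.length];
    System.arraycopy(salt, 0, out, 0, salt.length);
    System.arraycopy(digest, 0, out, salt.length, digest.length);
    return Base64.getEncoder().encodeToString(out);
}

With key "hmackey", salt "_salt", and 5 iterations this should reproduce the "X3NhbHQ..." value asserted in the deleted tests.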

View File

@ -1,136 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.ingest;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
public class HashProcessorFactoryTests extends ESTestCase {
public void testProcessor() {
MockSecureSettings mockSecureSettings = new MockSecureSettings();
mockSecureSettings.setString("xpack.security.ingest.hash.processor.key", "my_key");
Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build();
HashProcessor.Factory factory = new HashProcessor.Factory(settings);
Map<String, Object> config = new HashMap<>();
config.put("fields", Collections.singletonList("_field"));
config.put("target_field", "_target");
config.put("salt", "_salt");
config.put("key_setting", "xpack.security.ingest.hash.processor.key");
for (HashProcessor.Method method : HashProcessor.Method.values()) {
config.put("method", method.toString());
HashProcessor processor = factory.create(null, "_tag", new HashMap<>(config));
assertThat(processor.getFields(), equalTo(Collections.singletonList("_field")));
assertThat(processor.getTargetField(), equalTo("_target"));
assertArrayEquals(processor.getSalt(), "_salt".getBytes(StandardCharsets.UTF_8));
}
}
public void testProcessorNoFields() {
MockSecureSettings mockSecureSettings = new MockSecureSettings();
mockSecureSettings.setString("xpack.security.ingest.hash.processor.key", "my_key");
Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build();
HashProcessor.Factory factory = new HashProcessor.Factory(settings);
Map<String, Object> config = new HashMap<>();
config.put("target_field", "_target");
config.put("salt", "_salt");
config.put("key_setting", "xpack.security.ingest.hash.processor.key");
config.put("method", HashProcessor.Method.SHA1.toString());
ElasticsearchException e = expectThrows(ElasticsearchException.class,
() -> factory.create(null, "_tag", config));
assertThat(e.getMessage(), equalTo("[fields] required property is missing"));
}
public void testProcessorNoTargetField() {
MockSecureSettings mockSecureSettings = new MockSecureSettings();
mockSecureSettings.setString("xpack.security.ingest.hash.processor.key", "my_key");
Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build();
HashProcessor.Factory factory = new HashProcessor.Factory(settings);
Map<String, Object> config = new HashMap<>();
config.put("fields", Collections.singletonList("_field"));
config.put("salt", "_salt");
config.put("key_setting", "xpack.security.ingest.hash.processor.key");
config.put("method", HashProcessor.Method.SHA1.toString());
ElasticsearchException e = expectThrows(ElasticsearchException.class,
() -> factory.create(null, "_tag", config));
assertThat(e.getMessage(), equalTo("[target_field] required property is missing"));
}
public void testProcessorFieldsIsEmpty() {
MockSecureSettings mockSecureSettings = new MockSecureSettings();
mockSecureSettings.setString("xpack.security.ingest.hash.processor.key", "my_key");
Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build();
HashProcessor.Factory factory = new HashProcessor.Factory(settings);
Map<String, Object> config = new HashMap<>();
config.put("fields", Collections.singletonList(randomBoolean() ? "" : null));
config.put("salt", "_salt");
config.put("target_field", "_target");
config.put("key_setting", "xpack.security.ingest.hash.processor.key");
config.put("method", HashProcessor.Method.SHA1.toString());
ElasticsearchException e = expectThrows(ElasticsearchException.class,
() -> factory.create(null, "_tag", config));
assertThat(e.getMessage(), equalTo("[fields] a field-name entry is either empty or null"));
}
public void testProcessorMissingSalt() {
MockSecureSettings mockSecureSettings = new MockSecureSettings();
mockSecureSettings.setString("xpack.security.ingest.hash.processor.key", "my_key");
Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build();
HashProcessor.Factory factory = new HashProcessor.Factory(settings);
Map<String, Object> config = new HashMap<>();
config.put("fields", Collections.singletonList("_field"));
config.put("target_field", "_target");
config.put("key_setting", "xpack.security.ingest.hash.processor.key");
ElasticsearchException e = expectThrows(ElasticsearchException.class,
() -> factory.create(null, "_tag", config));
assertThat(e.getMessage(), equalTo("[salt] required property is missing"));
}
public void testProcessorInvalidMethod() {
MockSecureSettings mockSecureSettings = new MockSecureSettings();
mockSecureSettings.setString("xpack.security.ingest.hash.processor.key", "my_key");
Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build();
HashProcessor.Factory factory = new HashProcessor.Factory(settings);
Map<String, Object> config = new HashMap<>();
config.put("fields", Collections.singletonList("_field"));
config.put("salt", "_salt");
config.put("target_field", "_target");
config.put("key_setting", "xpack.security.ingest.hash.processor.key");
config.put("method", "invalid");
ElasticsearchException e = expectThrows(ElasticsearchException.class,
() -> factory.create(null, "_tag", config));
assertThat(e.getMessage(), equalTo("[method] type [invalid] not supported, cannot convert field. " +
"Valid hash methods: [sha1, sha256, sha384, sha512]"));
}
public void testProcessorInvalidOrMissingKeySetting() {
Settings settings = Settings.builder().setSecureSettings(new MockSecureSettings()).build();
HashProcessor.Factory factory = new HashProcessor.Factory(settings);
Map<String, Object> config = new HashMap<>();
config.put("fields", Collections.singletonList("_field"));
config.put("salt", "_salt");
config.put("target_field", "_target");
config.put("key_setting", "invalid");
config.put("method", HashProcessor.Method.SHA1.toString());
ElasticsearchException e = expectThrows(ElasticsearchException.class,
() -> factory.create(null, "_tag", new HashMap<>(config)));
assertThat(e.getMessage(),
equalTo("[key_setting] key [invalid] must match [xpack.security.ingest.hash.*.key]. It is not set"));
config.remove("key_setting");
ElasticsearchException ex = expectThrows(ElasticsearchException.class,
() -> factory.create(null, "_tag", config));
assertThat(ex.getMessage(), equalTo("[key_setting] required property is missing"));
}
}

View File

@ -1,130 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.ingest;
import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.security.ingest.HashProcessor.Method;
import javax.crypto.Mac;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
public class HashProcessorTests extends ESTestCase {
@SuppressWarnings("unchecked")
public void testIgnoreMissing() throws Exception {
Method method = randomFrom(Method.values());
Mac mac = createMac(method);
Map<String, Object> fields = new HashMap<>();
fields.put("one", "foo");
HashProcessor processor = new HashProcessor("_tag", Arrays.asList("one", "two"),
"target", "_salt".getBytes(StandardCharsets.UTF_8), Method.SHA1, mac, true);
IngestDocument ingestDocument = new IngestDocument(fields, new HashMap<>());
processor.execute(ingestDocument);
Map<String, String> target = ingestDocument.getFieldValue("target", Map.class);
assertThat(target.size(), equalTo(1));
assertNotNull(target.get("one"));
HashProcessor failProcessor = new HashProcessor("_tag", Arrays.asList("one", "two"),
"target", "_salt".getBytes(StandardCharsets.UTF_8), Method.SHA1, mac, false);
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> failProcessor.execute(ingestDocument));
assertThat(exception.getMessage(), equalTo("field [two] not present as part of path [two]"));
}
public void testStaticKeyAndSalt() throws Exception {
byte[] salt = "_salt".getBytes(StandardCharsets.UTF_8);
SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1");
PBEKeySpec keySpec = new PBEKeySpec("hmackey".toCharArray(), salt, 5, 128);
byte[] pbkdf2 = secretKeyFactory.generateSecret(keySpec).getEncoded();
Mac mac = Mac.getInstance(Method.SHA1.getAlgorithm());
mac.init(new SecretKeySpec(pbkdf2, Method.SHA1.getAlgorithm()));
Map<String, Object> fields = new HashMap<>();
fields.put("field", "0123456789");
HashProcessor processor = new HashProcessor("_tag", Collections.singletonList("field"),
"target", salt, Method.SHA1, mac, false);
IngestDocument ingestDocument = new IngestDocument(fields, new HashMap<>());
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue("target", String.class), equalTo("X3NhbHQMW0oHJGEEE9obGcGv5tGd7HFyDw=="));
}
public void testProcessorSingleField() throws Exception {
List<String> fields = Collections.singletonList(randomAlphaOfLength(6));
Map<String, Object> docFields = new HashMap<>();
for (String field : fields) {
docFields.put(field, randomAlphaOfLengthBetween(2, 10));
}
String targetField = randomAlphaOfLength(6);
Method method = randomFrom(Method.values());
Mac mac = createMac(method);
byte[] salt = randomByteArrayOfLength(5);
HashProcessor processor = new HashProcessor("_tag", fields, targetField, salt, method, mac, false);
IngestDocument ingestDocument = new IngestDocument(docFields, new HashMap<>());
processor.execute(ingestDocument);
String targetFieldValue = ingestDocument.getFieldValue(targetField, String.class);
Object expectedTargetFieldValue = method.hash(mac, salt, ingestDocument.getFieldValue(fields.get(0), String.class));
assertThat(targetFieldValue, equalTo(expectedTargetFieldValue));
byte[] bytes = Base64.getDecoder().decode(targetFieldValue);
byte[] actualSaltPrefix = new byte[salt.length];
System.arraycopy(bytes, 0, actualSaltPrefix, 0, salt.length);
assertArrayEquals(salt, actualSaltPrefix);
}
@SuppressWarnings("unchecked")
public void testProcessorMultipleFields() throws Exception {
List<String> fields = new ArrayList<>();
for (int i = 0; i < randomIntBetween(2, 10); i++) {
fields.add(randomAlphaOfLength(5 + i));
}
Map<String, Object> docFields = new HashMap<>();
for (String field : fields) {
docFields.put(field, randomAlphaOfLengthBetween(2, 10));
}
String targetField = randomAlphaOfLength(6);
Method method = randomFrom(Method.values());
Mac mac = createMac(method);
byte[] salt = randomByteArrayOfLength(5);
HashProcessor processor = new HashProcessor("_tag", fields, targetField, salt, method, mac, false);
IngestDocument ingestDocument = new IngestDocument(docFields, new HashMap<>());
processor.execute(ingestDocument);
Map<String, String> targetFieldMap = ingestDocument.getFieldValue(targetField, Map.class);
for (Map.Entry<String, String> entry : targetFieldMap.entrySet()) {
Object expectedTargetFieldValue = method.hash(mac, salt, ingestDocument.getFieldValue(entry.getKey(), String.class));
assertThat(entry.getValue(), equalTo(expectedTargetFieldValue));
byte[] bytes = Base64.getDecoder().decode(entry.getValue());
byte[] actualSaltPrefix = new byte[salt.length];
System.arraycopy(bytes, 0, actualSaltPrefix, 0, salt.length);
assertArrayEquals(salt, actualSaltPrefix);
}
}
private Mac createMac(Method method) throws Exception {
char[] password = randomAlphaOfLengthBetween(1, 10).toCharArray();
byte[] salt = randomAlphaOfLength(5).getBytes(StandardCharsets.UTF_8);
int iterations = randomIntBetween(1, 10);
SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance("PBKDF2With" + method.getAlgorithm());
PBEKeySpec keySpec = new PBEKeySpec(password, salt, iterations, 128);
byte[] pbkdf2 = secretKeyFactory.generateSecret(keySpec).getEncoded();
Mac mac = Mac.getInstance(method.getAlgorithm());
mac.init(new SecretKeySpec(pbkdf2, method.getAlgorithm()));
return mac;
}
}

View File

@ -1,51 +0,0 @@
---
teardown:
- do:
ingest.delete_pipeline:
id: "my_pipeline"
ignore: 404
---
"Test Hash Processor":
- do:
cluster.health:
wait_for_status: yellow
- do:
ingest.put_pipeline:
id: "my_pipeline"
body: >
{
"processors": [
{
"hash" : {
"fields" : ["user_ssid"],
"target_field" : "anonymized",
"salt": "_salt",
"iterations": 5,
"method": "sha1",
"key_setting": "xpack.security.ingest.hash.processor.key"
}
}
]
}
- match: { acknowledged: true }
- do:
index:
index: test
type: test
id: 1
pipeline: "my_pipeline"
body: >
{
"user_ssid": "0123456789"
}
- do:
get:
index: test
type: test
id: 1
- match: { _source.anonymized: "X3NhbHQMW0oHJGEEE9obGcGv5tGd7HFyDw==" }

View File

@ -310,3 +310,74 @@ teardown:
index: write_index_2
body: { "query": { "terms": { "_id": [ "19" ] } } }
- match: { hits.total: 1 }
---
"Test bulk indexing into an alias when resolved to write index":
- do:
indices.update_aliases:
body:
actions:
- add:
index: write_index_2
alias: can_write_2
is_write_index: true
- add:
index: write_index_2
alias: can_read_2
is_write_index: true
- add:
index: write_index_1
alias: can_write_3
is_write_index: true
- add:
index: write_index_2
alias: can_write_3
is_write_index: false
- do:
headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
bulk:
refresh: true
body:
- '{"index": {"_index": "can_read_1", "_type": "doc", "_id": "20"}}'
- '{"name": "doc20"}'
- '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "21"}}'
- '{"name": "doc21"}'
- '{"index": {"_index": "can_read_2", "_type": "doc", "_id": "22"}}'
- '{"name": "doc22"}'
- '{"index": {"_index": "can_write_2", "_type": "doc", "_id": "23"}}'
- '{"name": "doc23"}'
- '{"index": {"_index": "can_write_3", "_type": "doc", "_id": "24"}}'
- '{"name": "doc24"}'
- '{"update": {"_index": "can_write_3", "_type": "doc", "_id": "24"}}'
- '{"doc": { "name": "doc_24"}}'
- '{"delete": {"_index": "can_write_3", "_type": "doc", "_id": "24"}}'
- match: { errors: true }
- match: { items.0.index.status: 403 }
- match: { items.0.index.error.type: "security_exception" }
- match: { items.1.index.status: 201 }
- match: { items.2.index.status: 403 }
- match: { items.2.index.error.type: "security_exception" }
- match: { items.3.index.status: 403 }
- match: { items.3.index.error.type: "security_exception" }
- match: { items.4.index.status: 201 }
- match: { items.5.update.status: 200 }
- match: { items.6.delete.status: 200 }
- do: # superuser
search:
index: write_index_1
body: { "query": { "terms": { "_id": [ "21" ] } } }
- match: { hits.total: 1 }
- do:
indices.delete_alias:
index: "write_index_2"
name: [ "can_write_2", "can_read_2" ]
ignore: 404
- do:
indices.delete_alias:
index: "write_index_1"
name: [ "can_write_3" ]
ignore: 404
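The per-item statuses asserted above reflect how bulk authorization failures surface: each rejected item carries its own security_exception while authorized items in the same request still succeed. A hedged Java-client counterpart (run in an ESIntegTestCase context; ids and sources are illustrative):

BulkResponse response = client().prepareBulk()
    .add(new IndexRequest("can_read_1", "doc", "20").source("name", "doc20"))   // rejected: 403
    .add(new IndexRequest("can_write_1", "doc", "21").source("name", "doc21"))  // created: 201
    .get();
for (BulkItemResponse item : response.getItems()) {
    if (item.isFailed()) {
        RestStatus status = item.getFailure().getStatus(); // FORBIDDEN for the alias the user cannot write
    }
}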

View File

@ -9,6 +9,8 @@ import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
@ -17,8 +19,6 @@ import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestBuilderListener;
import org.elasticsearch.xpack.core.security.rest.RestRequestFilter;
import org.elasticsearch.xpack.core.watcher.client.WatcherClient;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.rest.WatcherRestHandler;
import java.io.IOException;

View File

@ -19,13 +19,13 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.ClientHelper;
import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.core.watcher.watch.Watch;
import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction;
import org.elasticsearch.xpack.watcher.watch.WatchParser;

View File

@ -8,9 +8,9 @@ package org.elasticsearch.xpack.watcher.actions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource;
import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.actions.index.IndexAction;
import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;

View File

@ -8,13 +8,13 @@ package org.elasticsearch.xpack.watcher.actions;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField;
import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;
import java.util.Map;

View File

@ -10,6 +10,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.xpack.core.watcher.actions.Action;
import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder;
import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode;
@ -17,7 +18,6 @@ import org.elasticsearch.xpack.core.watcher.execution.ExecutionState;
import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath;
import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequestBuilder;
import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest;
import org.elasticsearch.xpack.core.watcher.watch.Watch;
import org.elasticsearch.xpack.watcher.actions.email.EmailAction;
import org.elasticsearch.xpack.watcher.actions.index.IndexAction;

View File

@ -7,6 +7,7 @@ package org.elasticsearch.xpack.watcher.history;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.script.MockScriptPlugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
@ -18,7 +19,6 @@ import org.elasticsearch.xpack.core.watcher.condition.Condition;
import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition;
import org.elasticsearch.xpack.core.watcher.execution.ExecutionState;
import org.elasticsearch.xpack.core.watcher.input.Input;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.condition.CompareCondition;
import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition;
import org.elasticsearch.xpack.watcher.condition.NeverCondition;

View File

@ -7,12 +7,12 @@ package org.elasticsearch.xpack.watcher.history;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.xpack.core.watcher.execution.ExecutionState;
import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition;
import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate;
import org.elasticsearch.xpack.watcher.notification.email.support.EmailServer;

View File

@ -11,6 +11,7 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.test.http.MockResponse;
@ -18,7 +19,6 @@ import org.elasticsearch.test.http.MockWebServer;
import org.elasticsearch.xpack.core.watcher.execution.ExecutionState;
import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField;
import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.common.http.HttpMethod;
import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate;
import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition;

View File

@ -6,11 +6,11 @@
package org.elasticsearch.xpack.watcher.history;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.xpack.core.watcher.execution.ExecutionState;
import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;

View File

@ -8,11 +8,11 @@ package org.elasticsearch.xpack.watcher.history;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.xpack.core.watcher.execution.ExecutionState;
import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition;
import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest;
import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;

View File

@ -10,9 +10,9 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.xpack.core.watcher.execution.ExecutionState;
import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition;
import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;

View File

@ -11,12 +11,12 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.node.MockNode;
import org.elasticsearch.node.Node;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder;
import org.elasticsearch.xpack.core.watcher.client.WatcherClient;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest;
import org.elasticsearch.xpack.watcher.Watcher;
import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate;
import org.elasticsearch.xpack.watcher.condition.ScriptCondition;
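
Call sites are unaffected by the package move: a watch is still registered the same way, just with the relocated request and response types. A hedged sketch, assuming WatcherClient's putWatch(PutWatchRequest) method and the PutWatchRequest(String, BytesReference, XContentType) constructor as they appear on this branch; the watch id and source are placeholders:

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.xpack.core.watcher.client.WatcherClient;

// Sketch: register a watch using the relocated protocol classes.
// "my_watch" and the JSON source are illustrative, not from this diff.
static boolean registerWatch(WatcherClient watcherClient, BytesReference watchJson) {
    PutWatchRequest request = new PutWatchRequest("my_watch", watchJson, XContentType.JSON);
    PutWatchResponse response = watcherClient.putWatch(request).actionGet();
    return response.isCreated();   // true if the watch was newly created
}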

View File

@ -11,6 +11,7 @@ import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.builder.SearchSourceBuilder;
@ -20,7 +21,6 @@ import org.elasticsearch.xpack.core.watcher.client.WatcherClient;
import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource;
import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchResponse;
import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.core.watcher.watch.Watch;
import org.elasticsearch.xpack.watcher.condition.CompareCondition;
import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition;

View File

@ -8,12 +8,12 @@ package org.elasticsearch.xpack.watcher.test.integration;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.script.MockScriptPlugin;
import org.elasticsearch.xpack.core.watcher.client.WatcherClient;
import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath;
import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource;
import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.condition.ScriptCondition;
import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;
import org.hamcrest.Matcher;

View File

@ -10,6 +10,7 @@ import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
@ -17,7 +18,6 @@ import org.elasticsearch.xpack.core.watcher.actions.ActionStatus;
import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder;
import org.elasticsearch.xpack.core.watcher.input.Input;
import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource;
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.core.watcher.watch.WatchStatus;
import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest;
import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;

Some files were not shown because too many files have changed in this diff.