Merge branch 'master' into index-lifecycle

x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java
/Users/colings86/dev/work/git/elasticsearch/.git/worktrees/elasticsearch-ilm/MERGE_HEAD

buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java
client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java
client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java
docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc
docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java
modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/ServerUtils.java
qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java
qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats
server/src/main/java/org/elasticsearch/index/analysis/ESSolrSynonymParser.java
server/src/main/java/org/elasticsearch/index/analysis/ESWordnetSynonymParser.java
server/src/main/java/org/elasticsearch/index/analysis/SynonymGraphTokenFilterFactory.java
server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java
server/src/test/java/org/elasticsearch/index/analysis/ESSolrSynonymParserTests.java
server/src/test/java/org/elasticsearch/index/analysis/ESWordnetSynonymParserTests.java
server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
test/framework/src/main/java/org/elasticsearch/ingest/IngestDocumentMatcher.java
test/framework/src/test/java/org/elasticsearch/ingest/IngestDocumentMatcherTests.java
x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java
x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/beats/BeatsFeatureSetUsage.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/TransportXPackInfoActionTests.java
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/license/LicenseStatus.java -> x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/license/LicenseStatus.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/license/package-info.java -> x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/license/package-info.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/security/package-info.java -> x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/watcher/package-info.java -> x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/license/LicenseStatusTests.java -> x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java
x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml
Colin Goodheart-Smithe 2018-07-11 10:26:44 +01:00
commit cd89ca2685
52 changed files with 1554 additions and 443 deletions

buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy

@@ -546,9 +546,15 @@ class VagrantTestPlugin implements Plugin<Project> {
             javaPackagingTest.command = 'ssh'
             javaPackagingTest.args = ['--command', 'sudo bash "$PACKAGING_TESTS/run-tests.sh"']
         } else {
+            // powershell sessions run over winrm always run as administrator, whether --elevated is passed or not. however
+            // remote sessions have some restrictions on what they can do, such as impersonating another user (or the same user
+            // without administrator elevation), which we need to do for these tests. passing --elevated runs the session
+            // as a scheduled job locally on the vm as a true administrator to get around this limitation
+            //
+            // https://github.com/hashicorp/vagrant/blob/9c299a2a357fcf87f356bb9d56e18a037a53d138/plugins/communicators/winrm/communicator.rb#L195-L225
+            // https://devops-collective-inc.gitbooks.io/secrets-of-powershell-remoting/content/manuscript/accessing-remote-computers.html
             javaPackagingTest.command = 'winrm'
-            // winrm commands run as administrator
-            javaPackagingTest.args = ['--command', 'powershell -File "$Env:PACKAGING_TESTS/run-tests.ps1"']
+            javaPackagingTest.args = ['--elevated', '--command', 'powershell -File "$Env:PACKAGING_TESTS/run-tests.ps1"']
         }
         TaskExecutionAdapter javaPackagingReproListener = createReproListener(project, javaPackagingTest.path)

client/rest-high-level/build.gradle

@@ -1,5 +1,3 @@
-import org.elasticsearch.gradle.precommit.PrecommitTasks
-
 /*
  * Licensed to Elasticsearch under one or more contributor
  * license agreements. See the NOTICE file distributed with
@@ -18,30 +16,86 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+import org.elasticsearch.gradle.precommit.PrecommitTasks
+import org.gradle.api.XmlProvider
+import org.gradle.api.publish.maven.MavenPublication
+
+buildscript {
+    repositories {
+        maven {
+            url 'https://plugins.gradle.org/m2/'
+        }
+    }
+    dependencies {
+        classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.4'
+    }
+}
+
 apply plugin: 'elasticsearch.build'
 apply plugin: 'elasticsearch.rest-test'
 apply plugin: 'nebula.maven-base-publish'
 apply plugin: 'nebula.maven-scm'
+apply plugin: 'com.github.johnrengelman.shadow'
 
 group = 'org.elasticsearch.client'
 archivesBaseName = 'elasticsearch-rest-high-level-client'
 
 publishing {
     publications {
-        nebula {
+        nebula(MavenPublication) {
+            artifact shadowJar
             artifactId = archivesBaseName
+            /*
+             * Configure the pom to include the "shadow" as compile dependencies
+             * because that is how we're using them but remove all other dependencies
+             * because they've been shaded into the jar.
+             */
+            pom.withXml { XmlProvider xml ->
+                Node root = xml.asNode()
+                root.remove(root.dependencies)
+                Node dependenciesNode = root.appendNode('dependencies')
+                project.configurations.shadow.allDependencies.each {
+                    if (false == it instanceof SelfResolvingDependency) {
+                        Node dependencyNode = dependenciesNode.appendNode('dependency')
+                        dependencyNode.appendNode('groupId', it.group)
+                        dependencyNode.appendNode('artifactId', it.name)
+                        dependencyNode.appendNode('version', it.version)
+                        dependencyNode.appendNode('scope', 'compile')
+                    }
+                }
+            }
         }
     }
 }
 
+/*
+ * We need somewhere to configure dependencies that we don't wish to shade
+ * into the high level REST client. The shadow plugin creates a "shadow"
+ * configuration which is *almost* exactly that. It is never bundled into
+ * the shaded jar but is used for main source compilation. Unfortunately,
+ * by default it is not used for *test* source compilation and isn't used
+ * in tests at all. This change makes it available for test compilation.
+ * A change below makes it available for testing.
+ */
+sourceSets {
+    test {
+        compileClasspath += configurations.shadow
+    }
+}
+
 dependencies {
-    compile "org.elasticsearch:elasticsearch:${version}"
-    compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
-    compile "org.elasticsearch.plugin:parent-join-client:${version}"
-    compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}"
-    compile "org.elasticsearch.plugin:rank-eval-client:${version}"
-    compile "org.elasticsearch.plugin:lang-mustache-client:${version}"
-    compile project(':x-pack:protocol') // TODO bundle into the jar
+    /*
+     * Everything in the "shadow" configuration is *not* copied into the
+     * shadowJar.
+     */
+    shadow "org.elasticsearch:elasticsearch:${version}"
+    shadow "org.elasticsearch.client:elasticsearch-rest-client:${version}"
+    shadow "org.elasticsearch.plugin:parent-join-client:${version}"
+    shadow "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}"
+    shadow "org.elasticsearch.plugin:rank-eval-client:${version}"
+    shadow "org.elasticsearch.plugin:lang-mustache-client:${version}"
+    compile project(':x-pack:protocol')
 
     testCompile "org.elasticsearch.client:test:${version}"
     testCompile "org.elasticsearch.test:framework:${version}"
@@ -64,3 +118,48 @@ forbiddenApisMain {
     signaturesURLs += [PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
     signaturesURLs += [file('src/main/resources/forbidden/rest-high-level-signatures.txt').toURI().toURL()]
 }
+
+shadowJar {
+    classifier = null
+    mergeServiceFiles()
+}
+
+// We don't need normal jar, we use shadow jar instead
+jar.enabled = false
+assemble.dependsOn shadowJar
+
+javadoc {
+    /*
+     * Bundle all of the javadoc from all of the shaded projects into this one
+     * so we don't *have* to publish javadoc for all of the "client" jars.
+     */
+    configurations.compile.dependencies.all { Dependency dep ->
+        Project p = dependencyToProject(dep)
+        if (p != null) {
+            evaluationDependsOn(p.path)
+            source += p.sourceSets.main.allJava
+        }
+    }
+}
+
+/*
+ * Use the jar for testing so we have tests of the bundled jar.
+ * Use the "shadow" configuration for testing because we need things
+ * in it.
+ */
+test {
+    classpath -= compileJava.outputs.files
+    classpath -= configurations.compile
+    classpath -= configurations.runtime
+    classpath += configurations.shadow
+    classpath += shadowJar.outputs.files
+    dependsOn shadowJar
+}
+integTestRunner {
+    classpath -= compileJava.outputs.files
+    classpath -= configurations.compile
+    classpath -= configurations.runtime
+    classpath += configurations.shadow
+    classpath += shadowJar.outputs.files
+    dependsOn shadowJar
+}

client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java

@@ -66,8 +66,6 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.rankeval.RankEvalRequest;
 import org.elasticsearch.index.rankeval.RankEvalResponse;
 import org.elasticsearch.plugins.spi.NamedXContentProvider;
-import org.elasticsearch.protocol.xpack.XPackInfoRequest;
-import org.elasticsearch.protocol.xpack.XPackInfoResponse;
 import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.script.mustache.MultiSearchTemplateRequest;
@@ -204,6 +202,7 @@ public class RestHighLevelClient implements Closeable {
     private final IngestClient ingestClient = new IngestClient(this);
     private final SnapshotClient snapshotClient = new SnapshotClient(this);
     private final TasksClient tasksClient = new TasksClient(this);
+    private final XPackClient xPackClient = new XPackClient(this);
 
     /**
      * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the
@@ -294,6 +293,19 @@ public class RestHighLevelClient implements Closeable {
         return tasksClient;
     }
 
+    /**
+     * A wrapper for the {@link RestHighLevelClient} that provides methods for
+     * accessing the Elastic Licensed X-Pack APIs that are shipped with the
+     * default distribution of Elasticsearch. All of these APIs will 404 if run
+     * against the OSS distribution of Elasticsearch.
+     * <p>
+     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-api.html">
+     * X-Pack APIs on elastic.co</a> for more information.
+     */
+    public final XPackClient xpack() {
+        return xPackClient;
+    }
+
     /**
      * Executes a bulk request using the Bulk API.
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
@@ -794,34 +806,6 @@ public class RestHighLevelClient implements Closeable {
             FieldCapabilitiesResponse::fromXContent, listener, emptySet());
     }
 
-    /**
-     * Fetch information about X-Pack from the cluster if it is installed.
-     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html">
-     * the docs</a> for more.
-     * @param request the request
-     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
-     * @return the response
-     * @throws IOException in case there is a problem sending the request or parsing back the response
-     */
-    public XPackInfoResponse xPackInfo(XPackInfoRequest request, RequestOptions options) throws IOException {
-        return performRequestAndParseEntity(request, RequestConverters::xPackInfo, options,
-            XPackInfoResponse::fromXContent, emptySet());
-    }
-
-    /**
-     * Fetch information about X-Pack from the cluster if it is installed.
-     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html">
-     * the docs</a> for more.
-     * @param request the request
-     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
-     * @param listener the listener to be notified upon request completion
-     */
-    public void xPackInfoAsync(XPackInfoRequest request, RequestOptions options,
-                               ActionListener<XPackInfoResponse> listener) {
-        performRequestAsyncAndParseEntity(request, RequestConverters::xPackInfo, options,
-            XPackInfoResponse::fromXContent, listener, emptySet());
-    }
-
     protected final <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
             CheckedFunction<Req, Request, IOException> requestConverter,
             RequestOptions options,

client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java

@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.protocol.xpack.XPackInfoRequest;
+import org.elasticsearch.protocol.xpack.XPackInfoResponse;
+
+import java.io.IOException;
+
+import static java.util.Collections.emptySet;
+
+/**
+ * A wrapper for the {@link RestHighLevelClient} that provides methods for
+ * accessing the Elastic Licensed X-Pack APIs that are shipped with the
+ * default distribution of Elasticsearch. All of these APIs will 404 if run
+ * against the OSS distribution of Elasticsearch.
+ * <p>
+ * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-api.html">
+ * X-Pack APIs on elastic.co</a> for more information.
+ */
+public final class XPackClient {
+
+    private final RestHighLevelClient restHighLevelClient;
+
+    XPackClient(RestHighLevelClient restHighLevelClient) {
+        this.restHighLevelClient = restHighLevelClient;
+    }
+
+    /**
+     * Fetch information about X-Pack from the cluster.
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html">
+     * the docs</a> for more.
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response
+     * @throws IOException in case there is a problem sending the request or parsing back the response
+     */
+    public XPackInfoResponse info(XPackInfoRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::xPackInfo, options,
+            XPackInfoResponse::fromXContent, emptySet());
+    }
+
+    /**
+     * Asynchronously fetch information about X-Pack from the cluster.
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html">
+     * the docs</a> for more.
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon request completion
+     */
+    public void infoAsync(XPackInfoRequest request, RequestOptions options,
+                          ActionListener<XPackInfoResponse> listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xPackInfo, options,
+            XPackInfoResponse::fromXContent, listener, emptySet());
+    }
+}
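Taken together, the hunks above move the X-Pack info API behind a dedicated sub-client. As a usage sketch (not part of this commit), a caller on the new surface would look roughly like the following; the host, port, and printed field are illustrative assumptions, while the request setup mirrors the PingAndInfoIT test below:

// Hypothetical caller of the new xpack() entry point; host/port are assumptions.
import org.apache.http.HttpHost;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.protocol.xpack.XPackInfoRequest;
import org.elasticsearch.protocol.xpack.XPackInfoResponse;

import java.io.IOException;
import java.util.EnumSet;

public class XPackInfoExample {
    public static void main(String[] args) throws IOException {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            XPackInfoRequest request = new XPackInfoRequest();
            request.setCategories(EnumSet.allOf(XPackInfoRequest.Category.class));
            request.setVerbose(true);
            // Before this commit the call was client.xPackInfo(request, options);
            // it now goes through the xpack() wrapper instead.
            XPackInfoResponse info = client.xpack().info(request, RequestOptions.DEFAULT);
            System.out.println(info.getBuildInfo());
        }
    }
}

The asynchronous variant follows the same path through client.xpack().infoAsync(request, options, listener), as the documentation test further below shows.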

client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java

@@ -21,10 +21,10 @@ package org.elasticsearch.client;
 
 import org.apache.http.client.methods.HttpGet;
 import org.elasticsearch.action.main.MainResponse;
-import org.elasticsearch.protocol.license.LicenseStatus;
 import org.elasticsearch.protocol.xpack.XPackInfoRequest;
 import org.elasticsearch.protocol.xpack.XPackInfoResponse;
 import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo.FeatureSet;
+import org.elasticsearch.protocol.xpack.license.LicenseStatus;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -60,7 +60,7 @@ public class PingAndInfoIT extends ESRestHighLevelClientTestCase {
         XPackInfoRequest request = new XPackInfoRequest();
         request.setCategories(EnumSet.allOf(XPackInfoRequest.Category.class));
         request.setVerbose(true);
-        XPackInfoResponse info = highLevelClient().xPackInfo(request, RequestOptions.DEFAULT);
+        XPackInfoResponse info = highLevelClient().xpack().info(request, RequestOptions.DEFAULT);
 
         MainResponse mainResponse = highLevelClient().info(RequestOptions.DEFAULT);
@@ -89,7 +89,7 @@ public class PingAndInfoIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testXPackInfoEmptyRequest() throws IOException {
-        XPackInfoResponse info = highLevelClient().xPackInfo(new XPackInfoRequest(), RequestOptions.DEFAULT);
+        XPackInfoResponse info = highLevelClient().xpack().info(new XPackInfoRequest(), RequestOptions.DEFAULT);
 
         /*
          * The default in the transport client is non-verbose and returning

client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java

@@ -86,7 +86,7 @@ public class MiscellaneousDocumentationIT extends ESRestHighLevelClientTestCase
                 XPackInfoRequest.Category.BUILD,
                 XPackInfoRequest.Category.LICENSE,
                 XPackInfoRequest.Category.FEATURES));
-            XPackInfoResponse response = client.xPackInfo(request, RequestOptions.DEFAULT);
+            XPackInfoResponse response = client.xpack().info(request, RequestOptions.DEFAULT);
             //end::x-pack-info-execute
 
             //tag::x-pack-info-response
@@ -122,7 +122,7 @@ public class MiscellaneousDocumentationIT extends ESRestHighLevelClientTestCase
             listener = new LatchedActionListener<>(listener, latch);
 
             // tag::x-pack-info-execute-async
-            client.xPackInfoAsync(request, RequestOptions.DEFAULT, listener); // <1>
+            client.xpack().infoAsync(request, RequestOptions.DEFAULT, listener); // <1>
             // end::x-pack-info-execute-async
 
             assertTrue(latch.await(30L, TimeUnit.SECONDS));

docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc

@@ -50,7 +50,49 @@ PUT /test_index
 The above configures a `search_synonyms` filter, with a path of
 `analysis/synonym.txt` (relative to the `config` location). The
 `search_synonyms` analyzer is then configured with the filter.
-Additional settings are: `expand` (defaults to `true`).
+
+Additional settings are:
+
+* `expand` (defaults to `true`).
+* `lenient` (defaults to `false`). If `true` ignores exceptions while parsing the synonym configuration. It is important
+to note that only those synonym rules which cannot get parsed are ignored. For instance consider the following request:
+
+[source,js]
+--------------------------------------------------
+PUT /test_index
+{
+    "settings": {
+        "index" : {
+            "analysis" : {
+                "analyzer" : {
+                    "synonym" : {
+                        "tokenizer" : "standard",
+                        "filter" : ["my_stop", "synonym_graph"]
+                    }
+                },
+                "filter" : {
+                    "my_stop": {
+                        "type" : "stop",
+                        "stopwords": ["bar"]
+                    },
+                    "synonym_graph" : {
+                        "type" : "synonym_graph",
+                        "lenient": true,
+                        "synonyms" : ["foo, bar => baz"]
+                    }
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+// CONSOLE
+
+With the above request the word `bar` gets skipped but a mapping `foo => baz` is still added. However, if the mapping
+being added was "foo, baz => bar" nothing would get added to the synonym list. This is because the target word for the
+mapping is itself eliminated because it was a stop word. Similarly, if the mapping was "bar, foo, baz" and `expand` was
+set to `false` no mapping would get added as when `expand=false` the target mapping is the first word. However, if
+`expand=true` then the mappings added would be equivalent to `foo, baz => foo, baz` i.e, all mappings other than the
+stop word.
 
 [float]
 ==== `tokenizer` and `ignore_case` are deprecated

docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc

@@ -33,12 +33,55 @@ PUT /test_index
 
 The above configures a `synonym` filter, with a path of
 `analysis/synonym.txt` (relative to the `config` location). The
-`synonym` analyzer is then configured with the filter. Additional
-settings is: `expand` (defaults to `true`).
+`synonym` analyzer is then configured with the filter.
 
 This filter tokenize synonyms with whatever tokenizer and token filters
 appear before it in the chain.
 
+Additional settings are:
+
+* `expand` (defaults to `true`).
+* `lenient` (defaults to `false`). If `true` ignores exceptions while parsing the synonym configuration. It is important
+to note that only those synonym rules which cannot get parsed are ignored. For instance consider the following request:
+
+[source,js]
+--------------------------------------------------
+PUT /test_index
+{
+    "settings": {
+        "index" : {
+            "analysis" : {
+                "analyzer" : {
+                    "synonym" : {
+                        "tokenizer" : "standard",
+                        "filter" : ["my_stop", "synonym"]
+                    }
+                },
+                "filter" : {
+                    "my_stop": {
+                        "type" : "stop",
+                        "stopwords": ["bar"]
+                    },
+                    "synonym" : {
+                        "type" : "synonym",
+                        "lenient": true,
+                        "synonyms" : ["foo, bar => baz"]
+                    }
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+// CONSOLE
+
+With the above request the word `bar` gets skipped but a mapping `foo => baz` is still added. However, if the mapping
+being added was "foo, baz => bar" nothing would get added to the synonym list. This is because the target word for the
+mapping is itself eliminated because it was a stop word. Similarly, if the mapping was "bar, foo, baz" and `expand` was
+set to `false` no mapping would get added as when `expand=false` the target mapping is the first word. However, if
+`expand=true` then the mappings added would be equivalent to `foo, baz => foo, baz` i.e, all mappings other than the
+stop word.
+
 [float]
 ==== `tokenizer` and `ignore_case` are deprecated

modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java

@@ -129,7 +129,7 @@ public class GrokProcessorTests extends ESTestCase {
     public void testMissingFieldWithIgnoreMissing() throws Exception {
         String fieldName = "foo.bar";
         IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
-        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
+        IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
         GrokProcessor processor = new GrokProcessor(randomAlphaOfLength(10), Collections.singletonMap("ONE", "1"),
             Collections.singletonList("%{ONE:one}"), fieldName, false, true, ThreadWatchdog.noop());
         processor.execute(ingestDocument);

modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java

@@ -33,7 +33,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -55,7 +54,7 @@ public class JsonProcessorTests extends ESTestCase {
         IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
         jsonProcessor.execute(ingestDocument);
         Map<String, Object> jsonified = ingestDocument.getFieldValue(randomTargetField, Map.class);
-        assertIngestDocument(ingestDocument.getFieldValue(randomTargetField, Object.class), jsonified);
+        assertEquals(ingestDocument.getFieldValue(randomTargetField, Object.class), jsonified);
     }
 
     public void testInvalidValue() {
@@ -161,13 +160,10 @@ public class JsonProcessorTests extends ESTestCase {
         IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
         jsonProcessor.execute(ingestDocument);
 
-        Map<String, Object> expected = new HashMap<>();
-        expected.put("a", 1);
-        expected.put("b", 2);
-        expected.put("c", "see");
-        IngestDocument expectedIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), expected);
-        assertIngestDocument(ingestDocument, expectedIngestDocument);
+        Map<String, Object> sourceAndMetadata = ingestDocument.getSourceAndMetadata();
+        assertEquals(1, sourceAndMetadata.get("a"));
+        assertEquals(2, sourceAndMetadata.get("b"));
+        assertEquals("see", sourceAndMetadata.get("c"));
     }
 
     public void testAddBoolToRoot() {

qa/ccs-unavailable-clusters/build.gradle

@@ -21,5 +21,5 @@ apply plugin: 'elasticsearch.rest-test'
 apply plugin: 'elasticsearch.test-with-dependencies'
 
 dependencies {
-    testCompile project(path: ':client:rest-high-level', configuration: 'runtime')
+    testCompile project(path: ':client:rest-high-level', configuration: 'shadow')
 }

qa/vagrant/build.gradle

@@ -31,6 +31,12 @@ dependencies {
     compile "org.hamcrest:hamcrest-core:${versions.hamcrest}"
     compile "org.hamcrest:hamcrest-library:${versions.hamcrest}"
 
+    compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
+    compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
+    compile "org.apache.httpcomponents:fluent-hc:${versions.httpclient}"
+    compile "commons-codec:commons-codec:${versions.commonscodec}"
+    compile "commons-logging:commons-logging:${versions.commonslogging}"
+
     compile project(':libs:core')
 
     // pulls in the jar built by this project and its dependencies
@@ -73,3 +79,17 @@ tasks.test.enabled = false
 // this project doesn't get published
 tasks.dependencyLicenses.enabled = false
 tasks.dependenciesInfo.enabled = false
+
+tasks.thirdPartyAudit.excludes = [
+    //commons-logging optional dependencies
+    'org.apache.avalon.framework.logger.Logger',
+    'org.apache.log.Hierarchy',
+    'org.apache.log.Logger',
+    'org.apache.log4j.Category',
+    'org.apache.log4j.Level',
+    'org.apache.log4j.Logger',
+    'org.apache.log4j.Priority',
+    //commons-logging provided dependencies
+    'javax.servlet.ServletContextEvent',
+    'javax.servlet.ServletContextListener'
+]

qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java

@@ -19,6 +19,12 @@
 
 package org.elasticsearch.packaging.test;
 
+import org.apache.http.client.fluent.Request;
+import org.elasticsearch.packaging.util.Archives;
+import org.elasticsearch.packaging.util.Platforms;
+import org.elasticsearch.packaging.util.ServerUtils;
+import org.elasticsearch.packaging.util.Shell;
+import org.elasticsearch.packaging.util.Shell.Result;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.FixMethodOrder;
@@ -28,9 +34,33 @@ import org.junit.runners.MethodSorters;
 import org.elasticsearch.packaging.util.Distribution;
 import org.elasticsearch.packaging.util.Installation;
 
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.stream.Stream;
+
+import static java.util.stream.Collectors.joining;
+import static org.elasticsearch.packaging.util.Archives.ARCHIVE_OWNER;
 import static org.elasticsearch.packaging.util.Cleanup.cleanEverything;
 import static org.elasticsearch.packaging.util.Archives.installArchive;
 import static org.elasticsearch.packaging.util.Archives.verifyArchiveInstallation;
+import static org.elasticsearch.packaging.util.FileMatcher.Fileness.File;
+import static org.elasticsearch.packaging.util.FileMatcher.file;
+import static org.elasticsearch.packaging.util.FileMatcher.p660;
+import static org.elasticsearch.packaging.util.FileUtils.append;
+import static org.elasticsearch.packaging.util.FileUtils.cp;
+import static org.elasticsearch.packaging.util.FileUtils.getTempDir;
+import static org.elasticsearch.packaging.util.FileUtils.mkdir;
+import static org.elasticsearch.packaging.util.FileUtils.rm;
+import static org.elasticsearch.packaging.util.ServerUtils.makeRequest;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.isEmptyString;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeThat;
 import static org.junit.Assume.assumeTrue;
 
 /**
/** /**
@@ -61,4 +91,226 @@ public abstract class ArchiveTestCase {
         installation = installArchive(distribution());
         verifyArchiveInstallation(installation, distribution());
     }
+
+    @Test
+    public void test20PluginsListWithNoPlugins() {
+        assumeThat(installation, is(notNullValue()));
+
+        final Installation.Executables bin = installation.executables();
+        final Shell sh = new Shell();
+        final Result r = sh.run(bin.elasticsearchPlugin + " list");
+
+        assertThat(r.stdout, isEmptyString());
+    }
+
+    @Test
+    public void test30AbortWhenJavaMissing() {
+        assumeThat(installation, is(notNullValue()));
+
+        final Installation.Executables bin = installation.executables();
+        final Shell sh = new Shell();
+
+        Platforms.onWindows(() -> {
+            // on windows, removing java from PATH and removing JAVA_HOME is less involved than changing the permissions of the java
+            // executable. we also don't check permissions in the windows scripts anyway
+            final String originalPath = sh.run("$Env:PATH").stdout.trim();
+            final String newPath = Arrays.stream(originalPath.split(";"))
+                .filter(path -> path.contains("Java") == false)
+                .collect(joining(";"));
+
+            // note the lack of a $ when clearing the JAVA_HOME env variable - with a $ it deletes the java home directory
+            // https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/providers/environment-provider?view=powershell-6
+            //
+            // this won't persist to another session so we don't have to reset anything
+            final Result runResult = sh.runIgnoreExitCode(
+                "$Env:PATH = '" + newPath + "'; " +
+                "Remove-Item Env:JAVA_HOME; " +
+                bin.elasticsearch
+            );
+
+            assertThat(runResult.exitCode, is(1));
+            assertThat(runResult.stderr, containsString("could not find java; set JAVA_HOME or ensure java is in PATH"));
+        });
+
+        Platforms.onLinux(() -> {
+            final String javaPath = sh.run("which java").stdout.trim();
+
+            try {
+                sh.run("chmod -x '" + javaPath + "'");
+                final Result runResult = sh.runIgnoreExitCode(bin.elasticsearch.toString());
+                assertThat(runResult.exitCode, is(1));
+                assertThat(runResult.stdout, containsString("could not find java; set JAVA_HOME or ensure java is in PATH"));
+            } finally {
+                sh.run("chmod +x '" + javaPath + "'");
+            }
+        });
+    }
+
+    @Test
+    public void test40CreateKeystoreManually() {
+        assumeThat(installation, is(notNullValue()));
+
+        final Installation.Executables bin = installation.executables();
+        final Shell sh = new Shell();
+
+        Platforms.onLinux(() -> sh.run("sudo -u " + ARCHIVE_OWNER + " " + bin.elasticsearchKeystore + " create"));
+
+        // this is a hack around the fact that we can't run a command in the same session as the same user but not as administrator.
+        // the keystore ends up being owned by the Administrators group, so we manually set it to be owned by the vagrant user here.
+        // from the server's perspective the permissions aren't really different, this is just to reflect what we'd expect in the tests.
+        // when we run these commands as a role user we won't have to do this
+        Platforms.onWindows(() -> sh.run(
+            bin.elasticsearchKeystore + " create; " +
+            "$account = New-Object System.Security.Principal.NTAccount 'vagrant'; " +
+            "$acl = Get-Acl '" + installation.config("elasticsearch.keystore") + "'; " +
+            "$acl.SetOwner($account); " +
+            "Set-Acl '" + installation.config("elasticsearch.keystore") + "' $acl"
+        ));
+
+        assertThat(installation.config("elasticsearch.keystore"), file(File, ARCHIVE_OWNER, ARCHIVE_OWNER, p660));
+
+        Platforms.onLinux(() -> {
+            final Result r = sh.run("sudo -u " + ARCHIVE_OWNER + " " + bin.elasticsearchKeystore + " list");
+            assertThat(r.stdout, containsString("keystore.seed"));
+        });
+
+        Platforms.onWindows(() -> {
+            final Result r = sh.run(bin.elasticsearchKeystore + " list");
+            assertThat(r.stdout, containsString("keystore.seed"));
+        });
+    }
+
+    @Test
+    public void test50StartAndStop() throws IOException {
+        assumeThat(installation, is(notNullValue()));
+
+        // cleanup from previous test
+        rm(installation.config("elasticsearch.keystore"));
+
+        Archives.runElasticsearch(installation);
+
+        final String gcLogName = Platforms.LINUX
+            ? "gc.log.0.current"
+            : "gc.log";
+        assertTrue("gc logs exist", Files.exists(installation.logs.resolve(gcLogName)));
+        ServerUtils.runElasticsearchTests();
+
+        Archives.stopElasticsearch(installation);
+    }
+
+    @Test
+    public void test60AutoCreateKeystore() {
+        assumeThat(installation, is(notNullValue()));
+
+        assertThat(installation.config("elasticsearch.keystore"), file(File, ARCHIVE_OWNER, ARCHIVE_OWNER, p660));
+
+        final Installation.Executables bin = installation.executables();
+        final Shell sh = new Shell();
+
+        Platforms.onLinux(() -> {
+            final Result result = sh.run("sudo -u " + ARCHIVE_OWNER + " " + bin.elasticsearchKeystore + " list");
+            assertThat(result.stdout, containsString("keystore.seed"));
+        });
+
+        Platforms.onWindows(() -> {
+            final Result result = sh.run(bin.elasticsearchKeystore + " list");
+            assertThat(result.stdout, containsString("keystore.seed"));
+        });
+    }
+
+    @Test
+    public void test70CustomPathConfAndJvmOptions() throws IOException {
+        assumeThat(installation, is(notNullValue()));
+
+        final Path tempConf = getTempDir().resolve("esconf-alternate");
+
+        try {
+            mkdir(tempConf);
+            cp(installation.config("elasticsearch.yml"), tempConf.resolve("elasticsearch.yml"));
+            cp(installation.config("log4j2.properties"), tempConf.resolve("log4j2.properties"));
+
+            // we have to disable Log4j from using JMX lest it will hit a security
+            // manager exception before we have configured logging; this will fail
+            // startup since we detect usages of logging before it is configured
+            final String jvmOptions =
+                "-Xms512m\n" +
+                "-Xmx512m\n" +
+                "-Dlog4j2.disable.jmx=true\n";
+            append(tempConf.resolve("jvm.options"), jvmOptions);
+
+            final Shell sh = new Shell();
+            Platforms.onLinux(() -> sh.run("chown -R elasticsearch:elasticsearch " + tempConf));
+            Platforms.onWindows(() -> sh.run(
+                "$account = New-Object System.Security.Principal.NTAccount 'vagrant'; " +
+                "$tempConf = Get-ChildItem '" + tempConf + "' -Recurse; " +
+                "$tempConf += Get-Item '" + tempConf + "'; " +
+                "$tempConf | ForEach-Object { " +
+                    "$acl = Get-Acl $_.FullName; " +
+                    "$acl.SetOwner($account); " +
+                    "Set-Acl $_.FullName $acl " +
+                "}"
+            ));
+
+            final Shell serverShell = new Shell();
+            serverShell.getEnv().put("ES_PATH_CONF", tempConf.toString());
+            serverShell.getEnv().put("ES_JAVA_OPTS", "-XX:-UseCompressedOops");
+
+            Archives.runElasticsearch(installation, serverShell);
+
+            final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes"));
+            assertThat(nodesResponse, containsString("\"heap_init_in_bytes\":536870912"));
+            assertThat(nodesResponse, containsString("\"using_compressed_ordinary_object_pointers\":\"false\""));
+
+            Archives.stopElasticsearch(installation);
+
+        } finally {
+            rm(tempConf);
+        }
+    }
+
+    @Test
+    public void test80RelativePathConf() throws IOException {
+        assumeThat(installation, is(notNullValue()));
+
+        final Path temp = getTempDir().resolve("esconf-alternate");
+        final Path tempConf = temp.resolve("config");
+
+        try {
+            mkdir(tempConf);
+            Stream.of(
+                "elasticsearch.yml",
+                "log4j2.properties",
+                "jvm.options"
+            ).forEach(file -> cp(installation.config(file), tempConf.resolve(file)));
+
+            append(tempConf.resolve("elasticsearch.yml"), "node.name: relative");
+
+            final Shell sh = new Shell();
+            Platforms.onLinux(() -> sh.run("chown -R elasticsearch:elasticsearch " + temp));
+            Platforms.onWindows(() -> sh.run(
+                "$account = New-Object System.Security.Principal.NTAccount 'vagrant'; " +
+                "$tempConf = Get-ChildItem '" + temp + "' -Recurse; " +
+                "$tempConf += Get-Item '" + temp + "'; " +
+                "$tempConf | ForEach-Object { " +
+                    "$acl = Get-Acl $_.FullName; " +
+                    "$acl.SetOwner($account); " +
+                    "Set-Acl $_.FullName $acl " +
+                "}"
+            ));
+
+            final Shell serverShell = new Shell(temp);
+            serverShell.getEnv().put("ES_PATH_CONF", "config");
+            Archives.runElasticsearch(installation, serverShell);
+
+            final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes"));
+            assertThat(nodesResponse, containsString("\"name\":\"relative\""));
+
+            Archives.stopElasticsearch(installation);
+
+        } finally {
+            rm(tempConf);
+        }
+    }
 }
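The tests above lean on the Shell helper throughout (sh.run(...), sh.runIgnoreExitCode(...), and the Result fields stdout, stderr, and exitCode). Shell.java is in this commit's file list but its diff is not reproduced on this page; the following is a hypothetical minimal sketch of the surface these tests assume, not the actual implementation:

// Hypothetical sketch of the Shell surface used above; the real qa/vagrant
// Shell.java (changed in this commit but not shown here) differs in detail.
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

public class ShellSketch {

    private final Map<String, String> env = new HashMap<>();

    public Map<String, String> getEnv() {
        return env;
    }

    /** Runs a command, failing loudly if it exits non-zero. */
    public Result run(String command) {
        Result result = runIgnoreExitCode(command);
        if (result.isSuccess() == false) {
            throw new RuntimeException("command failed: " + command + "\n" + result.stderr);
        }
        return result;
    }

    /** Runs a command and returns the result regardless of exit code. */
    public Result runIgnoreExitCode(String command) {
        try {
            // dispatching to bash here for simplicity; the real helper picks
            // bash or powershell depending on the platform it runs on
            ProcessBuilder pb = new ProcessBuilder("bash", "-c", command);
            pb.environment().putAll(env);
            Process p = pb.start();
            String stdout = slurp(p.getInputStream());
            String stderr = slurp(p.getErrorStream());
            return new Result(p.waitFor(), stdout, stderr);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    private static String slurp(InputStream in) throws Exception {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buf = new byte[4096];
        int n;
        while ((n = in.read(buf)) != -1) {
            out.write(buf, 0, n);
        }
        return out.toString(StandardCharsets.UTF_8.name());
    }

    public static class Result {
        public final int exitCode;
        public final String stdout;
        public final String stderr;

        public Result(int exitCode, String stdout, String stderr) {
            this.exitCode = exitCode;
            this.stdout = stdout;
            this.stderr = stderr;
        }

        public boolean isSuccess() {
            return exitCode == 0;
        }
    }
}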

qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java

@@ -19,11 +19,14 @@
 
 package org.elasticsearch.packaging.util;
 
+import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.List;
 import java.util.stream.Stream;
 
+import static java.util.stream.Collectors.joining;
 import static org.elasticsearch.packaging.util.FileMatcher.Fileness.Directory;
 import static org.elasticsearch.packaging.util.FileMatcher.Fileness.File;
 import static org.elasticsearch.packaging.util.FileMatcher.file;
@@ -36,17 +39,26 @@ import static org.elasticsearch.packaging.util.FileUtils.getPackagingArchivesDir;
 import static org.elasticsearch.packaging.util.FileUtils.lsGlob;
 import static org.elasticsearch.packaging.util.FileUtils.mv;
+import static org.elasticsearch.packaging.util.FileUtils.slurp;
 import static org.elasticsearch.packaging.util.Platforms.isDPKG;
 import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.isEmptyOrNullString;
 import static org.hamcrest.core.Is.is;
 import static org.hamcrest.collection.IsEmptyCollection.empty;
 import static org.hamcrest.collection.IsCollectionWithSize.hasSize;
+import static org.hamcrest.core.IsNot.not;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Installation and verification logic for archive distributions
  */
 public class Archives {
 
+    // in the future we'll run as a role user on Windows
+    public static final String ARCHIVE_OWNER = Platforms.WINDOWS
+        ? "vagrant"
+        : "elasticsearch";
+
     public static Installation installArchive(Distribution distribution) {
         return installArchive(distribution, getDefaultArchiveInstallPath(), getCurrentVersion());
     }
@@ -63,22 +75,20 @@ public class Archives {
         if (distribution.packaging == Distribution.Packaging.TAR) {
-            if (Platforms.LINUX) {
-                sh.bash("tar -C " + baseInstallPath + " -xzpf " + distributionFile);
-            } else {
+            Platforms.onLinux(() -> sh.run("tar -C " + baseInstallPath + " -xzpf " + distributionFile));
+
+            if (Platforms.WINDOWS) {
                 throw new RuntimeException("Distribution " + distribution + " is not supported on windows");
             }
         } else if (distribution.packaging == Distribution.Packaging.ZIP) {
-            if (Platforms.LINUX) {
-                sh.bash("unzip " + distributionFile + " -d " + baseInstallPath);
-            } else {
-                sh.powershell(
+            Platforms.onLinux(() -> sh.run("unzip " + distributionFile + " -d " + baseInstallPath));
+
+            Platforms.onWindows(() -> sh.run(
                 "Add-Type -AssemblyName 'System.IO.Compression.Filesystem'; " +
                 "[IO.Compression.ZipFile]::ExtractToDirectory('" + distributionFile + "', '" + baseInstallPath + "')"
-                );
-            }
+            ));
         } else {
             throw new RuntimeException("Distribution " + distribution + " is not a known archive type");
@@ -93,9 +103,8 @@ public class Archives {
         assertThat("only the intended installation exists", installations, hasSize(1));
         assertThat("only the intended installation exists", installations.get(0), is(fullInstallPath));
 
-        if (Platforms.LINUX) {
-            setupArchiveUsersLinux(fullInstallPath);
-        }
+        Platforms.onLinux(() -> setupArchiveUsersLinux(fullInstallPath));
+        Platforms.onWindows(() -> setupArchiveUsersWindows(fullInstallPath));
 
         return new Installation(fullInstallPath);
     }
@@ -103,17 +112,17 @@ public class Archives {
     private static void setupArchiveUsersLinux(Path installPath) {
         final Shell sh = new Shell();
 
-        if (sh.bashIgnoreExitCode("getent group elasticsearch").isSuccess() == false) {
+        if (sh.runIgnoreExitCode("getent group elasticsearch").isSuccess() == false) {
             if (isDPKG()) {
-                sh.bash("addgroup --system elasticsearch");
+                sh.run("addgroup --system elasticsearch");
             } else {
-                sh.bash("groupadd -r elasticsearch");
+                sh.run("groupadd -r elasticsearch");
             }
         }
 
-        if (sh.bashIgnoreExitCode("id elasticsearch").isSuccess() == false) {
+        if (sh.runIgnoreExitCode("id elasticsearch").isSuccess() == false) {
             if (isDPKG()) {
-                sh.bash("adduser " +
+                sh.run("adduser " +
                     "--quiet " +
                     "--system " +
                     "--no-create-home " +
@@ -122,7 +131,7 @@ public class Archives {
                     "--shell /bin/false " +
                     "elasticsearch");
             } else {
-                sh.bash("useradd " +
+                sh.run("useradd " +
                     "--system " +
                     "-M " +
                     "--gid elasticsearch " +
@@ -131,20 +140,29 @@ public class Archives {
                     "elasticsearch");
             }
         }
 
-        sh.bash("chown -R elasticsearch:elasticsearch " + installPath);
+        sh.run("chown -R elasticsearch:elasticsearch " + installPath);
+    }
+
+    private static void setupArchiveUsersWindows(Path installPath) {
+        // we want the installation to be owned as the vagrant user rather than the Administrators group
+        final Shell sh = new Shell();
+        sh.run(
+            "$account = New-Object System.Security.Principal.NTAccount 'vagrant'; " +
+            "$install = Get-ChildItem -Path '" + installPath + "' -Recurse; " +
+            "$install += Get-Item -Path '" + installPath + "'; " +
+            "$install | ForEach-Object { " +
+                "$acl = Get-Acl $_.FullName; " +
+                "$acl.SetOwner($account); " +
+                "Set-Acl $_.FullName $acl " +
+            "}"
+        );
     }
 
     public static void verifyArchiveInstallation(Installation installation, Distribution distribution) {
-        // on Windows for now we leave the installation owned by the vagrant user that the tests run as. Since the vagrant account
-        // is a local administrator, the files really end up being owned by the local administrators group. In the future we'll
-        // install and run elasticesearch with a role user on Windows
-        final String owner = Platforms.WINDOWS
-            ? "BUILTIN\\Administrators"
-            : "elasticsearch";
-
-        verifyOssInstallation(installation, distribution, owner);
+        verifyOssInstallation(installation, distribution, ARCHIVE_OWNER);
         if (distribution.flavor == Distribution.Flavor.DEFAULT) {
-            verifyDefaultInstallation(installation, distribution, owner);
+            verifyDefaultInstallation(installation, distribution, ARCHIVE_OWNER);
         }
     }
@@ -160,38 +178,38 @@ public class Archives {
         assertThat(Files.exists(es.data), is(false));
         assertThat(Files.exists(es.scripts), is(false));
 
-        assertThat(es.home.resolve("bin"), file(Directory, owner, owner, p755));
-        assertThat(es.home.resolve("lib"), file(Directory, owner, owner, p755));
+        assertThat(es.bin, file(Directory, owner, owner, p755));
+        assertThat(es.lib, file(Directory, owner, owner, p755));
 
-        assertThat(Files.exists(es.config.resolve("elasticsearch.keystore")), is(false));
+        assertThat(Files.exists(es.config("elasticsearch.keystore")), is(false));
 
         Stream.of(
-            "bin/elasticsearch",
-            "bin/elasticsearch-env",
-            "bin/elasticsearch-keystore",
-            "bin/elasticsearch-plugin",
-            "bin/elasticsearch-translog"
+            "elasticsearch",
+            "elasticsearch-env",
+            "elasticsearch-keystore",
+            "elasticsearch-plugin",
+            "elasticsearch-translog"
         ).forEach(executable -> {
 
-            assertThat(es.home.resolve(executable), file(File, owner, owner, p755));
+            assertThat(es.bin(executable), file(File, owner, owner, p755));
 
             if (distribution.packaging == Distribution.Packaging.ZIP) {
-                assertThat(es.home.resolve(executable + ".bat"), file(File, owner));
+                assertThat(es.bin(executable + ".bat"), file(File, owner));
             }
         });
 
         if (distribution.packaging == Distribution.Packaging.ZIP) {
             Stream.of(
-                "bin/elasticsearch-service.bat",
-                "bin/elasticsearch-service-mgr.exe",
-                "bin/elasticsearch-service-x64.exe"
-            ).forEach(executable -> assertThat(es.home.resolve(executable), file(File, owner)));
+                "elasticsearch-service.bat",
+                "elasticsearch-service-mgr.exe",
+                "elasticsearch-service-x64.exe"
+            ).forEach(executable -> assertThat(es.bin(executable), file(File, owner)));
         }
 
         Stream.of(
             "elasticsearch.yml",
             "jvm.options",
             "log4j2.properties"
-        ).forEach(config -> assertThat(es.config.resolve(config), file(File, owner, owner, p660)));
+        ).forEach(config -> assertThat(es.config(config), file(File, owner, owner, p660)));
 
         Stream.of(
             "NOTICE.txt",
@@ -203,30 +221,30 @@ public class Archives {
     private static void verifyDefaultInstallation(Installation es, Distribution distribution, String owner) {
 
         Stream.of(
-            "bin/elasticsearch-certgen",
-            "bin/elasticsearch-certutil",
-            "bin/elasticsearch-croneval",
-            "bin/elasticsearch-migrate",
-            "bin/elasticsearch-saml-metadata",
-            "bin/elasticsearch-setup-passwords",
-            "bin/elasticsearch-sql-cli",
-            "bin/elasticsearch-syskeygen",
-            "bin/elasticsearch-users",
-            "bin/x-pack-env",
-            "bin/x-pack-security-env",
-            "bin/x-pack-watcher-env"
+            "elasticsearch-certgen",
+            "elasticsearch-certutil",
+            "elasticsearch-croneval",
+            "elasticsearch-migrate",
+            "elasticsearch-saml-metadata",
+            "elasticsearch-setup-passwords",
+            "elasticsearch-sql-cli",
+            "elasticsearch-syskeygen",
+            "elasticsearch-users",
+            "x-pack-env",
+            "x-pack-security-env",
+            "x-pack-watcher-env"
         ).forEach(executable -> {
 
-            assertThat(es.home.resolve(executable), file(File, owner, owner, p755));
+            assertThat(es.bin(executable), file(File, owner, owner, p755));
 
             if (distribution.packaging == Distribution.Packaging.ZIP) {
-                assertThat(es.home.resolve(executable + ".bat"), file(File, owner));
+                assertThat(es.bin(executable + ".bat"), file(File, owner));
             }
         });
 
         // at this time we only install the current version of archive distributions, but if that changes we'll need to pass
         // the version through here
-        assertThat(es.home.resolve("bin/elasticsearch-sql-cli-" + getCurrentVersion() + ".jar"), file(File, owner, owner, p755));
+        assertThat(es.bin("elasticsearch-sql-cli-" + getCurrentVersion() + ".jar"), file(File, owner, owner, p755));
 
         Stream.of(
             "users",
@ -234,7 +252,72 @@ public class Archives {
"roles.yml", "roles.yml",
"role_mapping.yml", "role_mapping.yml",
"log4j2.properties" "log4j2.properties"
).forEach(config -> assertThat(es.config.resolve(config), file(File, owner, owner, p660))); ).forEach(config -> assertThat(es.config(config), file(File, owner, owner, p660)));
}
public static void runElasticsearch(Installation installation) throws IOException {
runElasticsearch(installation, new Shell());
}
public static void runElasticsearch(Installation installation, Shell sh) throws IOException {
final Path pidFile = installation.home.resolve("elasticsearch.pid");
final Installation.Executables bin = installation.executables();
Platforms.onLinux(() -> {
// If jayatana is installed then we try to use it. Elasticsearch should ignore it even when we try.
// If it doesn't ignore it then Elasticsearch will fail to start because of security errors.
// This line is attempting to emulate the on login behavior of /usr/share/upstart/sessions/jayatana.conf
if (Files.exists(Paths.get("/usr/share/java/jayatanaag.jar"))) {
sh.getEnv().put("JAVA_TOOL_OPTIONS", "-javaagent:/usr/share/java/jayatanaag.jar");
}
sh.run("sudo -E -u " + ARCHIVE_OWNER + " " +
bin.elasticsearch + " -d -p " + installation.home.resolve("elasticsearch.pid"));
});
Platforms.onWindows(() -> {
// this starts the server in the background. the -d flag is unsupported on windows
// these tests run as Administrator. we don't want to run the server as Administrator, so we provide the current user's
// username and password to the process which has the effect of starting it not as Administrator.
sh.run(
"$password = ConvertTo-SecureString 'vagrant' -AsPlainText -Force; " +
"$processInfo = New-Object System.Diagnostics.ProcessStartInfo; " +
"$processInfo.FileName = '" + bin.elasticsearch + "'; " +
"$processInfo.Arguments = '-p " + installation.home.resolve("elasticsearch.pid") + "'; " +
"$processInfo.Username = 'vagrant'; " +
"$processInfo.Password = $password; " +
"$processInfo.RedirectStandardOutput = $true; " +
"$processInfo.RedirectStandardError = $true; " +
sh.env.entrySet().stream()
.map(entry -> "$processInfo.Environment.Add('" + entry.getKey() + "', '" + entry.getValue() + "'); ")
.collect(joining()) +
"$processInfo.UseShellExecute = $false; " +
"$process = New-Object System.Diagnostics.Process; " +
"$process.StartInfo = $processInfo; " +
"$process.Start() | Out-Null; " +
"$process.Id;"
);
});
ServerUtils.waitForElasticsearch();
assertTrue(Files.exists(pidFile));
String pid = slurp(pidFile).trim();
assertThat(pid, not(isEmptyOrNullString()));
Platforms.onLinux(() -> sh.run("ps " + pid));
Platforms.onWindows(() -> sh.run("Get-Process -Id " + pid));
}
public static void stopElasticsearch(Installation installation) {
Path pidFile = installation.home.resolve("elasticsearch.pid");
assertTrue(Files.exists(pidFile));
String pid = slurp(pidFile).trim();
assertThat(pid, not(isEmptyOrNullString()));
final Shell sh = new Shell();
Platforms.onLinux(() -> sh.run("kill -SIGTERM " + pid));
Platforms.onWindows(() -> sh.run("Get-Process -Id " + pid + " | Stop-Process -Force"));
} }
} }
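For orientation, the new helpers are meant to be chained from a packaging test roughly as follows. This is a minimal sketch: the test method name and the installArchive(...) call are hypothetical stand-ins for the real ArchiveTestCase wiring; only runElasticsearch, stopElasticsearch, and ServerUtils.runElasticsearchTests come from this change.

public void test30StartStopArchive() throws Exception {
    Installation installation = installArchive(distribution());  // assumed install helper, not part of this diff
    Archives.runElasticsearch(installation);    // starts the node and waits for cluster health green
    ServerUtils.runElasticsearchTests();        // indexes two docs, checks the count, then deletes the index
    Archives.stopElasticsearch(installation);   // kill -SIGTERM on Linux, Stop-Process -Force on Windows
}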


@@ -56,31 +56,28 @@ public class Cleanup {
final Shell sh = new Shell();
// kill elasticsearch processes
-if (Platforms.WINDOWS) {
+Platforms.onLinux(() -> {
+sh.runIgnoreExitCode("pkill -u elasticsearch");
+sh.runIgnoreExitCode("ps aux | grep -i 'org.elasticsearch.bootstrap.Elasticsearch' | awk {'print $2'} | xargs kill -9");
+});
+Platforms.onWindows(() -> {
// the view of processes returned by Get-Process doesn't expose command line arguments, so we use WMI here
-sh.powershellIgnoreExitCode(
+sh.runIgnoreExitCode(
"Get-WmiObject Win32_Process | " +
"Where-Object { $_.CommandLine -Match 'org.elasticsearch.bootstrap.Elasticsearch' } | " +
"ForEach-Object { $_.Terminate() }"
);
+});
-} else {
-sh.bashIgnoreExitCode("pkill -u elasticsearch");
-sh.bashIgnoreExitCode("ps aux | grep -i 'org.elasticsearch.bootstrap.Elasticsearch' | awk {'print $2'} | xargs kill -9");
-}
-if (Platforms.LINUX) {
-purgePackagesLinux();
-}
+Platforms.onLinux(Cleanup::purgePackagesLinux);
// remove elasticsearch users
-if (Platforms.LINUX) {
-sh.bashIgnoreExitCode("userdel elasticsearch");
-sh.bashIgnoreExitCode("groupdel elasticsearch");
-}
+Platforms.onLinux(() -> {
+sh.runIgnoreExitCode("userdel elasticsearch");
+sh.runIgnoreExitCode("groupdel elasticsearch");
+});
+// when we run es as a role user on windows, add the equivalent here
// delete files that may still exist
lsGlob(getTempDir(), "elasticsearch*").forEach(FileUtils::rm);
@@ -95,7 +92,7 @@ public class Cleanup {
// disable elasticsearch service
// todo add this for windows when adding tests for service installation
if (Platforms.LINUX && isSystemd()) {
-sh.bash("systemctl unmask systemd-sysctl.service");
+sh.run("systemctl unmask systemd-sysctl.service");
}
}
@@ -103,19 +100,19 @@ public class Cleanup {
final Shell sh = new Shell();
if (isRPM()) {
-sh.bashIgnoreExitCode("rpm --quiet -e elasticsearch elasticsearch-oss");
+sh.runIgnoreExitCode("rpm --quiet -e elasticsearch elasticsearch-oss");
}
if (isYUM()) {
-sh.bashIgnoreExitCode("yum remove -y elasticsearch elasticsearch-oss");
+sh.runIgnoreExitCode("yum remove -y elasticsearch elasticsearch-oss");
}
if (isDPKG()) {
-sh.bashIgnoreExitCode("dpkg --purge elasticsearch elasticsearch-oss");
+sh.runIgnoreExitCode("dpkg --purge elasticsearch elasticsearch-oss");
}
if (isAptGet()) {
-sh.bashIgnoreExitCode("apt-get --quiet --yes purge elasticsearch elasticsearch-oss");
+sh.runIgnoreExitCode("apt-get --quiet --yes purge elasticsearch elasticsearch-oss");
}
}
}


@@ -21,11 +21,14 @@ package org.elasticsearch.packaging.util;
import org.elasticsearch.core.internal.io.IOUtils;
+import java.io.BufferedWriter;
import java.io.IOException;
+import java.nio.charset.StandardCharsets;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.FileOwnerAttributeView;
import java.nio.file.attribute.PosixFileAttributes;
@@ -63,6 +66,22 @@ public class FileUtils {
}
}
+public static Path mkdir(Path path) {
+try {
+return Files.createDirectories(path);
+} catch (IOException e) {
+throw new RuntimeException(e);
+}
+}
+public static Path cp(Path source, Path target) {
+try {
+return Files.copy(source, target);
+} catch (IOException e) {
+throw new RuntimeException(e);
+}
+}
public static Path mv(Path source, Path target) {
try {
return Files.move(source, target);
@@ -71,9 +90,19 @@ public class FileUtils {
}
}
+public static void append(Path file, String text) {
+try (BufferedWriter writer = Files.newBufferedWriter(file, StandardCharsets.UTF_8,
+StandardOpenOption.CREATE, StandardOpenOption.APPEND)) {
+writer.write(text);
+} catch (IOException e) {
+throw new RuntimeException(e);
+}
+}
public static String slurp(Path file) {
try {
-return String.join("\n", Files.readAllLines(file));
+return String.join("\n", Files.readAllLines(file, StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
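Taken together, the new helpers give the tests a checked-exception-free way to do small file fixups. Something like the following would be typical (a sketch; the paths and the installation variable are invented for illustration, while mkdir, cp, append, and slurp come from this change):

Path conf = FileUtils.mkdir(Paths.get("/tmp/es-test-config"));          // creates parents as needed
FileUtils.cp(installation.config("jvm.options"), conf.resolve("jvm.options"));
FileUtils.append(conf.resolve("jvm.options"), "-Xms512m\n");             // UTF-8, create-or-append
String contents = FileUtils.slurp(conf.resolve("jvm.options"));          // read back for assertions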


@@ -27,6 +27,8 @@ import java.nio.file.Path;
public class Installation {
public final Path home;
+public final Path bin; // this isn't a first-class installation feature but we include it for convenience
+public final Path lib; // same
public final Path config;
public final Path data;
public final Path logs;
@@ -36,6 +38,9 @@ public class Installation {
public Installation(Path home, Path config, Path data, Path logs, Path plugins, Path modules, Path scripts) {
this.home = home;
+this.bin = home.resolve("bin");
+this.lib = home.resolve("lib");
this.config = config;
this.data = data;
this.logs = logs;
@@ -55,4 +60,31 @@ public class Installation {
home.resolve("scripts")
);
}
+public Path bin(String executableName) {
+return bin.resolve(executableName);
+}
+public Path config(String configFileName) {
+return config.resolve(configFileName);
+}
+public Executables executables() {
+return new Executables();
+}
+public class Executables {
+public final Path elasticsearch = platformExecutable("elasticsearch");
+public final Path elasticsearchPlugin = platformExecutable("elasticsearch-plugin");
+public final Path elasticsearchKeystore = platformExecutable("elasticsearch-keystore");
+public final Path elasticsearchTranslog = platformExecutable("elasticsearch-translog");
+private Path platformExecutable(String name) {
+final String platformExecutableName = Platforms.WINDOWS
+? name + ".bat"
+: name;
+return bin(platformExecutableName);
+}
+}
}
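The convenience accessors keep call sites short; usage would look roughly like this (a sketch; the factory method name is an assumption modeled on the static builder whose tail appears in the hunk above, and the paths are invented):

Installation es = Installation.ofArchive(Paths.get("/tmp/elasticsearch"));  // assumed factory on this class
Path keystoreTool = es.executables().elasticsearchKeystore;  // resolves to elasticsearch-keystore.bat on Windows
Path log4jConfig = es.config("log4j2.properties");           // shorthand for es.config.resolve(...)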


@@ -28,41 +28,61 @@ public class Platforms {
if (WINDOWS) {
return false;
}
-return new Shell().bashIgnoreExitCode("which dpkg").isSuccess();
+return new Shell().runIgnoreExitCode("which dpkg").isSuccess();
}
public static boolean isAptGet() {
if (WINDOWS) {
return false;
}
-return new Shell().bashIgnoreExitCode("which apt-get").isSuccess();
+return new Shell().runIgnoreExitCode("which apt-get").isSuccess();
}
public static boolean isRPM() {
if (WINDOWS) {
return false;
}
-return new Shell().bashIgnoreExitCode("which rpm").isSuccess();
+return new Shell().runIgnoreExitCode("which rpm").isSuccess();
}
public static boolean isYUM() {
if (WINDOWS) {
return false;
}
-return new Shell().bashIgnoreExitCode("which yum").isSuccess();
+return new Shell().runIgnoreExitCode("which yum").isSuccess();
}
public static boolean isSystemd() {
if (WINDOWS) {
return false;
}
-return new Shell().bashIgnoreExitCode("which systemctl").isSuccess();
+return new Shell().runIgnoreExitCode("which systemctl").isSuccess();
}
public static boolean isSysVInit() {
if (WINDOWS) {
return false;
}
-return new Shell().bashIgnoreExitCode("which service").isSuccess();
+return new Shell().runIgnoreExitCode("which service").isSuccess();
}
+public static void onWindows(PlatformAction action) {
+if (WINDOWS) {
+action.run();
+}
+}
+public static void onLinux(PlatformAction action) {
+if (LINUX) {
+action.run();
+}
+}
+/**
+* Essentially a Runnable, but we make the distinction so it's more clear that these are synchronous
+*/
+@FunctionalInterface
+public interface PlatformAction {
+void run();
+}
}
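The effect is to replace scattered platform conditionals with a callback that reads as a platform-scoped block. A before/after sketch, using names from this diff:

// before: if (Platforms.LINUX) { sh.run("ps " + pid); }
// after: each branch is a no-op on the other platform
Platforms.onLinux(() -> sh.run("ps " + pid));
Platforms.onWindows(() -> sh.run("Get-Process -Id " + pid));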


@@ -0,0 +1,123 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.HttpResponse;
import org.apache.http.client.fluent.Request;
import org.apache.http.conn.HttpHostConnectException;
import org.apache.http.entity.ContentType;
import org.apache.http.util.EntityUtils;
import java.io.IOException;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
public class ServerUtils {
private static final Log LOG = LogFactory.getLog(ServerUtils.class);
private static final long waitTime = TimeUnit.SECONDS.toMillis(60);
private static final long timeoutLength = TimeUnit.SECONDS.toMillis(10);
public static void waitForElasticsearch() throws IOException {
waitForElasticsearch("green", null);
}
public static void waitForElasticsearch(String status, String index) throws IOException {
Objects.requireNonNull(status);
// we loop here rather than letting httpclient handle retries so we can measure the entire waiting time
final long startTime = System.currentTimeMillis();
long timeElapsed = 0;
boolean started = false;
while (started == false && timeElapsed < waitTime) {
try {
final HttpResponse response = Request.Get("http://localhost:9200/_cluster/health")
.connectTimeout((int) timeoutLength)
.socketTimeout((int) timeoutLength)
.execute()
.returnResponse();
if (response.getStatusLine().getStatusCode() >= 300) {
final String statusLine = response.getStatusLine().toString();
final String body = EntityUtils.toString(response.getEntity());
throw new RuntimeException("Connecting to elasticsearch cluster health API failed:\n" + statusLine+ "\n" + body);
}
started = true;
} catch (HttpHostConnectException e) {
// we want to retry if the connection is refused
LOG.info("Got connection refused when waiting for cluster health", e);
}
timeElapsed = System.currentTimeMillis() - startTime;
}
if (started == false) {
throw new RuntimeException("Elasticsearch did not start");
}
final String url;
if (index == null) {
url = "http://localhost:9200/_cluster/health?wait_for_status=" + status + "&timeout=60s&pretty";
} else {
url = "http://localhost:9200/_cluster/health/" + index + "?wait_for_status=" + status + "&timeout=60s&pretty";
}
final String body = makeRequest(Request.Get(url));
assertThat("cluster health response must contain desired status", body, containsString(status));
}
public static void runElasticsearchTests() throws IOException {
makeRequest(
Request.Post("http://localhost:9200/library/book/1?refresh=true&pretty")
.bodyString("{ \"title\": \"Book #1\", \"pages\": 123 }", ContentType.APPLICATION_JSON));
makeRequest(
Request.Post("http://localhost:9200/library/book/2?refresh=true&pretty")
.bodyString("{ \"title\": \"Book #2\", \"pages\": 456 }", ContentType.APPLICATION_JSON));
String count = makeRequest(Request.Get("http://localhost:9200/_count?pretty"));
assertThat(count, containsString("\"count\" : 2"));
makeRequest(Request.Delete("http://localhost:9200/_all"));
}
public static String makeRequest(Request request) throws IOException {
final HttpResponse response = request.execute().returnResponse();
final String body = EntityUtils.toString(response.getEntity());
if (response.getStatusLine().getStatusCode() >= 300) {
throw new RuntimeException("Request failed:\n" + response.getStatusLine().toString() + "\n" + body);
}
return body;
}
}
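Callers can also wait on a specific index and status, mirroring what the bats tests did with curl (a sketch; the index name is illustrative):

ServerUtils.waitForElasticsearch("yellow", "library");  // polls _cluster/health/library?wait_for_status=yellow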


@@ -58,58 +58,50 @@ public class Shell {
this.workingDirectory = workingDirectory;
}
-/**
-* Runs a script in a bash shell, throwing an exception if its exit code is nonzero
-*/
-public Result bash(String script) {
-return run(bashCommand(script));
-}
+public Map<String, String> getEnv() {
+return env;
+}
/**
-* Runs a script in a bash shell
+* Run the provided string as a shell script. On Linux the {@code bash -c [script]} syntax will be used, and on Windows
+* the {@code powershell.exe -Command [script]} syntax will be used. Throws an exception if the exit code of the script is nonzero
*/
-public Result bashIgnoreExitCode(String script) {
-return runIgnoreExitCode(bashCommand(script));
+public Result run(String script) {
+return runScript(getScriptCommand(script));
+}
+/**
+* Same as {@link #run(String)}, but does not throw an exception if the exit code of the script is nonzero
+*/
+public Result runIgnoreExitCode(String script) {
+return runScriptIgnoreExitCode(getScriptCommand(script));
+}
+private String[] getScriptCommand(String script) {
+if (Platforms.WINDOWS) {
+return powershellCommand(script);
+} else {
+return bashCommand(script);
+}
}
private static String[] bashCommand(String script) {
return Stream.concat(Stream.of("bash", "-c"), Stream.of(script)).toArray(String[]::new);
}
-/**
-* Runs a script in a powershell shell, throwing an exception if its exit code is nonzero
-*/
-public Result powershell(String script) {
-return run(powershellCommand(script));
-}
-/**
-* Runs a script in a powershell shell
-*/
-public Result powershellIgnoreExitCode(String script) {
-return runIgnoreExitCode(powershellCommand(script));
-}
private static String[] powershellCommand(String script) {
return Stream.concat(Stream.of("powershell.exe", "-Command"), Stream.of(script)).toArray(String[]::new);
}
-/**
-* Runs an executable file, passing all elements of {@code command} after the first as arguments. Throws an exception if the process'
-* exit code is nonzero
-*/
-private Result run(String[] command) {
-Result result = runIgnoreExitCode(command);
+private Result runScript(String[] command) {
+Result result = runScriptIgnoreExitCode(command);
if (result.isSuccess() == false) {
throw new RuntimeException("Command was not successful: [" + String.join(" ", command) + "] result: " + result.toString());
}
return result;
}
-/**
-* Runs an executable file, passing all elements of {@code command} after the first as arguments
-*/
-private Result runIgnoreExitCode(String[] command) {
+private Result runScriptIgnoreExitCode(String[] command) {
ProcessBuilder builder = new ProcessBuilder();
builder.command(command);
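With the platform dispatch folded into getScriptCommand, callers now pick the semantics (throw vs. ignore) rather than the shell. A usage sketch, assuming Result is the public result type on Shell and with illustrative command strings:

Shell sh = new Shell();
Shell.Result result = sh.runIgnoreExitCode("which dpkg");  // bash -c on Linux, powershell.exe -Command on Windows
if (result.isSuccess()) {
    sh.run("dpkg --version");  // throws if the exit code is nonzero
}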


@@ -1,178 +0,0 @@
#!/usr/bin/env bats
# This file is used to test the tar gz package.
# WARNING: This testing file must be executed as root and can
# dramatically change your system. It should only be executed
# in a throw-away VM like those made by the Vagrantfile at
# the root of the Elasticsearch source code. This should
# cause the script to fail if it is executed any other way:
[ -f /etc/is_vagrant_vm ] || {
>&2 echo "must be run on a vagrant VM"
exit 1
}
# The test case can be executed with the Bash Automated
# Testing System tool available at https://github.com/sstephenson/bats
# Thanks to Sam Stephenson!
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Load test utilities
load $BATS_UTILS/utils.bash
load $BATS_UTILS/tar.bash
load $BATS_UTILS/plugins.bash
setup() {
skip_not_tar_gz
export ESHOME=/tmp/elasticsearch
export_elasticsearch_paths
}
##################################
# Install TAR GZ package
##################################
@test "[TAR] tar command is available" {
# Cleans everything for the 1st execution
clean_before_test
run tar --version
[ "$status" -eq 0 ]
}
@test "[TAR] archive is available" {
local version=$(cat version)
count=$(find . -type f -name "${PACKAGE_NAME}-${version}.tar.gz" | wc -l)
[ "$count" -eq 1 ]
}
@test "[TAR] archive is not installed" {
count=$(find /tmp -type d -name 'elasticsearch*' | wc -l)
[ "$count" -eq 0 ]
}
@test "[TAR] install archive" {
# Install the archive
install_archive
set_debug_logging
count=$(find /tmp -type d -name 'elasticsearch*' | wc -l)
[ "$count" -eq 1 ]
# It's simpler to check that the install was correct in this test rather
# than in another test because install_archive sets a number of path
# variables that verify_archive_installation reads. To separate this into
# another test you'd have to recreate the variables.
verify_archive_installation
}
@test "[TAR] verify elasticsearch-plugin list runs without any plugins installed" {
# previously this would fail because the archive installations did
# not create an empty plugins directory
local plugins_list=`$ESHOME/bin/elasticsearch-plugin list`
[[ -z $plugins_list ]]
}
@test "[TAR] elasticsearch fails if java executable is not found" {
local JAVA=$(which java)
sudo chmod -x $JAVA
run "$ESHOME/bin/elasticsearch"
sudo chmod +x $JAVA
[ "$status" -eq 1 ]
local expected="could not find java; set JAVA_HOME or ensure java is in PATH"
[[ "$output" == *"$expected"* ]] || {
echo "Expected error message [$expected] but found: $output"
false
}
}
@test "[TAR] test creating elasticearch.keystore" {
sudo -E -u elasticsearch "$ESHOME/bin/elasticsearch-keystore" create
assert_file "$ESCONFIG/elasticsearch.keystore" f elasticsearch elasticsearch 660
sudo -E -u elasticsearch "$ESHOME/bin/elasticsearch-keystore" list | grep "keystore.seed"
# cleanup for the next test
rm -rf "$ESCONFIG/elasticsearch.keystore"
}
##################################
# Check that Elasticsearch is working
##################################
@test "[TAR] test elasticsearch" {
start_elasticsearch_service
run_elasticsearch_tests
stop_elasticsearch_service
}
@test "[TAR] test auto-creating elasticearch.keystore" {
# a keystore should automatically be created after the service is started
assert_file "$ESCONFIG/elasticsearch.keystore" f elasticsearch elasticsearch 660
# the keystore should be seeded
sudo -E -u elasticsearch "$ESHOME/bin/elasticsearch-keystore" list | grep "keystore.seed"
}
@test "[TAR] start Elasticsearch with custom JVM options" {
local es_java_opts=$ES_JAVA_OPTS
local es_path_conf=$ES_PATH_CONF
local temp=`mktemp -d`
cp "$ESCONFIG"/elasticsearch.yml "$temp"
cp "$ESCONFIG"/log4j2.properties "$temp"
touch "$temp/jvm.options"
chown -R elasticsearch:elasticsearch "$temp"
echo "-Xms512m" >> "$temp/jvm.options"
echo "-Xmx512m" >> "$temp/jvm.options"
# we have to disable Log4j from using JMX lest it will hit a security
# manager exception before we have configured logging; this will fail
# startup since we detect usages of logging before it is configured
echo "-Dlog4j2.disable.jmx=true" >> "$temp/jvm.options"
export ES_PATH_CONF="$temp"
export ES_JAVA_OPTS="-XX:-UseCompressedOops"
start_elasticsearch_service
curl -s -XGET localhost:9200/_nodes | fgrep '"heap_init_in_bytes":536870912'
curl -s -XGET localhost:9200/_nodes | fgrep '"using_compressed_ordinary_object_pointers":"false"'
stop_elasticsearch_service
export ES_PATH_CONF=$es_path_conf
export ES_JAVA_OPTS=$es_java_opts
}
@test "[TAR] GC logs exist" {
start_elasticsearch_service
assert_file_exist $ESHOME/logs/gc.log.0.current
stop_elasticsearch_service
}
@test "[TAR] relative ES_PATH_CONF" {
local es_path_conf=$ES_PATH_CONF
local temp=`mktemp -d`
mkdir "$temp"/config
cp "$ESCONFIG"/elasticsearch.yml "$temp"/config
cp "$ESCONFIG"/log4j2.properties "$temp"/config
cp "$ESCONFIG/jvm.options" "$temp/config"
chown -R elasticsearch:elasticsearch "$temp"
echo "node.name: relative" >> "$temp"/config/elasticsearch.yml
cd "$temp"
export ES_PATH_CONF=config
start_elasticsearch_service
curl -s -XGET localhost:9200/_nodes | fgrep '"name":"relative"'
stop_elasticsearch_service
export ES_PATH_CONF=$es_path_conf
}
@test "[TAR] remove tar" {
rm -rf "/tmp/elasticsearch"
}


@@ -0,0 +1,68 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.synonym.SolrSynonymParser;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
public class ESSolrSynonymParser extends SolrSynonymParser {
private final boolean lenient;
private static final Logger logger =
Loggers.getLogger(ESSolrSynonymParser.class, "ESSolrSynonymParser");
public ESSolrSynonymParser(boolean dedup, boolean expand, boolean lenient, Analyzer analyzer) {
super(dedup, expand, analyzer);
this.lenient = lenient;
}
@Override
public void add(CharsRef input, CharsRef output, boolean includeOrig) {
// This condition follows up on the overridden analyze method. In case lenient was set to true and there was an
// exception during super.analyze we return a zero-length CharsRef for that word which caused an exception. When
// the synonym mappings for the words are added using the add method we skip the ones that were left empty by
// analyze i.e., in the case when lenient is set we only add those combinations which are non-zero-length. The
// else would happen only in the case when the input or output is empty and lenient is set, in which case we
// quietly ignore it. For more details on the control-flow see SolrSynonymParser::addInternal.
if (lenient == false || (input.length > 0 && output.length > 0)) {
super.add(input, output, includeOrig);
}
}
@Override
public CharsRef analyze(String text, CharsRefBuilder reuse) throws IOException {
try {
return super.analyze(text, reuse);
} catch (IllegalArgumentException ex) {
if (lenient) {
logger.info("Synonym rule for [" + text + "] was ignored");
return new CharsRef("");
} else {
throw ex;
}
}
}
}


@@ -0,0 +1,68 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.synonym.WordnetSynonymParser;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
public class ESWordnetSynonymParser extends WordnetSynonymParser {
private final boolean lenient;
private static final Logger logger =
Loggers.getLogger(ESWordnetSynonymParser.class, "ESWordnetSynonymParser");
public ESWordnetSynonymParser(boolean dedup, boolean expand, boolean lenient, Analyzer analyzer) {
super(dedup, expand, analyzer);
this.lenient = lenient;
}
@Override
public void add(CharsRef input, CharsRef output, boolean includeOrig) {
// This condition follows up on the overridden analyze method. In case lenient was set to true and there was an
// exception during super.analyze we return a zero-length CharsRef for that word which caused an exception. When
// the synonym mappings for the words are added using the add method we skip the ones that were left empty by
// analyze i.e., in the case when lenient is set we only add those combinations which are non-zero-length. The
// else would happen only in the case when the input or output is empty and lenient is set, in which case we
// quietly ignore it. For more details on the control-flow see SolrSynonymParser::addInternal.
if (lenient == false || (input.length > 0 && output.length > 0)) {
super.add(input, output, includeOrig);
}
}
@Override
public CharsRef analyze(String text, CharsRefBuilder reuse) throws IOException {
try {
return super.analyze(text, reuse);
} catch (IllegalArgumentException ex) {
if (lenient) {
logger.info("Synonym rule for [" + text + "] was ignored");
return new CharsRef("");
} else {
throw ex;
}
}
}
}


@@ -21,10 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.synonym.SolrSynonymParser;
import org.apache.lucene.analysis.synonym.SynonymGraphFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;
-import org.apache.lucene.analysis.synonym.WordnetSynonymParser;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -58,11 +56,11 @@ public class SynonymGraphTokenFilterFactory extends SynonymTokenFilterFactory {
try {
SynonymMap.Builder parser;
if ("wordnet".equalsIgnoreCase(format)) {
-parser = new WordnetSynonymParser(true, expand, analyzerForParseSynonym);
-((WordnetSynonymParser) parser).parse(rulesReader);
+parser = new ESWordnetSynonymParser(true, expand, lenient, analyzerForParseSynonym);
+((ESWordnetSynonymParser) parser).parse(rulesReader);
} else {
-parser = new SolrSynonymParser(true, expand, analyzerForParseSynonym);
-((SolrSynonymParser) parser).parse(rulesReader);
+parser = new ESSolrSynonymParser(true, expand, lenient, analyzerForParseSynonym);
+((ESSolrSynonymParser) parser).parse(rulesReader);
}
synonymMap = parser.build();
} catch (Exception e) {


@@ -21,10 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.synonym.SolrSynonymParser;
import org.apache.lucene.analysis.synonym.SynonymFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;
-import org.apache.lucene.analysis.synonym.WordnetSynonymParser;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@@ -38,6 +36,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
protected final String format;
protected final boolean expand;
+protected final boolean lenient;
protected final Settings settings;
public SynonymTokenFilterFactory(IndexSettings indexSettings, Environment env, AnalysisRegistry analysisRegistry,
@@ -52,6 +51,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
}
this.expand = settings.getAsBoolean("expand", true);
+this.lenient = settings.getAsBoolean("lenient", false);
this.format = settings.get("format", "");
}
@@ -93,11 +93,11 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
try {
SynonymMap.Builder parser;
if ("wordnet".equalsIgnoreCase(format)) {
-parser = new WordnetSynonymParser(true, expand, analyzerForParseSynonym);
-((WordnetSynonymParser) parser).parse(rulesReader);
+parser = new ESWordnetSynonymParser(true, expand, lenient, analyzerForParseSynonym);
+((ESWordnetSynonymParser) parser).parse(rulesReader);
} else {
-parser = new SolrSynonymParser(true, expand, analyzerForParseSynonym);
-((SolrSynonymParser) parser).parse(rulesReader);
+parser = new ESSolrSynonymParser(true, expand, lenient, analyzerForParseSynonym);
+((ESSolrSynonymParser) parser).parse(rulesReader);
}
synonymMap = parser.build();
} catch (Exception e) {
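The new flag surfaces to users as a `lenient` option on the synonym and synonym_graph filters, defaulting to false per the code above. Index settings along these lines would exercise it (a sketch; the filter and analyzer names are invented):

Settings indexSettings = Settings.builder()
    .put("index.analysis.filter.my_synonyms.type", "synonym")
    .put("index.analysis.filter.my_synonyms.lenient", true)  // skip invalid rules instead of failing
    .putList("index.analysis.filter.my_synonyms.synonyms", "&, and", "come, advance, approach")
    .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
    .putList("index.analysis.analyzer.my_analyzer.filter", "my_synonyms")
    .build();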


@@ -0,0 +1,78 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.synonym.SynonymFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
import java.io.StringReader;
import java.text.ParseException;
import static org.hamcrest.Matchers.containsString;
public class ESSolrSynonymParserTests extends ESTokenStreamTestCase {
public void testLenientParser() throws IOException, ParseException {
ESSolrSynonymParser parser = new ESSolrSynonymParser(true, false, true, new StandardAnalyzer());
String rules =
"&,and\n" +
"come,advance,approach\n";
StringReader rulesReader = new StringReader(rules);
parser.parse(rulesReader);
SynonymMap synonymMap = parser.build();
Tokenizer tokenizer = new StandardTokenizer();
tokenizer.setReader(new StringReader("approach quietly then advance & destroy"));
TokenStream ts = new SynonymFilter(tokenizer, synonymMap, false);
assertTokenStreamContents(ts, new String[]{"come", "quietly", "then", "come", "destroy"});
}
public void testLenientParserWithSomeIncorrectLines() throws IOException, ParseException {
CharArraySet stopSet = new CharArraySet(1, true);
stopSet.add("bar");
ESSolrSynonymParser parser =
new ESSolrSynonymParser(true, false, true, new StandardAnalyzer(stopSet));
String rules = "foo,bar,baz";
StringReader rulesReader = new StringReader(rules);
parser.parse(rulesReader);
SynonymMap synonymMap = parser.build();
Tokenizer tokenizer = new StandardTokenizer();
tokenizer.setReader(new StringReader("first word is foo, then bar and lastly baz"));
TokenStream ts = new SynonymFilter(new StopFilter(tokenizer, stopSet), synonymMap, false);
assertTokenStreamContents(ts, new String[]{"first", "word", "is", "foo", "then", "and", "lastly", "foo"});
}
public void testNonLenientParser() {
ESSolrSynonymParser parser = new ESSolrSynonymParser(true, false, false, new StandardAnalyzer());
String rules =
"&,and=>and\n" +
"come,advance,approach\n";
StringReader rulesReader = new StringReader(rules);
ParseException ex = expectThrows(ParseException.class, () -> parser.parse(rulesReader));
assertThat(ex.getMessage(), containsString("Invalid synonym rule at line 1"));
}
}


@@ -0,0 +1,88 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.synonym.SynonymFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
import java.io.StringReader;
import java.text.ParseException;
import static org.hamcrest.Matchers.containsString;
public class ESWordnetSynonymParserTests extends ESTokenStreamTestCase {
public void testLenientParser() throws IOException, ParseException {
ESWordnetSynonymParser parser = new ESWordnetSynonymParser(true, false, true, new StandardAnalyzer());
String rules =
"s(100000001,1,'&',a,1,0).\n" +
"s(100000001,2,'and',a,1,0).\n" +
"s(100000002,1,'come',v,1,0).\n" +
"s(100000002,2,'advance',v,1,0).\n" +
"s(100000002,3,'approach',v,1,0).";
StringReader rulesReader = new StringReader(rules);
parser.parse(rulesReader);
SynonymMap synonymMap = parser.build();
Tokenizer tokenizer = new StandardTokenizer();
tokenizer.setReader(new StringReader("approach quietly then advance & destroy"));
TokenStream ts = new SynonymFilter(tokenizer, synonymMap, false);
assertTokenStreamContents(ts, new String[]{"come", "quietly", "then", "come", "destroy"});
}
public void testLenientParserWithSomeIncorrectLines() throws IOException, ParseException {
CharArraySet stopSet = new CharArraySet(1, true);
stopSet.add("bar");
ESWordnetSynonymParser parser =
new ESWordnetSynonymParser(true, false, true, new StandardAnalyzer(stopSet));
String rules =
"s(100000001,1,'foo',v,1,0).\n" +
"s(100000001,2,'bar',v,1,0).\n" +
"s(100000001,3,'baz',v,1,0).";
StringReader rulesReader = new StringReader(rules);
parser.parse(rulesReader);
SynonymMap synonymMap = parser.build();
Tokenizer tokenizer = new StandardTokenizer();
tokenizer.setReader(new StringReader("first word is foo, then bar and lastly baz"));
TokenStream ts = new SynonymFilter(new StopFilter(tokenizer, stopSet), synonymMap, false);
assertTokenStreamContents(ts, new String[]{"first", "word", "is", "foo", "then", "and", "lastly", "foo"});
}
public void testNonLenientParser() {
ESWordnetSynonymParser parser = new ESWordnetSynonymParser(true, false, false, new StandardAnalyzer());
String rules =
"s(100000001,1,'&',a,1,0).\n" +
"s(100000001,2,'and',a,1,0).\n" +
"s(100000002,1,'come',v,1,0).\n" +
"s(100000002,2,'advance',v,1,0).\n" +
"s(100000002,3,'approach',v,1,0).";
StringReader rulesReader = new StringReader(rules);
ParseException ex = expectThrows(ParseException.class, () -> parser.parse(rulesReader));
assertThat(ex.getMessage(), containsString("Invalid synonym rule at line 1"));
}
}


@@ -73,6 +73,7 @@ import org.elasticsearch.test.DummyShardLock;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.junit.annotations.TestLogging;
import java.io.IOException;
import java.io.UncheckedIOException;
@@ -84,6 +85,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.Locale;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
@@ -404,6 +406,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
}
}
+@TestLogging("_root:DEBUG,org.elasticsearch.index.shard:TRACE,org.elasticsearch.index.engine:TRACE")
public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception {
createIndex("test");
ensureGreen();
@@ -446,13 +449,14 @@ public class IndexShardIT extends ESSingleNodeTestCase {
barrier.await();
final CheckedRunnable<Exception> check;
if (flush) {
-final FlushStats flushStats = shard.flushStats();
-final long total = flushStats.getTotal();
-final long periodic = flushStats.getPeriodic();
+final FlushStats initialStats = shard.flushStats();
client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
check = () -> {
-assertThat(shard.flushStats().getTotal(), equalTo(total + 1));
-assertThat(shard.flushStats().getPeriodic(), equalTo(periodic + 1));
+final FlushStats currentStats = shard.flushStats();
+String msg = String.format(Locale.ROOT, "flush stats: total=[%d vs %d], periodic=[%d vs %d]",
+initialStats.getTotal(), currentStats.getTotal(), initialStats.getPeriodic(), currentStats.getPeriodic());
+assertThat(msg, currentStats.getPeriodic(), equalTo(initialStats.getPeriodic() + 1));
+assertThat(msg, currentStats.getTotal(), equalTo(initialStats.getTotal() + 1));
};
} else {
final long generation = getTranslog(shard).currentFileGeneration();

@@ -20,48 +20,61 @@
package org.elasticsearch.ingest;
import java.util.List;
-import java.util.Locale;
import java.util.Map;
+import java.util.Objects;
-import static org.hamcrest.Matchers.equalTo;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertThat;
public class IngestDocumentMatcher {
/**
* Helper method to assert the equivalence between two IngestDocuments.
*
-* @param a first object to compare
-* @param b second object to compare
+* @param docA first document to compare
+* @param docB second document to compare
*/
-public static void assertIngestDocument(Object a, Object b) {
+public static void assertIngestDocument(IngestDocument docA, IngestDocument docB) {
+if ((deepEquals(docA.getIngestMetadata(), docB.getIngestMetadata(), true) &&
+deepEquals(docA.getSourceAndMetadata(), docB.getSourceAndMetadata(), false)) == false) {
+throw new AssertionError("Expected [" + docA + "] but received [" + docB + "].");
+}
+}
+private static boolean deepEquals(Object a, Object b, boolean isIngestMeta) {
if (a instanceof Map) {
Map<?, ?> mapA = (Map<?, ?>) a;
+if (b instanceof Map == false) {
+return false;
+}
Map<?, ?> mapB = (Map<?, ?>) b;
+if (mapA.size() != mapB.size()) {
+return false;
+}
for (Map.Entry<?, ?> entry : mapA.entrySet()) {
-if (entry.getValue() instanceof List || entry.getValue() instanceof Map) {
-assertIngestDocument(entry.getValue(), mapB.get(entry.getKey()));
-}
+Object key = entry.getKey();
+// Don't compare the timestamp of ingest metadata since it will differ between executions
+if ((isIngestMeta && "timestamp".equals(key)) == false
+&& deepEquals(entry.getValue(), mapB.get(key), false) == false) {
+return false;
+}
}
+return true;
} else if (a instanceof List) {
List<?> listA = (List<?>) a;
+if (b instanceof List == false) {
+return false;
+}
List<?> listB = (List<?>) b;
-for (int i = 0; i < listA.size(); i++) {
+int countA = listA.size();
+if (countA != listB.size()) {
+return false;
+}
+for (int i = 0; i < countA; i++) {
Object value = listA.get(i);
-if (value instanceof List || value instanceof Map) {
-assertIngestDocument(value, listB.get(i));
-}
+if (deepEquals(value, listB.get(i), false) == false) {
+return false;
+}
}
+return true;
-} else if (a instanceof byte[]) {
-assertArrayEquals((byte[]) a, (byte[]) b);
-} else if (a instanceof IngestDocument) {
-IngestDocument docA = (IngestDocument) a;
-IngestDocument docB = (IngestDocument) b;
-assertIngestDocument(docA.getSourceAndMetadata(), docB.getSourceAndMetadata());
-assertIngestDocument(docA.getIngestMetadata(), docB.getIngestMetadata());
} else {
-String msg = String.format(Locale.ROOT, "Expected %s class to be equal to %s", a.getClass().getName(), b.getClass().getName());
-assertThat(msg, a, equalTo(b));
+return Objects.deepEquals(a, b);
}
}
}
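Because the matcher now takes IngestDocument arguments directly, a passing comparison looks like this (a sketch using only names from this diff; both documents here carry empty ingest metadata, and any timestamp keys would be deliberately ignored per the code above):

IngestDocument expected = new IngestDocument(Collections.singletonMap("foo", "bar"), new HashMap<>());
IngestDocument actual = new IngestDocument(Collections.singletonMap("foo", "bar"), new HashMap<>());
assertIngestDocument(expected, actual);  // passes; any other difference throws AssertionError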


@@ -0,0 +1,82 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest;
import org.elasticsearch.test.ESTestCase;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
public class IngestDocumentMatcherTests extends ESTestCase {
public void testDifferentMapData() {
Map<String, Object> sourceAndMetadata1 = new HashMap<>();
sourceAndMetadata1.put("foo", "bar");
IngestDocument document1 = new IngestDocument(sourceAndMetadata1, new HashMap<>());
IngestDocument document2 = new IngestDocument(new HashMap<>(), new HashMap<>());
assertThrowsOnComparision(document1, document2);
}
public void testDifferentLengthListData() {
String rootKey = "foo";
IngestDocument document1 =
new IngestDocument(Collections.singletonMap(rootKey, Arrays.asList("bar", "baz")), new HashMap<>());
IngestDocument document2 =
new IngestDocument(Collections.singletonMap(rootKey, Collections.emptyList()), new HashMap<>());
assertThrowsOnComparision(document1, document2);
}
public void testDifferentNestedListFieldData() {
String rootKey = "foo";
IngestDocument document1 =
new IngestDocument(Collections.singletonMap(rootKey, Arrays.asList("bar", "baz")), new HashMap<>());
IngestDocument document2 =
new IngestDocument(Collections.singletonMap(rootKey, Arrays.asList("bar", "blub")), new HashMap<>());
assertThrowsOnComparision(document1, document2);
}
public void testDifferentNestedMapFieldData() {
String rootKey = "foo";
IngestDocument document1 =
new IngestDocument(Collections.singletonMap(rootKey, Collections.singletonMap("bar", "baz")), new HashMap<>());
IngestDocument document2 =
new IngestDocument(Collections.singletonMap(rootKey, Collections.singletonMap("bar", "blub")), new HashMap<>());
assertThrowsOnComparision(document1, document2);
}
public void testOnTypeConflict() {
String rootKey = "foo";
IngestDocument document1 =
new IngestDocument(Collections.singletonMap(rootKey, Collections.singletonList("baz")), new HashMap<>());
IngestDocument document2 = new IngestDocument(
Collections.singletonMap(rootKey, Collections.singletonMap("blub", "blab")), new HashMap<>()
);
assertThrowsOnComparision(document1, document2);
}
private static void assertThrowsOnComparision(IngestDocument document1, IngestDocument document2) {
expectThrows(AssertionError.class, () -> assertIngestDocument(document1, document2));
expectThrows(AssertionError.class, () -> assertIngestDocument(document2, document1));
}
}


@@ -11,6 +11,8 @@ realm.
`GET /_xpack/security/role/<name>` +
+`DELETE /_xpack/security/role/<name>` +
`POST /_xpack/security/role/<name>/_clear_cache` +
`POST /_xpack/security/role/<name>` +


@@ -29,7 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.protocol.license.LicenseStatus;
+import org.elasticsearch.protocol.xpack.license.LicenseStatus;
/**
* Data structure for license. Use {@link Builder} to build a license.


@@ -52,6 +52,9 @@ public class XPackLicenseState {
messages.put(XPackField.LOGSTASH, new String[] {
"Logstash will continue to poll centrally-managed pipelines"
});
+messages.put(XPackField.BEATS, new String[] {
+"Beats will continue to poll centrally-managed configuration"
+});
messages.put(XPackField.DEPRECATION, new String[] {
"Deprecation APIs are disabled"
});
@@ -81,6 +84,7 @@ public class XPackLicenseState {
messages.put(XPackField.GRAPH, XPackLicenseState::graphAcknowledgementMessages);
messages.put(XPackField.MACHINE_LEARNING, XPackLicenseState::machineLearningAcknowledgementMessages);
messages.put(XPackField.LOGSTASH, XPackLicenseState::logstashAcknowledgementMessages);
+messages.put(XPackField.BEATS, XPackLicenseState::beatsAcknowledgementMessages);
messages.put(XPackField.SQL, XPackLicenseState::sqlAcknowledgementMessages);
ACKNOWLEDGMENT_MESSAGES = Collections.unmodifiableMap(messages);
}
@@ -205,11 +209,7 @@ public class XPackLicenseState {
private static String[] logstashAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) {
switch (newMode) {
case BASIC:
-switch (currentMode) {
-case TRIAL:
-case STANDARD:
-case GOLD:
-case PLATINUM:
+if (isBasic(currentMode) == false) {
return new String[] { "Logstash will no longer poll for centrally-managed pipelines" };
}
break;
@@ -217,6 +217,17 @@ public class XPackLicenseState {
return Strings.EMPTY_ARRAY;
}
+private static String[] beatsAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) {
+switch (newMode) {
+case BASIC:
+if (isBasic(currentMode) == false) {
+return new String[] { "Beats will no longer be able to use centrally-managed configuration" };
+}
+break;
+}
+return Strings.EMPTY_ARRAY;
+}
private static String[] sqlAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) {
switch (newMode) {
case BASIC:
@@ -232,6 +243,10 @@ public class XPackLicenseState {
return Strings.EMPTY_ARRAY;
}
+private static boolean isBasic(OperationMode mode) {
+return mode == OperationMode.BASIC;
+}
/** A wrapper for the license mode and state, to allow atomically swapping. */
private static class Status {
@@ -500,20 +515,17 @@ public class XPackLicenseState {
*/
public boolean isLogstashAllowed() {
Status localStatus = status;
-if (localStatus.active == false) {
-return false;
-}
-switch (localStatus.mode) {
-case TRIAL:
-case GOLD:
-case PLATINUM:
-case STANDARD:
-return true;
-default:
-return false;
-}
+return localStatus.active && (isBasic(localStatus.mode) == false);
}
+/**
+* Beats is allowed as long as there is an active license of type TRIAL, STANDARD, GOLD or PLATINUM
+* @return {@code true} as long as there is a valid license
+*/
+public boolean isBeatsAllowed() {
+Status localStatus = status;
+return localStatus.active && (isBasic(localStatus.mode) == false);
+}
/**


@@ -62,6 +62,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.action.RemovePolicyForIndexAc
import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction;
import org.elasticsearch.xpack.core.indexlifecycle.action.SetPolicyForIndexAction;
import org.elasticsearch.xpack.core.logstash.LogstashFeatureSetUsage;
+import org.elasticsearch.xpack.core.beats.BeatsFeatureSetUsage;
import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage;
import org.elasticsearch.xpack.core.ml.MlMetadata;
import org.elasticsearch.xpack.core.ml.action.CloseJobAction;
@@ -349,6 +350,8 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.GRAPH, GraphFeatureSetUsage::new),
// logstash
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.LOGSTASH, LogstashFeatureSetUsage::new),
+// beats
+new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.BEATS, BeatsFeatureSetUsage::new),
// ML - Custom metadata
new NamedWriteableRegistry.Entry(MetaData.Custom.class, "ml", MlMetadata::new),
new NamedWriteableRegistry.Entry(NamedDiff.class, "ml", MlMetadata.MlMetadataDiff::new),


@@ -19,6 +19,8 @@ public final class XPackField {
public static final String MACHINE_LEARNING = "ml";
/** Name constant for the Logstash feature. */
public static final String LOGSTASH = "logstash";
+/** Name constant for the Beats feature. */
+public static final String BEATS = "beats";
/** Name constant for the Deprecation API feature. */
public static final String DEPRECATION = "deprecation";
/** Name constant for the upgrade feature. */

---- file: XPackSettings.java ----

@@ -67,6 +67,10 @@ public class XPackSettings {
     public static final Setting<Boolean> LOGSTASH_ENABLED = Setting.boolSetting("xpack.logstash.enabled", true,
             Setting.Property.NodeScope);

+    /** Setting for enabling or disabling Beats extensions. Defaults to true. */
+    public static final Setting<Boolean> BEATS_ENABLED = Setting.boolSetting("xpack.beats.enabled", true,
+            Setting.Property.NodeScope);
+
     /**
      * Setting for enabling or disabling the index lifecycle extension. Defaults to true.
      */
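Like the Logstash flag above it, the new flag is a node-scoped boolean read from the node's Settings. A short sketch of how such a setting would be consulted (Settings.builder() and Setting.get() are the standard Elasticsearch APIs; the override value is purely illustrative):

Settings settings = Settings.builder()
        .put("xpack.beats.enabled", false)    // e.g. set in elasticsearch.yml
        .build();
boolean beatsEnabled = XPackSettings.BEATS_ENABLED.get(settings);   // false here; defaults to true when unset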

---- file: BeatsFeatureSetUsage.java (new) ----

@@ -0,0 +1,24 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.beats;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.xpack.core.XPackFeatureSet;
+import org.elasticsearch.xpack.core.XPackField;
+
+import java.io.IOException;
+
+public final class BeatsFeatureSetUsage extends XPackFeatureSet.Usage {
+
+    public BeatsFeatureSetUsage(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    public BeatsFeatureSetUsage(boolean available, boolean enabled) {
+        super(XPackField.BEATS, available, enabled);
+    }
+}
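The two constructors are the two halves of the wire protocol: one builds the usage object locally, the other rehydrates it from a stream. A sketch of the round trip, assuming the usual Writeable plumbing from the base class (BytesStreamOutput and the inherited writeTo are standard Elasticsearch stream APIs, not shown in this diff; the snippet would sit in a method declaring throws IOException):

BeatsFeatureSetUsage usage = new BeatsFeatureSetUsage(true /* available */, true /* enabled */);
try (BytesStreamOutput out = new BytesStreamOutput()) {
    usage.writeTo(out);                                               // serialization inherited from XPackFeatureSet.Usage
    BeatsFeatureSetUsage copy = new BeatsFeatureSetUsage(out.bytes().streamInput());
}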

---- file: Role.java ----

@@ -55,11 +55,7 @@ public final class Role {
     }

     public static Builder builder(String... names) {
-        return new Builder(names, null);
-    }
-
-    public static Builder builder(String[] names, FieldPermissionsCache fieldPermissionsCache) {
-        return new Builder(names, fieldPermissionsCache);
+        return new Builder(names);
     }

     public static Builder builder(RoleDescriptor rd, FieldPermissionsCache fieldPermissionsCache) {
@@ -94,16 +90,13 @@
         private ClusterPermission cluster = ClusterPermission.NONE;
         private RunAsPermission runAs = RunAsPermission.NONE;
         private List<IndicesPermission.Group> groups = new ArrayList<>();
-        private FieldPermissionsCache fieldPermissionsCache = null;

-        private Builder(String[] names, FieldPermissionsCache fieldPermissionsCache) {
+        private Builder(String[] names) {
             this.names = names;
-            this.fieldPermissionsCache = fieldPermissionsCache;
         }

         private Builder(RoleDescriptor rd, @Nullable FieldPermissionsCache fieldPermissionsCache) {
             this.names = new String[] { rd.getName() };
-            this.fieldPermissionsCache = fieldPermissionsCache;
             if (rd.getClusterPrivileges().length == 0) {
                 cluster = ClusterPermission.NONE;
             } else {
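For callers, the array-plus-cache overload is gone and the varargs form is the only name-based entry point left. A minimal sketch of the surviving call shape (the role name is illustrative, and the ClusterPrivilege.MONITOR constant is assumed from the surrounding security codebase):

Role role = Role.builder("beats_admin")               // varargs; no FieldPermissionsCache argument
        .cluster(ClusterPrivilege.MONITOR)            // illustrative cluster privilege
        .build();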

---- file: ReservedRolesStore.java ----

@@ -80,11 +80,19 @@ public class ReservedRolesStore {
                 new RoleDescriptor.IndicesPrivileges[] {
                         RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*", ".reporting-*").privileges("all").build(),
                         RoleDescriptor.IndicesPrivileges.builder()
-                                .indices(".monitoring-*").privileges("read", "read_cross_cluster").build()
+                                .indices(".monitoring-*").privileges("read", "read_cross_cluster").build(),
+                        RoleDescriptor.IndicesPrivileges.builder()
+                                .indices(".management-beats").privileges("create_index", "read", "write").build()
                 },
                 null, MetadataUtils.DEFAULT_RESERVED_METADATA))
         .put("logstash_system", new RoleDescriptor("logstash_system", new String[] { "monitor", MonitoringBulkAction.NAME},
                 null, null, MetadataUtils.DEFAULT_RESERVED_METADATA))
+        .put("beats_admin", new RoleDescriptor("beats_admin",
+                null,
+                new RoleDescriptor.IndicesPrivileges[] {
+                        RoleDescriptor.IndicesPrivileges.builder().indices(".management-beats").privileges("all").build()
+                },
+                null, MetadataUtils.DEFAULT_RESERVED_METADATA))
         .put(UsernamesField.BEATS_ROLE, new RoleDescriptor(UsernamesField.BEATS_ROLE,
                 new String[] { "monitor", MonitoringBulkAction.NAME}, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA))
         .put("machine_learning_user", new RoleDescriptor("machine_learning_user", new String[] { "monitor_ml" },

---- next file ----

@@ -10,10 +10,10 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.license.License;
 import org.elasticsearch.license.LicenseService;
-import org.elasticsearch.protocol.license.LicenseStatus;
 import org.elasticsearch.protocol.xpack.XPackInfoRequest;
 import org.elasticsearch.protocol.xpack.XPackInfoResponse;
 import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo.FeatureSet;
+import org.elasticsearch.protocol.xpack.license.LicenseStatus;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.transport.Transport;

---- file: ReservedRolesStoreTests.java ----

@@ -132,6 +132,7 @@ public class ReservedRolesStoreTests extends ESTestCase {
         assertThat(ReservedRolesStore.isReserved("watcher_user"), is(true));
         assertThat(ReservedRolesStore.isReserved("watcher_admin"), is(true));
         assertThat(ReservedRolesStore.isReserved("kibana_dashboard_only_user"), is(true));
+        assertThat(ReservedRolesStore.isReserved("beats_admin"), is(true));
         assertThat(ReservedRolesStore.isReserved(XPackUser.ROLE_NAME), is(true));
         assertThat(ReservedRolesStore.isReserved(LogstashSystemUser.ROLE_NAME), is(true));
         assertThat(ReservedRolesStore.isReserved(BeatsSystemUser.ROLE_NAME), is(true));
@@ -220,6 +221,20 @@ public class ReservedRolesStoreTests extends ESTestCase {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(index), is(true));
         });
+
+        // Beats management index
+        final String index = ".management-beats";
+        assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(index), is(false));
+        assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:bar").test(index), is(false));
+        assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(false));
+        assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(true));
+        assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(index), is(true));
+        assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(true));
+        assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(false));
+        assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true));
+        assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true));
+        assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true));
+        assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(index), is(false));
     }

     public void testKibanaUserRole() {
@@ -478,6 +493,39 @@ public class ReservedRolesStoreTests extends ESTestCase {
                 is(false));
     }

+    public void testBeatsAdminRole() {
+        final RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("beats_admin");
+        assertNotNull(roleDescriptor);
+        assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true));
+
+        final Role beatsAdminRole = Role.builder(roleDescriptor, null).build();
+        assertThat(beatsAdminRole.cluster().check(ClusterHealthAction.NAME), is(false));
+        assertThat(beatsAdminRole.cluster().check(ClusterStateAction.NAME), is(false));
+        assertThat(beatsAdminRole.cluster().check(ClusterStatsAction.NAME), is(false));
+        assertThat(beatsAdminRole.cluster().check(PutIndexTemplateAction.NAME), is(false));
+        assertThat(beatsAdminRole.cluster().check(ClusterRerouteAction.NAME), is(false));
+        assertThat(beatsAdminRole.cluster().check(ClusterUpdateSettingsAction.NAME), is(false));
+        assertThat(beatsAdminRole.cluster().check(MonitoringBulkAction.NAME), is(false));
+
+        assertThat(beatsAdminRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false));
+
+        assertThat(beatsAdminRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)),
+                is(false));
+
+        final String index = ".management-beats";
+        logger.info("index name [{}]", index);
+        assertThat(beatsAdminRole.indices().allowedIndicesMatcher("indices:foo").test(index), is(true));
+        assertThat(beatsAdminRole.indices().allowedIndicesMatcher("indices:bar").test(index), is(true));
+        assertThat(beatsAdminRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(true));
+        assertThat(beatsAdminRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(true));
+        assertThat(beatsAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(index), is(true));
+        assertThat(beatsAdminRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(true));
+        assertThat(beatsAdminRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(true));
+        assertThat(beatsAdminRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true));
+        assertThat(beatsAdminRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true));
+        assertThat(beatsAdminRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true));
+    }
+
     public void testBeatsSystemRole() {
         RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor(BeatsSystemUser.ROLE_NAME);
         assertNotNull(roleDescriptor);

---- next file ----

@@ -13,9 +13,9 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.license.License;
-import org.elasticsearch.protocol.license.LicenseStatus;
 import org.elasticsearch.protocol.xpack.XPackInfoRequest;
 import org.elasticsearch.protocol.xpack.XPackInfoResponse;
+import org.elasticsearch.protocol.xpack.license.LicenseStatus;
 import org.elasticsearch.transport.ActionNotFoundTransportException;
 import org.elasticsearch.transport.RemoteClusterAware;
 import org.elasticsearch.xpack.core.action.XPackInfoAction;

---- next file ----

@@ -11,8 +11,8 @@ import org.elasticsearch.client.Client;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
-import org.elasticsearch.protocol.license.LicenseStatus;
 import org.elasticsearch.protocol.xpack.XPackInfoResponse;
+import org.elasticsearch.protocol.xpack.license.LicenseStatus;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xpack.core.action.XPackInfoAction;

---- file: CompositeRolesStore.java ----

@@ -278,7 +278,7 @@ public class CompositeRolesStore extends AbstractComponent {
         final Set<String> clusterPrivs = clusterPrivileges.isEmpty() ? null : clusterPrivileges;
         final Privilege runAsPrivilege = runAs.isEmpty() ? Privilege.NONE : new Privilege(runAs, runAs.toArray(Strings.EMPTY_ARRAY));
-        Role.Builder builder = Role.builder(roleNames.toArray(new String[roleNames.size()]), fieldPermissionsCache)
+        Role.Builder builder = Role.builder(roleNames.toArray(new String[roleNames.size()]))
                 .cluster(ClusterPrivilege.get(clusterPrivs))
                 .runAs(runAsPrivilege);
         indicesPrivilegesMap.entrySet().forEach((entry) -> {

---- next file ----

@@ -31,7 +31,7 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.protocol.license.LicenseStatus;
+import org.elasticsearch.protocol.xpack.license.LicenseStatus;

 import java.io.IOException;
 import java.util.ArrayList;

---- next file ----

@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.license;
+package org.elasticsearch.protocol.xpack.license;

 import java.io.IOException;

---- next file ----

@@ -21,4 +21,4 @@
  * Request and Response objects for the default distribution's License
  * APIs.
  */
-package org.elasticsearch.protocol.license;
+package org.elasticsearch.protocol.xpack.license;

---- file: package-info.java (security protocol) ----

@@ -21,4 +21,4 @@
  * Request and Response objects for the default distribution's Security
  * APIs.
  */
-package org.elasticsearch.protocol.security;
+package org.elasticsearch.protocol.xpack.security;

---- file: package-info.java (watcher protocol) ----

@@ -21,4 +21,4 @@
  * Request and Response objects for the default distribution's Watcher
  * APIs.
  */
-package org.elasticsearch.protocol.watcher;
+package org.elasticsearch.protocol.xpack.watcher;

---- next file ----

@@ -21,11 +21,11 @@ package org.elasticsearch.protocol.xpack;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.protocol.license.LicenseStatus;
 import org.elasticsearch.protocol.xpack.XPackInfoResponse.BuildInfo;
 import org.elasticsearch.protocol.xpack.XPackInfoResponse.LicenseInfo;
 import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo;
 import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo.FeatureSet;
+import org.elasticsearch.protocol.xpack.license.LicenseStatus;
 import org.elasticsearch.test.AbstractStreamableXContentTestCase;

 import java.util.HashMap;

---- next file ----

@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.license;
+package org.elasticsearch.protocol.xpack.license;

 import java.io.IOException;

---- next file (YAML REST test) ----

@@ -6,7 +6,10 @@
         wait_for_active_shards: all
         body:
           settings:
-            number_of_replicas: 0
+            # we use 1 replica to make sure we don't have shards relocating. Relocating a shard with
+            # a scroll on it prevents shards from moving back into a node where a scroll is running (it holds the shard lock)
+            # see https://github.com/elastic/elasticsearch/issues/31827
+            number_of_replicas: 1
             index.routing.allocation.include.upgraded: true

   - do: