Merge remote-tracking branch 'elastic/master' into ccr
* elastic/master: (46 commits)
  NETWORKING: Make RemoteClusterConn. Lazy Resolve DNS (#32764)
  [DOCS] Splits the users API documentation into multiple pages (#32825)
  [DOCS] Splits the token APIs into separate pages (#32865)
  [DOCS] Creates redirects for role management APIs page
  Bypassing failing test PainlessDomainSplitIT#testHRDSplit (#32966)
  TEST: Mute testRetentionPolicyChangeDuringRecovery
  [DOCS] Fixes more broken links to role management APIs
  [Docs] Tweaks and fixes to rollup docs
  [DOCS] Fixes links to role management APIs
  [ML][TEST] Fix BasicRenormalizationIT after adding multibucket feature
  [DOCS] Splits the roles API documentation into multiple pages (#32794)
  [TEST] Run pre 6.4 nodes in non-FIPS JVMs (#32901)
  Make Geo Context Mapping Parsing More Strict (#32821)
  [ML] fix updating opened jobs scheduled events (#31651) (#32881)
  Scripted metric aggregations: add deprecation warning and system property to control legacy params (#31597)
  Tests: Fix timezone conversion in DateTimeUnitTests
  Enable FIPS140LicenseBootstrapCheck (#32903)
  Fix InternalAutoDateHistogram reproducible failure (#32723)
  Remove assertion in testDocStats on deletedDocs counter (#32914)
  HLRC: Move ML request converters into their own class (#32906)
  ...
This commit is contained in commit ac75968c0b.
@@ -87,8 +87,15 @@ subprojects {
                }
            }
        }
        repositories {
            maven {
                name = 'localTest'
                url = "${rootProject.buildDir}/local-test-repo"
            }
        }
    }
}

plugins.withType(BuildPlugin).whenPluginAdded {
    project.licenseFile = project.rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
    project.noticeFile = project.rootProject.file('NOTICE.txt')
@@ -228,6 +235,7 @@ subprojects {
        "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}": ':client:rest-high-level',
        "org.elasticsearch.client:test:${version}": ':client:test',
        "org.elasticsearch.client:transport:${version}": ':client:transport',
        "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${version}": ':modules:lang-painless:spi',
        "org.elasticsearch.test:framework:${version}": ':test:framework',
        "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:archives:integ-test-zip',
        "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:archives:zip',
@@ -162,11 +162,24 @@ if (project != rootProject) {
    // it's fine as we run them as part of :buildSrc
    test.enabled = false
    task integTest(type: Test) {
        // integration test requires the local testing repo for example plugin builds
        dependsOn project.rootProject.allprojects.collect {
            it.tasks.matching { it.name == 'publishNebulaPublicationToLocalTestRepository'}
        }
        exclude "**/*Tests.class"
        include "**/*IT.class"
        testClassesDirs = sourceSets.test.output.classesDirs
        classpath = sourceSets.test.runtimeClasspath
        inputs.dir(file("src/testKit"))
        // tell BuildExamplePluginsIT where to find the example plugins
        systemProperty (
            'test.build-tools.plugin.examples',
            files(
                project(':example-plugins').subprojects.collect { it.projectDir }
            ).asPath,
        )
        systemProperty 'test.local-test-repo-path', "${rootProject.buildDir}/local-test-repo"
        systemProperty 'test.lucene-snapshot-revision', (versions.lucene =~ /\w+-snapshot-([a-z0-9]+)/)[0][1]
    }
    check.dependsOn(integTest)

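The three system properties registered above are consumed by the new BuildExamplePluginsIT added later in this commit. A minimal Java sketch of the consuming side, shown here only for orientation (the property names come from this diff; the error messages and the surrounding statements are illustrative, not part of the commit):

import java.io.File;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

// Illustrative sketch: read back the properties wired up by the integTest task above.
List<File> examplePlugins = Arrays.stream(
        Objects.requireNonNull(System.getProperty("test.build-tools.plugin.examples"),
                "test.build-tools.plugin.examples not passed to tests")
            .split(File.pathSeparator))
        .map(File::new)
        .collect(Collectors.toList());
String localRepo = Objects.requireNonNull(System.getProperty("test.local-test-repo-path"),
        "test.local-test-repo-path not passed to tests");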
@@ -554,7 +554,7 @@ class BuildPlugin implements Plugin<Project> {
            project.publishing {
                publications {
                    nebula(MavenPublication) {
                        artifact project.tasks.shadowJar
                        artifacts = [ project.tasks.shadowJar ]
                        artifactId = project.archivesBaseName
                        /*
                         * Configure the pom to include the "shadow" as compile dependencies
@@ -584,7 +584,6 @@ class BuildPlugin implements Plugin<Project> {
                }
            }
        }

    }

    /** Adds compiler settings to the project */
@@ -799,6 +798,8 @@ class BuildPlugin implements Plugin<Project> {
            systemProperty 'tests.task', path
            systemProperty 'tests.security.manager', 'true'
            systemProperty 'jna.nosys', 'true'
            // TODO: remove this deprecation compatibility setting for 7.0
            systemProperty 'es.aggregations.enable_scripted_metric_agg_param', 'false'
            systemProperty 'compiler.java', project.ext.compilerJavaVersion.getMajorVersion()
            if (project.ext.inFipsJvm) {
                systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + "FIPS"
@@ -25,7 +25,6 @@ import org.elasticsearch.gradle.NoticeTask
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.gradle.api.InvalidUserDataException
import org.gradle.api.JavaVersion
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.XmlProvider
@@ -39,7 +38,6 @@ import java.nio.file.Path
import java.nio.file.StandardCopyOption
import java.util.regex.Matcher
import java.util.regex.Pattern

/**
 * Encapsulates build configuration for an Elasticsearch plugin.
 */
@@ -20,6 +20,7 @@ package org.elasticsearch.gradle.plugin

import org.gradle.api.Project
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputFile

/**
 * A container for plugin properties that will be written to the plugin descriptor, for easy
@@ -55,18 +56,39 @@ class PluginPropertiesExtension {
    boolean requiresKeystore = false

    /** A license file that should be included in the built plugin zip. */
    @Input
    File licenseFile = null
    private File licenseFile = null

    /**
     * A notice file that should be included in the built plugin zip. This will be
     * extended with notices from the {@code licenses/} directory.
     */
    @Input
    File noticeFile = null
    private File noticeFile = null

    Project project = null

    PluginPropertiesExtension(Project project) {
        name = project.name
        version = project.version
        this.project = project
    }

    @InputFile
    File getLicenseFile() {
        return licenseFile
    }

    void setLicenseFile(File licenseFile) {
        project.ext.licenseFile = licenseFile
        this.licenseFile = licenseFile
    }

    @InputFile
    File getNoticeFile() {
        return noticeFile
    }

    void setNoticeFile(File noticeFile) {
        project.ext.noticeFile = noticeFile
        this.noticeFile = noticeFile
    }
}
@@ -23,7 +23,6 @@ import org.gradle.api.InvalidUserDataException
import org.gradle.api.Task
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.OutputFile

/**
 * Creates a plugin descriptor.
 */
@@ -177,6 +177,12 @@ class NodeInfo {
            javaVersion = 8
        } else if (nodeVersion.onOrAfter("6.2.0") && nodeVersion.before("6.3.0")) {
            javaVersion = 9
        } else if (project.inFipsJvm && nodeVersion.onOrAfter("6.3.0") && nodeVersion.before("6.4.0")) {
            /*
             * Elasticsearch versions before 6.4.0 cannot be run in a FIPS-140 JVM. If we're running
             * bwc tests in a FIPS-140 JVM, ensure that the pre v6.4.0 nodes use a Java 10 JVM instead.
             */
            javaVersion = 10
        }

        args.addAll("-E", "node.portsfile=true")
@@ -31,6 +31,7 @@ import org.gradle.api.provider.Provider
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.TaskState
import org.gradle.plugins.ide.idea.IdeaPlugin

import java.nio.charset.StandardCharsets
import java.nio.file.Files
@@ -243,10 +244,12 @@ public class RestIntegTestTask extends DefaultTask {
                }
            }
        }
        project.idea {
            module {
                if (scopes.TEST != null) {
                    scopes.TEST.plus.add(project.configurations.restSpec)
        if (project.plugins.hasPlugin(IdeaPlugin)) {
            project.idea {
                module {
                    if (scopes.TEST != null) {
                        scopes.TEST.plus.add(project.configurations.restSpec)
                    }
                }
            }
        }

@@ -0,0 +1,164 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.gradle;

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.apache.commons.io.FileUtils;
import org.elasticsearch.gradle.test.GradleIntegrationTestCase;
import org.gradle.testkit.runner.GradleRunner;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

public class BuildExamplePluginsIT extends GradleIntegrationTestCase {

    private static List<File> EXAMPLE_PLUGINS = Collections.unmodifiableList(
        Arrays.stream(
            Objects.requireNonNull(System.getProperty("test.build-tools.plugin.examples"))
                .split(File.pathSeparator)
        ).map(File::new).collect(Collectors.toList())
    );

    @Rule
    public TemporaryFolder tmpDir = new TemporaryFolder();

    public final File examplePlugin;

    public BuildExamplePluginsIT(File examplePlugin) {
        this.examplePlugin = examplePlugin;
    }

    @BeforeClass
    public static void assertProjectsExist() {
        assertEquals(
            EXAMPLE_PLUGINS,
            EXAMPLE_PLUGINS.stream().filter(File::exists).collect(Collectors.toList())
        );
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() {
        return EXAMPLE_PLUGINS
            .stream()
            .map(each -> new Object[] {each})
            .collect(Collectors.toList());
    }

    public void testCurrentExamplePlugin() throws IOException {
        FileUtils.copyDirectory(examplePlugin, tmpDir.getRoot());
        // just get rid of deprecation warnings
        Files.write(
            getTempPath("settings.gradle"),
            "enableFeaturePreview('STABLE_PUBLISHING')\n".getBytes(StandardCharsets.UTF_8)
        );

        adaptBuildScriptForTest();

        Files.write(
            tmpDir.newFile("NOTICE.txt").toPath(),
            "dummy test notice".getBytes(StandardCharsets.UTF_8)
        );

        GradleRunner.create()
            .withProjectDir(tmpDir.getRoot())
            .withArguments("clean", "check", "-s", "-i", "--warning-mode=all", "--scan")
            .withPluginClasspath()
            .build();
    }

    private void adaptBuildScriptForTest() throws IOException {
        // Add the local repo as a build script URL so we can pull in build-tools and apply the plugin under test
        // + is ok because we have no other repo and just want to pick up latest
        writeBuildScript(
            "buildscript {\n" +
            "  repositories {\n" +
            "    maven {\n" +
            "      url = '" + getLocalTestRepoPath() + "'\n" +
            "    }\n" +
            "  }\n" +
            "  dependencies {\n" +
            "    classpath \"org.elasticsearch.gradle:build-tools:+\"\n" +
            "  }\n" +
            "}\n"
        );
        // get the original file
        Files.readAllLines(getTempPath("build.gradle"), StandardCharsets.UTF_8)
            .stream()
            .map(line -> line + "\n")
            .forEach(this::writeBuildScript);
        // Add a repositories section to be able to resolve dependencies
        String luceneSnapshotRepo = "";
        String luceneSnapshotRevision = System.getProperty("test.lucene-snapshot-revision");
        if (luceneSnapshotRepo != null) {
            luceneSnapshotRepo = "  maven {\n" +
                "    url \"http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + luceneSnapshotRevision + "\"\n" +
                "  }\n";
        }
        writeBuildScript("\n" +
            "repositories {\n" +
            "  maven {\n" +
            "    url \"" + getLocalTestRepoPath() + "\"\n" +
            "  }\n" +
            luceneSnapshotRepo +
            "}\n"
        );
        Files.delete(getTempPath("build.gradle"));
        Files.move(getTempPath("build.gradle.new"), getTempPath("build.gradle"));
        System.err.print("Generated build script is:");
        Files.readAllLines(getTempPath("build.gradle")).forEach(System.err::println);
    }

    private Path getTempPath(String fileName) {
        return new File(tmpDir.getRoot(), fileName).toPath();
    }

    private Path writeBuildScript(String script) {
        try {
            Path path = getTempPath("build.gradle.new");
            return Files.write(
                path,
                script.getBytes(StandardCharsets.UTF_8),
                Files.exists(path) ? StandardOpenOption.APPEND : StandardOpenOption.CREATE_NEW
            );
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    private String getLocalTestRepoPath() {
        String property = System.getProperty("test.local-test-repo-path");
        Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests");
        File file = new File(property);
        assertTrue("Expected " + property + " to exist, but it did not!", file.exists());
        return file.getAbsolutePath();
    }
}

@@ -0,0 +1,78 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.client.RequestConverters.EndpointBuilder;
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;

import java.io.IOException;

import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE;
import static org.elasticsearch.client.RequestConverters.createEntity;

final class MLRequestConverters {

    private MLRequestConverters() {}

    static Request putJob(PutJobRequest putJobRequest) throws IOException {
        String endpoint = new EndpointBuilder()
                .addPathPartAsIs("_xpack")
                .addPathPartAsIs("ml")
                .addPathPartAsIs("anomaly_detectors")
                .addPathPart(putJobRequest.getJob().getId())
                .build();
        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
        request.setEntity(createEntity(putJobRequest, REQUEST_BODY_CONTENT_TYPE));
        return request;
    }

    static Request openJob(OpenJobRequest openJobRequest) throws IOException {
        String endpoint = new EndpointBuilder()
                .addPathPartAsIs("_xpack")
                .addPathPartAsIs("ml")
                .addPathPartAsIs("anomaly_detectors")
                .addPathPart(openJobRequest.getJobId())
                .addPathPartAsIs("_open")
                .build();
        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
        request.setJsonEntity(openJobRequest.toString());
        return request;
    }

    static Request deleteJob(DeleteJobRequest deleteJobRequest) {
        String endpoint = new EndpointBuilder()
                .addPathPartAsIs("_xpack")
                .addPathPartAsIs("ml")
                .addPathPartAsIs("anomaly_detectors")
                .addPathPart(deleteJobRequest.getJobId())
                .build();
        Request request = new Request(HttpDelete.METHOD_NAME, endpoint);

        RequestConverters.Params params = new RequestConverters.Params(request);
        params.putParam("force", Boolean.toString(deleteJobRequest.isForce()));

        return request;
    }
}

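As a quick illustration of what the delete converter produces (this mirrors the assertions in MLRequestConvertersTests added later in this commit; "my-job" is a placeholder id):

// Illustrative sketch based on the tests below, not additional production code.
DeleteJobRequest deleteJobRequest = new DeleteJobRequest("my-job");
Request request = MLRequestConverters.deleteJob(deleteJobRequest);
// request.getMethod()                  -> "DELETE"
// request.getEndpoint()                -> "/_xpack/ml/anomaly_detectors/my-job"
// request.getParameters().get("force") -> "false" unless setForce(true) was called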
@@ -19,6 +19,8 @@
package org.elasticsearch.client;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse;
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
import org.elasticsearch.protocol.xpack.ml.OpenJobResponse;
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
@@ -55,7 +57,7 @@ public final class MachineLearningClient {
     */
    public PutJobResponse putJob(PutJobRequest request, RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(request,
                RequestConverters::putMachineLearningJob,
                MLRequestConverters::putJob,
                options,
                PutJobResponse::fromXContent,
                Collections.emptySet());
@@ -73,13 +75,51 @@ public final class MachineLearningClient {
     */
    public void putJobAsync(PutJobRequest request, RequestOptions options, ActionListener<PutJobResponse> listener) {
        restHighLevelClient.performRequestAsyncAndParseEntity(request,
                RequestConverters::putMachineLearningJob,
                MLRequestConverters::putJob,
                options,
                PutJobResponse::fromXContent,
                listener,
                Collections.emptySet());
    }

    /**
     * Deletes the given Machine Learning Job
     * <p>
     * For additional info
     * see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html">ML Delete Job documentation</a>
     * </p>
     * @param request the request to delete the job
     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @return action acknowledgement
     * @throws IOException when there is a serialization issue sending the request or receiving the response
     */
    public DeleteJobResponse deleteJob(DeleteJobRequest request, RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(request,
                MLRequestConverters::deleteJob,
                options,
                DeleteJobResponse::fromXContent,
                Collections.emptySet());
    }

    /**
     * Deletes the given Machine Learning Job asynchronously and notifies the listener on completion
     * <p>
     * For additional info
     * see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html">ML Delete Job documentation</a>
     * </p>
     * @param request the request to delete the job
     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener Listener to be notified upon request completion
     */
    public void deleteJobAsync(DeleteJobRequest request, RequestOptions options, ActionListener<DeleteJobResponse> listener) {
        restHighLevelClient.performRequestAsyncAndParseEntity(request,
                MLRequestConverters::deleteJob,
                options,
                DeleteJobResponse::fromXContent,
                listener,
                Collections.emptySet());
    }

    /**
     * Opens a Machine Learning Job.
     * When you open a new job, it starts with an empty model.
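A minimal synchronous usage sketch of the new delete API, mirroring the documentation test added later in this commit (the client setup and the job id are placeholders):

// Illustrative only: "client" is an existing RestHighLevelClient, "my-job" a placeholder id.
DeleteJobRequest deleteJobRequest = new DeleteJobRequest("my-job");
deleteJobRequest.setForce(false); // optionally force-delete a job that is still open
DeleteJobResponse deleteJobResponse =
        client.machineLearning().deleteJob(deleteJobRequest, RequestOptions.DEFAULT);
boolean acknowledged = deleteJobResponse.isAcknowledged();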
@@ -98,7 +138,7 @@ public final class MachineLearningClient {
     */
    public OpenJobResponse openJob(OpenJobRequest request, RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(request,
                RequestConverters::machineLearningOpenJob,
                MLRequestConverters::openJob,
                options,
                OpenJobResponse::fromXContent,
                Collections.emptySet());
@@ -120,7 +160,7 @@ public final class MachineLearningClient {
     */
    public void openJobAsync(OpenJobRequest request, RequestOptions options, ActionListener<OpenJobResponse> listener) {
        restHighLevelClient.performRequestAsyncAndParseEntity(request,
                RequestConverters::machineLearningOpenJob,
                MLRequestConverters::openJob,
                options,
                OpenJobResponse::fromXContent,
                listener,

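For symmetry, a minimal sketch of opening a job through the same client; the job id and timeout are placeholders, and the request shape follows the converter tests in this commit:

// Illustrative only: "client" is an existing RestHighLevelClient.
OpenJobRequest openJobRequest = new OpenJobRequest("my-job");
openJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); // wait up to 10 minutes for the job to open
OpenJobResponse openJobResponse =
        client.machineLearning().openJob(openJobRequest, RequestOptions.DEFAULT);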
@@ -112,8 +112,6 @@ import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest;
import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;
import org.elasticsearch.protocol.xpack.license.PutLicenseRequest;
import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest;
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.rest.action.search.RestSearchAction;
@@ -1199,31 +1197,6 @@ final class RequestConverters {
        return request;
    }

    static Request putMachineLearningJob(PutJobRequest putJobRequest) throws IOException {
        String endpoint = new EndpointBuilder()
                .addPathPartAsIs("_xpack")
                .addPathPartAsIs("ml")
                .addPathPartAsIs("anomaly_detectors")
                .addPathPart(putJobRequest.getJob().getId())
                .build();
        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
        request.setEntity(createEntity(putJobRequest, REQUEST_BODY_CONTENT_TYPE));
        return request;
    }

    static Request machineLearningOpenJob(OpenJobRequest openJobRequest) throws IOException {
        String endpoint = new EndpointBuilder()
                .addPathPartAsIs("_xpack")
                .addPathPartAsIs("ml")
                .addPathPartAsIs("anomaly_detectors")
                .addPathPart(openJobRequest.getJobId())
                .addPathPartAsIs("_open")
                .build();
        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
        request.setJsonEntity(openJobRequest.toString());
        return request;
    }

    static Request getMigrationAssistance(IndexUpgradeInfoRequest indexUpgradeInfoRequest) {
        EndpointBuilder endpointBuilder = new EndpointBuilder()
            .addPathPartAsIs("_xpack/migration/assistance")
@@ -1235,7 +1208,7 @@ final class RequestConverters {
        return request;
    }

    private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
    static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
        BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef();
        return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType));
    }

@@ -0,0 +1,90 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpPost;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig;
import org.elasticsearch.protocol.xpack.ml.job.config.Detector;
import org.elasticsearch.protocol.xpack.ml.job.config.Job;
import org.elasticsearch.test.ESTestCase;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Collections;

import static org.hamcrest.Matchers.equalTo;

public class MLRequestConvertersTests extends ESTestCase {

    public void testPutJob() throws IOException {
        Job job = createValidJob("foo");
        PutJobRequest putJobRequest = new PutJobRequest(job);

        Request request = MLRequestConverters.putJob(putJobRequest);

        assertThat(request.getEndpoint(), equalTo("/_xpack/ml/anomaly_detectors/foo"));
        try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
            Job parsedJob = Job.PARSER.apply(parser, null).build();
            assertThat(parsedJob, equalTo(job));
        }
    }

    public void testOpenJob() throws Exception {
        String jobId = "some-job-id";
        OpenJobRequest openJobRequest = new OpenJobRequest(jobId);
        openJobRequest.setTimeout(TimeValue.timeValueMinutes(10));

        Request request = MLRequestConverters.openJob(openJobRequest);
        assertEquals(HttpPost.METHOD_NAME, request.getMethod());
        assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_open", request.getEndpoint());
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        request.getEntity().writeTo(bos);
        assertEquals(bos.toString("UTF-8"), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}");
    }

    public void testDeleteJob() {
        String jobId = randomAlphaOfLength(10);
        DeleteJobRequest deleteJobRequest = new DeleteJobRequest(jobId);

        Request request = MLRequestConverters.deleteJob(deleteJobRequest);
        assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
        assertEquals("/_xpack/ml/anomaly_detectors/" + jobId, request.getEndpoint());
        assertEquals(Boolean.toString(false), request.getParameters().get("force"));

        deleteJobRequest.setForce(true);
        request = MLRequestConverters.deleteJob(deleteJobRequest);
        assertEquals(Boolean.toString(true), request.getParameters().get("force"));
    }

    private static Job createValidJob(String jobId) {
        AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList(
                Detector.builder().setFunction("count").build()));
        Job.Builder jobBuilder = Job.builder(jobId);
        jobBuilder.setAnalysisConfig(analysisConfig);
        return jobBuilder.build();
    }
}

@@ -20,6 +20,8 @@ package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse;
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
import org.elasticsearch.protocol.xpack.ml.OpenJobResponse;
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
@@ -48,6 +50,19 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
        assertThat(createdJob.getJobType(), is(Job.ANOMALY_DETECTOR_JOB_TYPE));
    }

    public void testDeleteJob() throws Exception {
        String jobId = randomValidJobId();
        Job job = buildJob(jobId);
        MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
        machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);

        DeleteJobResponse response = execute(new DeleteJobRequest(jobId),
                machineLearningClient::deleteJob,
                machineLearningClient::deleteJobAsync);

        assertTrue(response.isAcknowledged());
    }

    public void testOpenJob() throws Exception {
        String jobId = randomValidJobId();
        Job job = buildJob(jobId);

@@ -127,7 +127,6 @@ import org.elasticsearch.index.rankeval.RatedRequest;
import org.elasticsearch.index.rankeval.RestRankEvalAction;
import org.elasticsearch.protocol.xpack.XPackInfoRequest;
import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest;
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
import org.elasticsearch.repositories.fs.FsRepository;
@@ -2611,19 +2610,6 @@ public class RequestConvertersTests extends ESTestCase {
        assertThat(request.getEntity(), nullValue());
    }

    public void testPostMachineLearningOpenJob() throws Exception {
        String jobId = "some-job-id";
        OpenJobRequest openJobRequest = new OpenJobRequest(jobId);
        openJobRequest.setTimeout(TimeValue.timeValueMinutes(10));

        Request request = RequestConverters.machineLearningOpenJob(openJobRequest);
        assertEquals(HttpPost.METHOD_NAME, request.getMethod());
        assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_open", request.getEndpoint());
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        request.getEntity().writeTo(bos);
        assertEquals(bos.toString("UTF-8"), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}");
    }

    /**
     * Randomize the {@link FetchSourceContext} request parameters.
     */

@@ -25,6 +25,8 @@ import org.elasticsearch.client.MachineLearningIT;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse;
import org.elasticsearch.protocol.xpack.ml.OpenJobRequest;
import org.elasticsearch.protocol.xpack.ml.OpenJobResponse;
import org.elasticsearch.protocol.xpack.ml.PutJobRequest;
@@ -122,6 +124,56 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
        }
    }

    public void testDeleteJob() throws Exception {
        RestHighLevelClient client = highLevelClient();

        String jobId = "my-first-machine-learning-job";

        Job job = MachineLearningIT.buildJob(jobId);
        client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);

        Job secondJob = MachineLearningIT.buildJob("my-second-machine-learning-job");
        client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT);

        {
            //tag::x-pack-delete-ml-job-request
            DeleteJobRequest deleteJobRequest = new DeleteJobRequest("my-first-machine-learning-job");
            deleteJobRequest.setForce(false); //<1>
            DeleteJobResponse deleteJobResponse = client.machineLearning().deleteJob(deleteJobRequest, RequestOptions.DEFAULT);
            //end::x-pack-delete-ml-job-request

            //tag::x-pack-delete-ml-job-response
            boolean isAcknowledged = deleteJobResponse.isAcknowledged(); //<1>
            //end::x-pack-delete-ml-job-response
        }
        {
            //tag::x-pack-delete-ml-job-request-listener
            ActionListener<DeleteJobResponse> listener = new ActionListener<DeleteJobResponse>() {
                @Override
                public void onResponse(DeleteJobResponse deleteJobResponse) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
            //end::x-pack-delete-ml-job-request-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            //tag::x-pack-delete-ml-job-request-async
            DeleteJobRequest deleteJobRequest = new DeleteJobRequest("my-second-machine-learning-job");
            client.machineLearning().deleteJobAsync(deleteJobRequest, RequestOptions.DEFAULT, listener); // <1>
            //end::x-pack-delete-ml-job-request-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    public void testOpenJob() throws Exception {
        RestHighLevelClient client = highLevelClient();

@@ -143,7 +195,6 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
            //end::x-pack-ml-open-job-execute

        }

        {
            //tag::x-pack-ml-open-job-listener
            ActionListener<OpenJobResponse> listener = new ActionListener<OpenJobResponse>() {
@@ -154,7 +205,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {

                @Override
                public void onFailure(Exception e) {
                    //<2>
                    // <2>
                }
            };
            //end::x-pack-ml-open-job-listener
@@ -169,6 +220,5 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }

    }
}

@@ -41,6 +41,9 @@ integTestCluster {
    // TODO: remove this for 7.0, this exists to allow the doc examples in 6.x to continue using the defaults
    systemProperty 'es.scripting.use_java_time', 'false'
    systemProperty 'es.scripting.update.ctx_in_params', 'false'

    // TODO: remove this deprecation compatibility setting for 7.0
    systemProperty 'es.aggregations.enable_scripted_metric_agg_param', 'false'
}

// remove when https://github.com/elastic/elasticsearch/issues/31305 is fixed
@@ -400,25 +403,25 @@ buildRestTests.setups['stored_scripted_metric_script'] = '''
  - do:
      put_script:
        id: "my_init_script"
        body: { "script": { "lang": "painless", "source": "params._agg.transactions = []" } }
        body: { "script": { "lang": "painless", "source": "state.transactions = []" } }
  - match: { acknowledged: true }

  - do:
      put_script:
        id: "my_map_script"
        body: { "script": { "lang": "painless", "source": "params._agg.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)" } }
        body: { "script": { "lang": "painless", "source": "state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)" } }
  - match: { acknowledged: true }

  - do:
      put_script:
        id: "my_combine_script"
        body: { "script": { "lang": "painless", "source": "double profit = 0;for (t in params._agg.transactions) { profit += t; } return profit" } }
        body: { "script": { "lang": "painless", "source": "double profit = 0;for (t in state.transactions) { profit += t; } return profit" } }
  - match: { acknowledged: true }

  - do:
      put_script:
        id: "my_reduce_script"
        body: { "script": { "lang": "painless", "source": "double profit = 0;for (a in params._aggs) { profit += a; } return profit" } }
        body: { "script": { "lang": "painless", "source": "double profit = 0;for (a in states) { profit += a; } return profit" } }
  - match: { acknowledged: true }
'''

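The same rename (legacy `params._agg`/`params._aggs` to the new `state`/`states` variables) applies when a scripted metric aggregation is built programmatically. A hedged Java sketch using the high-level REST client; the index name, client variable, and script bodies below are illustrative and assume the standard ScriptedMetricAggregationBuilder API:

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

// Illustrative sketch only: "sales" and "client" are placeholders.
SearchRequest searchRequest = new SearchRequest("sales");
searchRequest.source(new SearchSourceBuilder().size(0).aggregation(
        AggregationBuilders.scriptedMetric("profit")
            .initScript(new Script("state.transactions = []"))
            .mapScript(new Script("state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)"))
            .combineScript(new Script("double profit = 0; for (t in state.transactions) { profit += t } return profit"))
            .reduceScript(new Script("double profit = 0; for (a in states) { profit += a } return profit"))));
client.search(searchRequest, RequestOptions.DEFAULT);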
@@ -0,0 +1,49 @@
[[java-rest-high-x-pack-ml-delete-job]]
=== Delete Job API

[[java-rest-high-x-pack-machine-learning-delete-job-request]]
==== Delete Job Request

A `DeleteJobRequest` object requires a non-null `jobId` and can optionally set `force`.
It can be executed as follows:

["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-request]
---------------------------------------------------
<1> Use to forcefully delete an opened job;
this method is quicker than closing and deleting the job.
Defaults to `false`.

[[java-rest-high-x-pack-machine-learning-delete-job-response]]
==== Delete Job Response

The returned `DeleteJobResponse` object indicates the acknowledgement of the request:
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-response]
---------------------------------------------------
<1> `isAcknowledged` indicates whether the deletion request was acknowledged

[[java-rest-high-x-pack-machine-learning-delete-job-async]]
==== Delete Job Asynchronously

This request can also be made asynchronously.
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-request-async]
---------------------------------------------------
<1> The `DeleteJobRequest` to execute and the `ActionListener` to alert on completion or error.

The deletion request returns immediately. Once the request completes, the `ActionListener` is
called back using the `onResponse` or `onFailure` method. The latter indicates that some failure
occurred when making the request.

A typical listener for a `DeleteJobRequest` could be defined as follows:

["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-request-listener]
---------------------------------------------------
<1> The action to be taken when the deletion completes
<2> What to do when a failure occurs

@@ -205,9 +205,11 @@ include::licensing/delete-license.asciidoc[]
The Java High Level REST Client supports the following Machine Learning APIs:

* <<java-rest-high-x-pack-ml-put-job>>
* <<java-rest-high-x-pack-ml-delete-job>>
* <<java-rest-high-x-pack-ml-open-job>>

include::ml/put-job.asciidoc[]
include::ml/delete-job.asciidoc[]
include::ml/open-job.asciidoc[]

== Migration APIs

@@ -17,14 +17,11 @@ Integrations are not plugins, but are external tools or modules that make it eas
* https://drupal.org/project/elasticsearch_connector[Drupal]:
  Drupal Elasticsearch integration.

* https://wordpress.org/plugins/wpsolr-search-engine/[WPSOLR]:
  Elasticsearch (and Apache Solr) WordPress Plugin

* http://searchbox-io.github.com/wp-elasticsearch/[Wp-Elasticsearch]:
* https://wordpress.org/plugins/elasticpress/[ElasticPress]:
  Elasticsearch WordPress Plugin

* https://github.com/wallmanderco/elasticsearch-indexer[Elasticsearch Indexer]:
  Elasticsearch WordPress Plugin
* https://wordpress.org/plugins/wpsolr-search-engine/[WPSOLR]:
  Elasticsearch (and Apache Solr) WordPress Plugin

* https://doc.tiki.org/Elasticsearch[Tiki Wiki CMS Groupware]:
  Tiki has native support for Elasticsearch. This provides faster & better

@@ -47,7 +47,7 @@ POST test/_doc/1/_update
// TEST[continued]

We can add a tag to the list of tags (note, if the tag exists, it
will still add it, since its a list):
will still add it, since it's a list):

[source,js]
--------------------------------------------------
@@ -65,6 +65,28 @@ POST test/_doc/1/_update
// CONSOLE
// TEST[continued]

We can remove a tag from the list of tags. Note that the Painless function to
`remove` a tag takes as its parameter the array index of the element you wish
to remove, so you need a bit more logic to locate it while avoiding a runtime
error. Note that if the tag was present more than once in the list, this will
remove only one occurrence of it:

[source,js]
--------------------------------------------------
POST test/_doc/1/_update
{
  "script" : {
    "source": "if (ctx._source.tags.contains(params.tag)) { ctx._source.tags.remove(ctx._source.tags.indexOf(params.tag)) }",
    "lang": "painless",
    "params" : {
      "tag" : "blue"
    }
  }
}
--------------------------------------------------
// CONSOLE
// TEST[continued]

In addition to `_source`, the following variables are available through
the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`
and `_now` (the current timestamp).
@@ -172,7 +194,7 @@ the request was ignored.
    "_index": "test",
    "_type": "_doc",
    "_id": "1",
    "_version": 6,
    "_version": 7,
    "result": "noop"
}
--------------------------------------------------

@@ -93,7 +93,8 @@ Replication is important for two primary reasons:


To summarize, each index can be split into multiple shards. An index can also be replicated zero (meaning no replicas) or more times. Once replicated, each index will have primary shards (the original shards that were replicated from) and replica shards (the copies of the primary shards).
The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may change the number of replicas dynamically anytime but you cannot change the number of shards after-the-fact.

The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may also change the number of replicas dynamically anytime. You can change the number of shards for an existing index using the {ref}/indices-shrink-index.html[`_shrink`] and {ref}/indices-split-index.html[`_split`] APIs, however this is not a trivial task and pre-planning for the correct number of shards is the optimal approach.

By default, each index in Elasticsearch is allocated one primary shard and one replica which means that if you have at least two nodes in your cluster, your index will have one primary shard and another replica shard (one complete replica) for a total of two shards per index.

@@ -92,6 +92,9 @@ deprecated in 6.x, has been removed. Context enabled suggestion queries
without contexts have to visit every suggestion, which degrades the search performance
considerably.

For geo context the value of the `path` parameter is now validated against the mapping,
and the context is only accepted if `path` points to a field with `geo_point` type.

==== Semantics changed for `max_concurrent_shard_requests`

`max_concurrent_shard_requests` used to limit the total number of concurrent shard

@@ -503,3 +503,31 @@ guide to the {painless}/index.html[Painless Scripting Language].

See the {painless}/painless-api-reference.html[Painless API Reference] in
the guide to the {painless}/index.html[Painless Scripting Language].

[role="exclude",id="security-api-roles"]
=== Role management APIs

You can use the following APIs to add, remove, and retrieve roles in the native realm:

* <<security-api-put-role,Create role>>, <<security-api-delete-role,Delete role>>
* <<security-api-clear-role-cache,Clear roles cache>>
* <<security-api-get-role,Get roles>>

[role="exclude",id="security-api-tokens"]
=== Token management APIs

You can use the following APIs to create and invalidate bearer tokens for access
without requiring basic authentication:

* <<security-api-get-token,Get token>>, <<security-api-invalidate-token,Invalidate token>>

[role="exclude",id="security-api-users"]
=== User management APIs

You can use the following APIs to create, read, update, and delete users from the
native realm:

* <<security-api-put-user,Create users>>, <<security-api-delete-user,Delete users>>
* <<security-api-enable-user,Enable users>>, <<security-api-disable-user,Disable users>>
* <<security-api-change-password,Change passwords>>
* <<security-api-get-user,Get users>>

@@ -90,7 +90,8 @@ And here is a sample response:

Set to `false` to return an overall failure if the request would produce partial
results. Defaults to true, which will allow partial results in the case of timeouts
or partial failures.
or partial failures. This default can be controlled using the cluster-level setting
`search.default_allow_partial_results`.

`terminate_after`::

@@ -125,5 +125,6 @@ more details on the different types of search that can be performed.

|`allow_partial_search_results` |Set to `false` to return an overall failure if the request would produce
partial results. Defaults to true, which will allow partial results in the case of timeouts
or partial failures..
or partial failures. This default can be controlled using the cluster-level setting
`search.default_allow_partial_results`.
|=======================================================================

@@ -8,8 +8,8 @@ distributions, and the `data` directory under the root of the
Elasticsearch installation for the <<zip-targz,tar and zip>> archive
distributions). If this path is not suitable for receiving heap dumps,
you should modify the entry `-XX:HeapDumpPath=...` in
<<jvm-options,`jvm.options`>>. If you specify a fixed filename instead
of a directory, the JVM will repeatedly use the same file; this is one
mechanism for preventing heap dumps from accumulating in the heap dump
path. Alternatively, you can configure a scheduled task via your OS to
remove heap dumps that are older than a configured age.
<<jvm-options,`jvm.options`>>. If you specify a directory, the JVM
will generate a filename for the heap dump based on the PID of the running
instance. If you specify a fixed filename instead of a directory, the file must
not exist when the JVM needs to perform a heap dump on an out of memory
exception, otherwise the heap dump will fail.

@@ -9,7 +9,7 @@ location on a single node. This can be useful for testing Elasticsearch's
ability to form clusters, but it is not a configuration recommended for
production.

In order to communicate and to form a cluster with nodes on other servers, your
In order to form a cluster with nodes on other servers, your
node will need to bind to a non-loopback address. While there are many
<<modules-network,network settings>>, usually all you need to configure is
`network.host`:

@@ -0,0 +1,150 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common;

import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Objects;

/**
 * Helper class similar to Arrays to handle conversions for Char arrays
 */
public final class CharArrays {

    private CharArrays() {}

    /**
     * Decodes the provided byte[] to a UTF-8 char[]. This is done while avoiding
     * conversions to String. The provided byte[] is not modified by this method, so
     * the caller needs to take care of clearing the value if it is sensitive.
     */
    public static char[] utf8BytesToChars(byte[] utf8Bytes) {
        final ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes);
        final CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer);
        final char[] chars;
        if (charBuffer.hasArray()) {
            // there is no guarantee that the char buffers backing array is the right size
            // so we need to make a copy
            chars = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit());
            Arrays.fill(charBuffer.array(), (char) 0); // clear sensitive data
        } else {
            final int length = charBuffer.limit() - charBuffer.position();
            chars = new char[length];
            charBuffer.get(chars);
            // if the buffer is not read only we can reset and fill with 0's
            if (charBuffer.isReadOnly() == false) {
                charBuffer.clear(); // reset
                for (int i = 0; i < charBuffer.limit(); i++) {
                    charBuffer.put((char) 0);
                }
            }
        }
        return chars;
    }

    /**
     * Encodes the provided char[] to a UTF-8 byte[]. This is done while avoiding
     * conversions to String. The provided char[] is not modified by this method, so
     * the caller needs to take care of clearing the value if it is sensitive.
     */
    public static byte[] toUtf8Bytes(char[] chars) {
        final CharBuffer charBuffer = CharBuffer.wrap(chars);
        final ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(charBuffer);
        final byte[] bytes;
        if (byteBuffer.hasArray()) {
            // there is no guarantee that the byte buffers backing array is the right size
            // so we need to make a copy
            bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit());
            Arrays.fill(byteBuffer.array(), (byte) 0); // clear sensitive data
        } else {
            final int length = byteBuffer.limit() - byteBuffer.position();
            bytes = new byte[length];
            byteBuffer.get(bytes);
            // if the buffer is not read only we can reset and fill with 0's
            if (byteBuffer.isReadOnly() == false) {
                byteBuffer.clear(); // reset
                for (int i = 0; i < byteBuffer.limit(); i++) {
                    byteBuffer.put((byte) 0);
                }
            }
        }
        return bytes;
    }

    /**
     * Tests if a char[] contains a sequence of characters that match the prefix. This is like
     * {@link String#startsWith(String)} but does not require conversion of the char[] to a string.
     */
    public static boolean charsBeginsWith(String prefix, char[] chars) {
        if (chars == null || prefix == null) {
            return false;
        }

        if (prefix.length() > chars.length) {
            return false;
        }

        for (int i = 0; i < prefix.length(); i++) {
            if (chars[i] != prefix.charAt(i)) {
                return false;
            }
        }

        return true;
    }

    /**
     * Constant time equality check of char arrays to avoid potential timing attacks.
     */
    public static boolean constantTimeEquals(char[] a, char[] b) {
        Objects.requireNonNull(a, "char arrays must not be null for constantTimeEquals");
        Objects.requireNonNull(b, "char arrays must not be null for constantTimeEquals");
        if (a.length != b.length) {
            return false;
        }

        int equals = 0;
        for (int i = 0; i < a.length; i++) {
            equals |= a[i] ^ b[i];
        }

        return equals == 0;
    }

    /**
     * Constant time equality check of strings to avoid potential timing attacks.
     */
    public static boolean constantTimeEquals(String a, String b) {
        Objects.requireNonNull(a, "strings must not be null for constantTimeEquals");
        Objects.requireNonNull(b, "strings must not be null for constantTimeEquals");
        if (a.length() != b.length()) {
            return false;
        }

        int equals = 0;
        for (int i = 0; i < a.length(); i++) {
            equals |= a.charAt(i) ^ b.charAt(i);
        }

        return equals == 0;
    }
}

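A short usage sketch of the helper above (the secret literal is a placeholder; as the Javadoc notes, clearing the intermediate copies afterwards is the caller's responsibility):

import java.util.Arrays;
import org.elasticsearch.common.CharArrays;

// Illustrative only: round-trip a secret without going through String.
char[] secret = new char[] {'s', '3', 'c', 'r', '3', 't'};
byte[] utf8 = CharArrays.toUtf8Bytes(secret);
char[] roundTripped = CharArrays.utf8BytesToChars(utf8);
assert CharArrays.constantTimeEquals(secret, roundTripped);
assert CharArrays.charsBeginsWith("s3", roundTripped);
Arrays.fill(utf8, (byte) 0);           // caller clears the sensitive copies
Arrays.fill(roundTripped, (char) 0);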
@@ -0,0 +1,75 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common;

import org.elasticsearch.test.ESTestCase;

import java.nio.charset.StandardCharsets;

public class CharArraysTests extends ESTestCase {

    public void testCharsToBytes() {
        final String originalValue = randomUnicodeOfCodepointLengthBetween(0, 32);
        final byte[] expectedBytes = originalValue.getBytes(StandardCharsets.UTF_8);
        final char[] valueChars = originalValue.toCharArray();

        final byte[] convertedBytes = CharArrays.toUtf8Bytes(valueChars);
        assertArrayEquals(expectedBytes, convertedBytes);
    }

    public void testBytesToUtf8Chars() {
        final String originalValue = randomUnicodeOfCodepointLengthBetween(0, 32);
        final byte[] bytes = originalValue.getBytes(StandardCharsets.UTF_8);
        final char[] expectedChars = originalValue.toCharArray();

        final char[] convertedChars = CharArrays.utf8BytesToChars(bytes);
        assertArrayEquals(expectedChars, convertedChars);
    }

    public void testCharsBeginsWith() {
        assertFalse(CharArrays.charsBeginsWith(randomAlphaOfLength(4), null));
        assertFalse(CharArrays.charsBeginsWith(null, null));
        assertFalse(CharArrays.charsBeginsWith(null, randomAlphaOfLength(4).toCharArray()));
        assertFalse(CharArrays.charsBeginsWith(randomAlphaOfLength(2), randomAlphaOfLengthBetween(3, 8).toCharArray()));

        final String prefix = randomAlphaOfLengthBetween(2, 4);
        assertTrue(CharArrays.charsBeginsWith(prefix, prefix.toCharArray()));
        final char[] prefixedValue = prefix.concat(randomAlphaOfLengthBetween(1, 12)).toCharArray();
        assertTrue(CharArrays.charsBeginsWith(prefix, prefixedValue));

        final String modifiedPrefix = randomBoolean() ? prefix.substring(1) : prefix.substring(0, prefix.length() - 1);
        char[] nonMatchingValue;
        do {
            nonMatchingValue = modifiedPrefix.concat(randomAlphaOfLengthBetween(0, 12)).toCharArray();
        } while (new String(nonMatchingValue).startsWith(prefix));
        assertFalse(CharArrays.charsBeginsWith(prefix, nonMatchingValue));
        assertTrue(CharArrays.charsBeginsWith(modifiedPrefix, nonMatchingValue));
    }

    public void testConstantTimeEquals() {
        final String value = randomAlphaOfLengthBetween(0, 32);
        assertTrue(CharArrays.constantTimeEquals(value, value));
        assertTrue(CharArrays.constantTimeEquals(value.toCharArray(), value.toCharArray()));

        final String other = randomAlphaOfLengthBetween(1, 32);
        assertFalse(CharArrays.constantTimeEquals(value, other));
        assertFalse(CharArrays.constantTimeEquals(value.toCharArray(), other.toCharArray()));
    }
}

@ -21,6 +21,7 @@ package org.elasticsearch.painless;
|
|||
|
||||
import org.elasticsearch.painless.lookup.PainlessLookup;
|
||||
import org.elasticsearch.painless.lookup.PainlessLookupUtility;
|
||||
import org.elasticsearch.painless.lookup.def;
|
||||
|
||||
import java.lang.invoke.MethodType;
|
||||
import java.lang.reflect.Field;
|
||||
|
@ -190,7 +191,7 @@ public class ScriptClassInfo {
|
|||
componentType = componentType.getComponentType();
|
||||
}
|
||||
|
||||
if (painlessLookup.lookupPainlessClass(componentType) == null) {
|
||||
if (componentType != def.class && painlessLookup.lookupPainlessClass(componentType) == null) {
|
||||
throw new IllegalArgumentException(unknownErrorMessageSource.apply(componentType));
|
||||
}
|
||||
|
||||
|
|
|
@ -26,6 +26,7 @@ import java.util.Objects;
|
|||
import java.util.Set;
|
||||
import java.util.function.Function;
|
||||
|
||||
import static org.elasticsearch.painless.lookup.PainlessLookupUtility.DEF_CLASS_NAME;
|
||||
import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessConstructorKey;
|
||||
import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessFieldKey;
|
||||
import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessMethodKey;
|
||||
|
@ -47,7 +48,7 @@ public final class PainlessLookup {
|
|||
public boolean isValidCanonicalClassName(String canonicalClassName) {
|
||||
Objects.requireNonNull(canonicalClassName);
|
||||
|
||||
return canonicalClassNamesToClasses.containsKey(canonicalClassName);
|
||||
return DEF_CLASS_NAME.equals(canonicalClassName) || canonicalClassNamesToClasses.containsKey(canonicalClassName);
|
||||
}
|
||||
|
||||
public Class<?> canonicalTypeNameToType(String canonicalTypeName) {
|
||||
|
|
|
@ -211,9 +211,6 @@ public final class PainlessLookupBuilder {
|
|||
public PainlessLookupBuilder() {
|
||||
canonicalClassNamesToClasses = new HashMap<>();
|
||||
classesToPainlessClassBuilders = new HashMap<>();
|
||||
|
||||
canonicalClassNamesToClasses.put(DEF_CLASS_NAME, def.class);
|
||||
classesToPainlessClassBuilders.put(def.class, new PainlessClassBuilder());
|
||||
}
|
||||
|
||||
private Class<?> canonicalTypeNameToType(String canonicalTypeName) {
|
||||
|
@ -225,7 +222,7 @@ public final class PainlessLookupBuilder {
|
|||
type = type.getComponentType();
|
||||
}
|
||||
|
||||
return classesToPainlessClassBuilders.containsKey(type);
|
||||
return type == def.class || classesToPainlessClassBuilders.containsKey(type);
|
||||
}
|
||||
|
||||
public void addPainlessClass(ClassLoader classLoader, String javaClassName, boolean importClassName) {
|
||||
|
|
|
@ -82,7 +82,7 @@ public final class PainlessLookupUtility {
|
|||
Objects.requireNonNull(canonicalTypeName);
|
||||
Objects.requireNonNull(canonicalClassNamesToClasses);
|
||||
|
||||
Class<?> type = canonicalClassNamesToClasses.get(canonicalTypeName);
|
||||
Class<?> type = DEF_CLASS_NAME.equals(canonicalTypeName) ? def.class : canonicalClassNamesToClasses.get(canonicalTypeName);
|
||||
|
||||
if (type != null) {
|
||||
return type;
|
||||
|
@ -105,7 +105,7 @@ public final class PainlessLookupUtility {
|
|||
}
|
||||
|
||||
canonicalTypeName = canonicalTypeName.substring(0, canonicalTypeName.indexOf('['));
|
||||
type = canonicalClassNamesToClasses.get(canonicalTypeName);
|
||||
type = DEF_CLASS_NAME.equals(canonicalTypeName) ? def.class : canonicalClassNamesToClasses.get(canonicalTypeName);
|
||||
|
||||
if (type != null) {
|
||||
char arrayBraces[] = new char[arrayDimensions];
|
||||
|
|
|
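The lookup changes above stop storing the dynamic `def` type in the name-to-class maps and instead special-case it on every lookup. A minimal standalone sketch of that pattern, assuming a made-up sentinel class in place of painless `def.class`:

```java
import java.util.HashMap;
import java.util.Map;

// Standalone sketch of the lookup pattern introduced above: the sentinel type name
// is checked before consulting the map, so the map never has to contain it.
public class SentinelTypeLookupSketch {

    static final String DEF_CLASS_NAME = "def";
    static final Class<?> DEF_SENTINEL = Object.class; // stand-in for painless def.class

    static Class<?> canonicalTypeNameToType(String name, Map<String, Class<?>> classes) {
        return DEF_CLASS_NAME.equals(name) ? DEF_SENTINEL : classes.get(name);
    }

    public static void main(String[] args) {
        Map<String, Class<?>> classes = new HashMap<>();
        classes.put("java.lang.String", String.class);
        System.out.println(canonicalTypeNameToType("def", classes));              // sentinel class
        System.out.println(canonicalTypeNameToType("java.lang.String", classes)); // class java.lang.String
    }
}
```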
@ -16,13 +16,14 @@
|
|||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
apply plugin: 'elasticsearch.esplugin'
|
||||
|
||||
esplugin {
|
||||
name 'custom-settings'
|
||||
description 'An example plugin showing how to register custom settings'
|
||||
classname 'org.elasticsearch.example.customsettings.ExampleCustomSettingsPlugin'
|
||||
licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
|
||||
noticeFile rootProject.file('NOTICE.txt')
|
||||
}
|
||||
|
||||
integTestCluster {
|
||||
|
|
|
@ -23,6 +23,8 @@ esplugin {
|
|||
name 'custom-suggester'
|
||||
description 'An example plugin showing how to write and register a custom suggester'
|
||||
classname 'org.elasticsearch.example.customsuggester.CustomSuggesterPlugin'
|
||||
licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
|
||||
noticeFile rootProject.file('NOTICE.txt')
|
||||
}
|
||||
|
||||
integTestCluster {
|
||||
|
@ -30,4 +32,4 @@ integTestCluster {
|
|||
}
|
||||
|
||||
// this plugin has no unit tests, only rest tests
|
||||
tasks.test.enabled = false
|
||||
tasks.test.enabled = false
|
||||
|
|
|
@ -16,7 +16,6 @@
|
|||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
apply plugin: 'elasticsearch.esplugin'
|
||||
|
||||
esplugin {
|
||||
|
@ -24,10 +23,12 @@ esplugin {
|
|||
description 'An example whitelisting additional classes and methods in painless'
|
||||
classname 'org.elasticsearch.example.painlesswhitelist.MyWhitelistPlugin'
|
||||
extendedPlugins = ['lang-painless']
|
||||
licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
|
||||
noticeFile rootProject.file('NOTICE.txt')
|
||||
}
|
||||
|
||||
dependencies {
|
||||
compileOnly project(':modules:lang-painless')
|
||||
compileOnly "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${versions.elasticsearch}"
|
||||
}
|
||||
|
||||
if (System.getProperty('tests.distribution') == null) {
|
||||
|
|
|
@ -16,11 +16,13 @@
|
|||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
apply plugin: 'elasticsearch.esplugin'
|
||||
|
||||
esplugin {
|
||||
name 'example-rescore'
|
||||
description 'An example plugin implementing rescore and verifying that plugins *can* implement rescore'
|
||||
classname 'org.elasticsearch.example.rescore.ExampleRescorePlugin'
|
||||
licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
|
||||
noticeFile rootProject.file('NOTICE.txt')
|
||||
}
|
||||
|
||||
|
|
|
@ -16,13 +16,14 @@
|
|||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
apply plugin: 'elasticsearch.esplugin'
|
||||
|
||||
esplugin {
|
||||
name 'rest-handler'
|
||||
description 'An example plugin showing how to register a REST handler'
|
||||
classname 'org.elasticsearch.example.resthandler.ExampleRestHandlerPlugin'
|
||||
licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
|
||||
noticeFile rootProject.file('NOTICE.txt')
|
||||
}
|
||||
|
||||
// No unit tests in this example
|
||||
|
@ -40,4 +41,4 @@ integTestCluster {
|
|||
}
|
||||
integTestRunner {
|
||||
systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }"
|
||||
}
|
||||
}
|
|
@ -16,13 +16,15 @@
|
|||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
apply plugin: 'elasticsearch.esplugin'
|
||||
|
||||
esplugin {
|
||||
name 'script-expert-scoring'
|
||||
description 'An example script engine to use low level Lucene internals for expert scoring'
|
||||
classname 'org.elasticsearch.example.expertscript.ExpertScriptPlugin'
|
||||
licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
|
||||
noticeFile rootProject.file('NOTICE.txt')
|
||||
}
|
||||
|
||||
test.enabled = false
|
||||
|
||||
|
|
|
@ -342,3 +342,15 @@ if (isEclipse == false || project.path == ":server-tests") {
|
|||
integTest.mustRunAfter test
|
||||
}
|
||||
|
||||
// TODO: remove these compatibility tests in 7.0
|
||||
additionalTest('testScriptedMetricAggParamsV6Compatibility') {
|
||||
include '**/ScriptedMetricAggregatorAggStateV6CompatTests.class'
|
||||
include '**/InternalScriptedMetricAggStateV6CompatTests.class'
|
||||
systemProperty 'es.aggregations.enable_scripted_metric_agg_param', 'true'
|
||||
}
|
||||
|
||||
test {
|
||||
// these are tested explicitly in separate test tasks
|
||||
exclude '**/ScriptedMetricAggregatorAggStateV6CompatTests.class'
|
||||
exclude '**/InternalScriptedMetricAggStateV6CompatTests.class'
|
||||
}
|
||||
|
|
|
@ -19,142 +19,22 @@
|
|||
|
||||
package org.elasticsearch.action.admin.cluster.node.reload;
|
||||
|
||||
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.SecureString;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.CharBuffer;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Arrays;
|
||||
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
||||
/**
|
||||
* Request for a reload secure settings action
|
||||
* Request for a reload secure settings action.
|
||||
*/
|
||||
public class NodesReloadSecureSettingsRequest extends BaseNodesRequest<NodesReloadSecureSettingsRequest> {
|
||||
|
||||
/**
|
||||
* The password which is broadcasted to all nodes, but is never stored on
|
||||
* persistent storage. The password is used to reread and decrypt the contents
|
||||
* of the node's keystore (backing the implementation of
|
||||
* {@code SecureSettings}).
|
||||
*/
|
||||
private SecureString secureSettingsPassword;
|
||||
|
||||
public NodesReloadSecureSettingsRequest() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Reload secure settings only on certain nodes, based on the nodes ids
|
||||
* specified. If none are passed, secure settings will be reloaded on all the
|
||||
* nodes.
|
||||
* Reload secure settings only on certain nodes, based on the nodes IDs specified. If none are passed, secure settings will be reloaded
|
||||
* on all the nodes.
|
||||
*/
|
||||
public NodesReloadSecureSettingsRequest(String... nodesIds) {
|
||||
public NodesReloadSecureSettingsRequest(final String... nodesIds) {
|
||||
super(nodesIds);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActionRequestValidationException validate() {
|
||||
ActionRequestValidationException validationException = null;
|
||||
if (secureSettingsPassword == null) {
|
||||
validationException = addValidationError("secure settings password cannot be null (use empty string instead)",
|
||||
validationException);
|
||||
}
|
||||
return validationException;
|
||||
}
|
||||
|
||||
public SecureString secureSettingsPassword() {
|
||||
return secureSettingsPassword;
|
||||
}
|
||||
|
||||
public NodesReloadSecureSettingsRequest secureStorePassword(SecureString secureStorePassword) {
|
||||
this.secureSettingsPassword = secureStorePassword;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
final byte[] passwordBytes = in.readByteArray();
|
||||
try {
|
||||
this.secureSettingsPassword = new SecureString(utf8BytesToChars(passwordBytes));
|
||||
} finally {
|
||||
Arrays.fill(passwordBytes, (byte) 0);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
final byte[] passwordBytes = charsToUtf8Bytes(this.secureSettingsPassword.getChars());
|
||||
try {
|
||||
out.writeByteArray(passwordBytes);
|
||||
} finally {
|
||||
Arrays.fill(passwordBytes, (byte) 0);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Encodes the provided char[] to a UTF-8 byte[]. This is done while avoiding
|
||||
* conversions to String. The provided char[] is not modified by this method, so
|
||||
* the caller needs to take care of clearing the value if it is sensitive.
|
||||
*/
|
||||
private static byte[] charsToUtf8Bytes(char[] chars) {
|
||||
final CharBuffer charBuffer = CharBuffer.wrap(chars);
|
||||
final ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(charBuffer);
|
||||
final byte[] bytes;
|
||||
if (byteBuffer.hasArray()) {
|
||||
// there is no guarantee that the byte buffer's backing array is the right size
|
||||
// so we need to make a copy
|
||||
bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit());
|
||||
Arrays.fill(byteBuffer.array(), (byte) 0); // clear sensitive data
|
||||
} else {
|
||||
final int length = byteBuffer.limit() - byteBuffer.position();
|
||||
bytes = new byte[length];
|
||||
byteBuffer.get(bytes);
|
||||
// if the buffer is not read-only we can reset it and fill it with 0's
|
||||
if (byteBuffer.isReadOnly() == false) {
|
||||
byteBuffer.clear(); // reset
|
||||
for (int i = 0; i < byteBuffer.limit(); i++) {
|
||||
byteBuffer.put((byte) 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
return bytes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Decodes the provided byte[] to a UTF-8 char[]. This is done while avoiding
|
||||
* conversions to String. The provided byte[] is not modified by this method, so
|
||||
* the caller needs to take care of clearing the value if it is sensitive.
|
||||
*/
|
||||
public static char[] utf8BytesToChars(byte[] utf8Bytes) {
|
||||
final ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes);
|
||||
final CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer);
|
||||
final char[] chars;
|
||||
if (charBuffer.hasArray()) {
|
||||
// there is no guarantee that the char buffer's backing array is the right size
|
||||
// so we need to make a copy
|
||||
chars = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit());
|
||||
Arrays.fill(charBuffer.array(), (char) 0); // clear sensitive data
|
||||
} else {
|
||||
final int length = charBuffer.limit() - charBuffer.position();
|
||||
chars = new char[length];
|
||||
charBuffer.get(chars);
|
||||
// if the buffer is not read-only we can reset it and fill it with 0's
|
||||
if (charBuffer.isReadOnly() == false) {
|
||||
charBuffer.clear(); // reset
|
||||
for (int i = 0; i < charBuffer.limit(); i++) {
|
||||
charBuffer.put((char) 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
return chars;
|
||||
}
|
||||
}
|
||||
|
|
|
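The helpers above avoid going through `String` so that every intermediate copy of the password can be zeroed once it has been used. A minimal standalone sketch of how a caller might honor that contract (the password value is hypothetical and the local helper is a simplified stand-in for the private method above, assuming the encoder returns a heap buffer; the code above also handles direct and read-only buffers):

```java
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// Standalone sketch: encode a secret char[] to UTF-8, use the bytes, then zero
// every temporary copy, mirroring the try/finally pattern in writeTo above.
public class SecretEncodingSketch {

    static byte[] toUtf8Bytes(char[] chars) {
        ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(CharBuffer.wrap(chars));
        byte[] bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit());
        Arrays.fill(byteBuffer.array(), (byte) 0); // clear the encoder's backing array
        return bytes;
    }

    public static void main(String[] args) {
        char[] password = "hunter2".toCharArray(); // hypothetical secret
        byte[] passwordBytes = toUtf8Bytes(password);
        try {
            System.out.println("encoded " + passwordBytes.length + " bytes");
            // ... write passwordBytes to the wire here ...
        } finally {
            Arrays.fill(passwordBytes, (byte) 0); // caller clears the copy it received
            Arrays.fill(password, (char) 0);      // and the original value once it is no longer needed
        }
    }
}
```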
@ -19,19 +19,8 @@
|
|||
|
||||
package org.elasticsearch.action.admin.cluster.node.reload;
|
||||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.settings.SecureString;
|
||||
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Builder for the reload secure settings nodes request
|
||||
|
@ -39,46 +28,8 @@ import java.util.Objects;
|
|||
public class NodesReloadSecureSettingsRequestBuilder extends NodesOperationRequestBuilder<NodesReloadSecureSettingsRequest,
|
||||
NodesReloadSecureSettingsResponse, NodesReloadSecureSettingsRequestBuilder> {
|
||||
|
||||
public static final String SECURE_SETTINGS_PASSWORD_FIELD_NAME = "secure_settings_password";
|
||||
|
||||
public NodesReloadSecureSettingsRequestBuilder(ElasticsearchClient client, NodesReloadSecureSettingsAction action) {
|
||||
super(client, action, new NodesReloadSecureSettingsRequest());
|
||||
}
|
||||
|
||||
public NodesReloadSecureSettingsRequestBuilder setSecureStorePassword(SecureString secureStorePassword) {
|
||||
request.secureStorePassword(secureStorePassword);
|
||||
return this;
|
||||
}
|
||||
|
||||
public NodesReloadSecureSettingsRequestBuilder source(BytesReference source, XContentType xContentType) throws IOException {
|
||||
Objects.requireNonNull(xContentType);
|
||||
// EMPTY is ok here because we never call namedObject
|
||||
try (InputStream stream = source.streamInput();
|
||||
XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY,
|
||||
LoggingDeprecationHandler.INSTANCE, stream)) {
|
||||
XContentParser.Token token;
|
||||
token = parser.nextToken();
|
||||
if (token != XContentParser.Token.START_OBJECT) {
|
||||
throw new ElasticsearchParseException("expected an object, but found token [{}]", token);
|
||||
}
|
||||
token = parser.nextToken();
|
||||
if (token != XContentParser.Token.FIELD_NAME || false == SECURE_SETTINGS_PASSWORD_FIELD_NAME.equals(parser.currentName())) {
|
||||
throw new ElasticsearchParseException("expected a field named [{}], but found [{}]", SECURE_SETTINGS_PASSWORD_FIELD_NAME,
|
||||
token);
|
||||
}
|
||||
token = parser.nextToken();
|
||||
if (token != XContentParser.Token.VALUE_STRING) {
|
||||
throw new ElasticsearchParseException("expected field [{}] to be of type string, but found [{}] instead",
|
||||
SECURE_SETTINGS_PASSWORD_FIELD_NAME, token);
|
||||
}
|
||||
final String password = parser.text();
|
||||
setSecureStorePassword(new SecureString(password.toCharArray()));
|
||||
token = parser.nextToken();
|
||||
if (token != XContentParser.Token.END_OBJECT) {
|
||||
throw new ElasticsearchParseException("expected end of object, but found token [{}]", token);
|
||||
}
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -31,7 +31,6 @@ import org.elasticsearch.common.inject.Inject;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.KeyStoreWrapper;
|
||||
import org.elasticsearch.common.settings.SecureString;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.plugins.PluginsService;
|
||||
|
@ -82,16 +81,13 @@ public class TransportNodesReloadSecureSettingsAction extends TransportNodesActi
|
|||
|
||||
@Override
|
||||
protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest) {
|
||||
final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request;
|
||||
final SecureString secureSettingsPassword = request.secureSettingsPassword();
|
||||
try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile())) {
|
||||
// reread keystore from config file
|
||||
if (keystore == null) {
|
||||
return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(),
|
||||
new IllegalStateException("Keystore is missing"));
|
||||
}
|
||||
// decrypt the keystore using the password from the request
|
||||
keystore.decrypt(secureSettingsPassword.getChars());
|
||||
keystore.decrypt(new char[0]);
|
||||
// add the keystore to the original node settings object
|
||||
final Settings settingsWithKeystore = Settings.builder()
|
||||
.put(environment.settings(), false)
|
||||
|
|
|
@ -52,6 +52,7 @@ import org.elasticsearch.index.query.QueryShardContext;
|
|||
import org.elasticsearch.index.similarity.SimilarityService;
|
||||
import org.elasticsearch.indices.InvalidTypeNameException;
|
||||
import org.elasticsearch.indices.mapper.MapperRegistry;
|
||||
import org.elasticsearch.search.suggest.completion.context.ContextMapping;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
|
@ -421,6 +422,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
MapperMergeValidator.validateFieldReferences(fieldMappers, fieldAliasMappers,
|
||||
fullPathObjectMappers, fieldTypes);
|
||||
|
||||
ContextMapping.validateContextPaths(indexSettings.getIndexVersionCreated(), fieldMappers, fieldTypes::get);
|
||||
|
||||
if (reason == MergeReason.MAPPING_UPDATE) {
|
||||
// this check will only be performed on the master node when there is
|
||||
// a call to the update mapping API. For all other cases like
|
||||
|
|
|
@ -59,7 +59,6 @@ public final class RestReloadSecureSettingsAction extends BaseRestHandler {
|
|||
.cluster()
|
||||
.prepareReloadSecureSettings()
|
||||
.setTimeout(request.param("timeout"))
|
||||
.source(request.requiredContent(), request.getXContentType())
|
||||
.setNodesIds(nodesIds);
|
||||
final NodesReloadSecureSettingsRequest nodesRequest = nodesRequestBuilder.request();
|
||||
return channel -> nodesRequestBuilder
|
||||
|
@ -68,12 +67,12 @@ public final class RestReloadSecureSettingsAction extends BaseRestHandler {
|
|||
public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XContentBuilder builder)
|
||||
throws Exception {
|
||||
builder.startObject();
|
||||
RestActions.buildNodesHeader(builder, channel.request(), response);
|
||||
builder.field("cluster_name", response.getClusterName().value());
|
||||
response.toXContent(builder, channel.request());
|
||||
{
|
||||
RestActions.buildNodesHeader(builder, channel.request(), response);
|
||||
builder.field("cluster_name", response.getClusterName().value());
|
||||
response.toXContent(builder, channel.request());
|
||||
}
|
||||
builder.endObject();
|
||||
// clear password for the original request
|
||||
nodesRequest.secureSettingsPassword().close();
|
||||
return new BytesRestResponse(RestStatus.OK, builder);
|
||||
}
|
||||
});
|
||||
|
|
|
@ -22,6 +22,8 @@ package org.elasticsearch.script;
|
|||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.search.Scorer;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.index.fielddata.ScriptDocValues;
|
||||
import org.elasticsearch.search.lookup.LeafSearchLookup;
|
||||
import org.elasticsearch.search.lookup.SearchLookup;
|
||||
|
@ -31,6 +33,25 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
|
||||
public class ScriptedMetricAggContexts {
|
||||
private static final DeprecationLogger DEPRECATION_LOGGER =
|
||||
new DeprecationLogger(Loggers.getLogger(ScriptedMetricAggContexts.class));
|
||||
|
||||
// Public for access from tests
|
||||
public static final String AGG_PARAM_DEPRECATION_WARNING =
|
||||
"params._agg/_aggs for scripted metric aggregations are deprecated, use state/states (not in params) instead. " +
|
||||
"Use -Des.aggregations.enable_scripted_metric_agg_param=false to disable.";
|
||||
|
||||
public static boolean deprecatedAggParamEnabled() {
|
||||
boolean enabled = Boolean.parseBoolean(
|
||||
System.getProperty("es.aggregations.enable_scripted_metric_agg_param", "true"));
|
||||
|
||||
if (enabled) {
|
||||
DEPRECATION_LOGGER.deprecatedAndMaybeLog("enable_scripted_metric_agg_param", AGG_PARAM_DEPRECATION_WARNING);
|
||||
}
|
||||
|
||||
return enabled;
|
||||
}
|
||||
|
||||
private abstract static class ParamsAndStateBase {
|
||||
private final Map<String, Object> params;
|
||||
private final Object state;
|
||||
|
|
|
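The deprecation above is gated on a JVM system property so that users can verify their scripts still work without the legacy params before 7.0. A standalone sketch of the same default-on flag pattern; nothing here comes from Elasticsearch itself apart from the property name and default shown above:

```java
// Standalone sketch of the "feature flag via system property" gating used above:
// the legacy behavior stays on by default and can be switched off with
// -Des.aggregations.enable_scripted_metric_agg_param=false.
public class LegacyAggParamFlagSketch {

    static boolean legacyAggParamsEnabled() {
        // defaults to "true" for the deprecation period, matching the code above
        return Boolean.parseBoolean(
            System.getProperty("es.aggregations.enable_scripted_metric_agg_param", "true"));
    }

    public static void main(String[] args) {
        System.out.println("legacy _agg/_aggs params enabled: " + legacyAggParamsEnabled());
    }
}
```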
@ -209,7 +209,10 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder<Filter
|
|||
}
|
||||
}
|
||||
if (changed) {
|
||||
return new FiltersAggregationBuilder(getName(), rewrittenFilters, this.keyed);
|
||||
FiltersAggregationBuilder rewritten = new FiltersAggregationBuilder(getName(), rewrittenFilters, this.keyed);
|
||||
rewritten.otherBucket(otherBucket);
|
||||
rewritten.otherBucketKey(otherBucketKey);
|
||||
return rewritten;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
|
|
|
@ -156,7 +156,7 @@ public class AutoDateHistogramAggregationBuilder
|
|||
return new AutoDateHistogramAggregatorFactory(name, config, numBuckets, roundings, context, parent, subFactoriesBuilder, metaData);
|
||||
}
|
||||
|
||||
private static Rounding createRounding(DateTimeUnit interval, DateTimeZone timeZone) {
|
||||
static Rounding createRounding(DateTimeUnit interval, DateTimeZone timeZone) {
|
||||
Rounding.Builder tzRoundingBuilder = Rounding.builder(interval);
|
||||
if (timeZone != null) {
|
||||
tzRoundingBuilder.timeZone(timeZone);
|
||||
|
|
|
@ -418,7 +418,7 @@ public final class InternalAutoDateHistogram extends
|
|||
return currentResult;
|
||||
}
|
||||
int roundingIdx = getAppropriateRounding(list.get(0).key, list.get(list.size() - 1).key, currentResult.roundingIdx,
|
||||
bucketInfo.roundingInfos);
|
||||
bucketInfo.roundingInfos, targetBuckets);
|
||||
RoundingInfo roundingInfo = bucketInfo.roundingInfos[roundingIdx];
|
||||
Rounding rounding = roundingInfo.rounding;
|
||||
// merge buckets using the new rounding
|
||||
|
@ -447,8 +447,8 @@ public final class InternalAutoDateHistogram extends
|
|||
return new BucketReduceResult(list, roundingInfo, roundingIdx);
|
||||
}
|
||||
|
||||
private int getAppropriateRounding(long minKey, long maxKey, int roundingIdx,
|
||||
RoundingInfo[] roundings) {
|
||||
static int getAppropriateRounding(long minKey, long maxKey, int roundingIdx,
|
||||
RoundingInfo[] roundings, int targetBuckets) {
|
||||
if (roundingIdx == roundings.length - 1) {
|
||||
return roundingIdx;
|
||||
}
|
||||
|
@ -480,7 +480,7 @@ public final class InternalAutoDateHistogram extends
|
|||
currentKey = currentRounding.nextRoundingValue(currentKey);
|
||||
}
|
||||
currentRoundingIdx++;
|
||||
} while (requiredBuckets > (targetBuckets * roundings[roundingIdx].getMaximumInnerInterval())
|
||||
} while (requiredBuckets > (targetBuckets * roundings[currentRoundingIdx - 1].getMaximumInnerInterval())
|
||||
&& currentRoundingIdx < roundings.length);
|
||||
// The loop will increase past the correct rounding index here so we
|
||||
// need to subtract one to get the rounding index we need
|
||||
|
|
|
@ -96,7 +96,9 @@ public class InternalScriptedMetric extends InternalAggregation implements Scrip
|
|||
}
|
||||
|
||||
// Add _aggs to params map for backwards compatibility (redundant with a context variable on the ReduceScript created below).
|
||||
params.put("_aggs", aggregationObjects);
|
||||
if (ScriptedMetricAggContexts.deprecatedAggParamEnabled()) {
|
||||
params.put("_aggs", aggregationObjects);
|
||||
}
|
||||
|
||||
ScriptedMetricAggContexts.ReduceScript.Factory factory = reduceContext.scriptService().compile(
|
||||
firstAggregation.reduceScript, ScriptedMetricAggContexts.ReduceScript.CONTEXT);
|
||||
|
|
|
@ -83,10 +83,17 @@ public class ScriptedMetricAggregatorFactory extends AggregatorFactory<ScriptedM
|
|||
// Add _agg to params map for backwards compatibility (redundant with context variables on the scripts created below).
|
||||
// When this is removed, aggState (as passed to ScriptedMetricAggregator) can be changed to Map<String, Object>, since
|
||||
// it won't be possible to completely replace it with another type as is possible when it's an entry in params.
|
||||
if (aggParams.containsKey("_agg") == false) {
|
||||
aggParams.put("_agg", new HashMap<String, Object>());
|
||||
Object aggState = new HashMap<String, Object>();
|
||||
if (ScriptedMetricAggContexts.deprecatedAggParamEnabled()) {
|
||||
if (aggParams.containsKey("_agg") == false) {
|
||||
// Add _agg if it wasn't added manually
|
||||
aggParams.put("_agg", aggState);
|
||||
} else {
|
||||
// If it was added manually, also use it for the agg context variable to reduce the likelihood of
|
||||
// weird behavior due to multiple different variables.
|
||||
aggState = aggParams.get("_agg");
|
||||
}
|
||||
}
|
||||
Object aggState = aggParams.get("_agg");
|
||||
|
||||
final ScriptedMetricAggContexts.InitScript initScript = this.initScript.newInstance(
|
||||
mergeParams(aggParams, initScriptParams), aggState);
|
||||
|
|
|
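The comments above spell out the backwards-compatibility rules for the legacy `_agg` entry: when the flag is enabled, a user-supplied `_agg` map is reused as the aggregation state so scripts see one shared object, otherwise a fresh map is used. A minimal standalone sketch of that decision (the method name and types are illustrative, not the factory's API):

```java
import java.util.HashMap;
import java.util.Map;

// Standalone sketch of the legacy _agg handling described above.
public class LegacyAggStateSketch {

    static Object resolveAggState(Map<String, Object> aggParams, boolean legacyEnabled) {
        Object aggState = new HashMap<String, Object>();
        if (legacyEnabled) {
            if (aggParams.containsKey("_agg") == false) {
                aggParams.put("_agg", aggState);  // expose the state under the legacy name
            } else {
                aggState = aggParams.get("_agg"); // reuse the user-supplied map as the state
            }
        }
        return aggState;
    }

    public static void main(String[] args) {
        Map<String, Object> params = new HashMap<>();
        Object state = resolveAggState(params, true);
        System.out.println(params.get("_agg") == state); // true: scripts and params share one map
    }
}
```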
@ -220,7 +220,7 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
|
|||
|
||||
/**
|
||||
* Set encoder for the highlighting
|
||||
* are {@code styled} and {@code default}.
|
||||
* are {@code html} and {@code default}.
|
||||
*
|
||||
* @param encoder name
|
||||
*/
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.search.suggest.completion.context;
|
||||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.ToXContentFragment;
|
||||
|
@ -28,6 +29,8 @@ import org.elasticsearch.common.xcontent.XContentParser;
|
|||
import org.elasticsearch.common.xcontent.XContentParser.Token;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.index.mapper.CompletionFieldMapper;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -35,6 +38,7 @@ import java.util.ArrayList;
|
|||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.function.Function;
|
||||
|
||||
/**
|
||||
* A {@link ContextMapping} defines criteria that can be used to
|
||||
|
@ -131,6 +135,31 @@ public abstract class ContextMapping<T extends ToXContent> implements ToXContent
|
|||
*/
|
||||
protected abstract XContentBuilder toInnerXContent(XContentBuilder builder, Params params) throws IOException;
|
||||
|
||||
/**
|
||||
* Checks if the current context is consistent with the rest of the fields. For example, the GeoContext
|
||||
* should check that the field that it points to has the correct type.
|
||||
*/
|
||||
protected void validateReferences(Version indexVersionCreated, Function<String, MappedFieldType> fieldResolver) {
|
||||
// No validation is required by default
|
||||
}
|
||||
|
||||
/**
|
||||
* Verifies that all field paths specified in contexts point to the fields with correct mappings
|
||||
*/
|
||||
public static void validateContextPaths(Version indexVersionCreated, List<FieldMapper> fieldMappers,
|
||||
Function<String, MappedFieldType> fieldResolver) {
|
||||
for (FieldMapper fieldMapper : fieldMappers) {
|
||||
if (CompletionFieldMapper.CONTENT_TYPE.equals(fieldMapper.typeName())) {
|
||||
CompletionFieldMapper.CompletionFieldType fieldType = ((CompletionFieldMapper) fieldMapper).fieldType();
|
||||
if (fieldType.hasContextMappings()) {
|
||||
for (ContextMapping context : fieldType.getContextMappings()) {
|
||||
context.validateReferences(indexVersionCreated, fieldResolver);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(FIELD_NAME, name);
|
||||
|
|
|
@ -37,6 +37,7 @@ import java.util.ArrayList;
|
|||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
@ -50,7 +51,7 @@ import static org.elasticsearch.search.suggest.completion.context.ContextMapping
|
|||
* and creates context queries for defined {@link ContextMapping}s
|
||||
* for a {@link CompletionFieldMapper}
|
||||
*/
|
||||
public class ContextMappings implements ToXContent {
|
||||
public class ContextMappings implements ToXContent, Iterable<ContextMapping<?>> {
|
||||
|
||||
private final List<ContextMapping<?>> contextMappings;
|
||||
private final Map<String, ContextMapping<?>> contextNameMap;
|
||||
|
@ -97,6 +98,11 @@ public class ContextMappings implements ToXContent {
|
|||
document.add(new TypedContextField(name, input, weight, contexts, document));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<ContextMapping<?>> iterator() {
|
||||
return contextMappings.iterator();
|
||||
}
|
||||
|
||||
/**
|
||||
* Field prepends context values with a suggestion
|
||||
* Context values are associated with a type, denoted by
|
||||
|
|
|
@ -19,12 +19,17 @@
|
|||
|
||||
package org.elasticsearch.search.suggest.completion.context;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.lucene.document.LatLonDocValuesField;
|
||||
import org.apache.lucene.document.LatLonPoint;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.index.DocValuesType;
|
||||
import org.apache.lucene.index.IndexableField;
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.geo.GeoPoint;
|
||||
import org.elasticsearch.common.geo.GeoUtils;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.unit.DistanceUnit;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
|
@ -42,6 +47,7 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.elasticsearch.common.geo.GeoHashUtils.addNeighbors;
|
||||
|
@ -69,6 +75,8 @@ public class GeoContextMapping extends ContextMapping<GeoQueryContext> {
|
|||
static final String CONTEXT_PRECISION = "precision";
|
||||
static final String CONTEXT_NEIGHBOURS = "neighbours";
|
||||
|
||||
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(GeoContextMapping.class));
|
||||
|
||||
private final int precision;
|
||||
private final String fieldName;
|
||||
|
||||
|
@ -205,11 +213,11 @@ public class GeoContextMapping extends ContextMapping<GeoQueryContext> {
|
|||
for (IndexableField field : fields) {
|
||||
if (field instanceof StringField) {
|
||||
spare.resetFromString(field.stringValue());
|
||||
} else {
|
||||
// todo return this to .stringValue() once LatLonPoint implements it
|
||||
geohashes.add(spare.geohash());
|
||||
} else if (field instanceof LatLonPoint || field instanceof LatLonDocValuesField) {
|
||||
spare.resetFromIndexableField(field);
|
||||
geohashes.add(spare.geohash());
|
||||
}
|
||||
geohashes.add(spare.geohash());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -279,6 +287,32 @@ public class GeoContextMapping extends ContextMapping<GeoQueryContext> {
|
|||
return internalQueryContextList;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void validateReferences(Version indexVersionCreated, Function<String, MappedFieldType> fieldResolver) {
|
||||
if (fieldName != null) {
|
||||
MappedFieldType mappedFieldType = fieldResolver.apply(fieldName);
|
||||
if (mappedFieldType == null) {
|
||||
if (indexVersionCreated.before(Version.V_7_0_0_alpha1)) {
|
||||
DEPRECATION_LOGGER.deprecatedAndMaybeLog("geo_context_mapping",
|
||||
"field [{}] referenced in context [{}] is not defined in the mapping", fieldName, name);
|
||||
} else {
|
||||
throw new ElasticsearchParseException(
|
||||
"field [{}] referenced in context [{}] is not defined in the mapping", fieldName, name);
|
||||
}
|
||||
} else if (GeoPointFieldMapper.CONTENT_TYPE.equals(mappedFieldType.typeName()) == false) {
|
||||
if (indexVersionCreated.before(Version.V_7_0_0_alpha1)) {
|
||||
DEPRECATION_LOGGER.deprecatedAndMaybeLog("geo_context_mapping",
|
||||
"field [{}] referenced in context [{}] must be mapped to geo_point, found [{}]",
|
||||
fieldName, name, mappedFieldType.typeName());
|
||||
} else {
|
||||
throw new ElasticsearchParseException(
|
||||
"field [{}] referenced in context [{}] must be mapped to geo_point, found [{}]",
|
||||
fieldName, name, mappedFieldType.typeName());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
|
|
|
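The validation above is intentionally lenient for indices created before 7.0: an invalid geo context reference only logs a deprecation warning there, while newly created indices reject the mapping outright. A small standalone sketch of that version-gated pattern (all names are illustrative):

```java
import java.util.function.Consumer;

// Standalone sketch of the version-gated validation pattern above: existing
// (pre-7.0) indices get a deprecation warning, new indices fail hard.
public class VersionGatedValidationSketch {

    static void validateReference(boolean indexCreatedBefore7, boolean referenceValid,
                                  Consumer<String> deprecationLogger) {
        if (referenceValid) {
            return;
        }
        String message = "field referenced in context is not mapped to geo_point";
        if (indexCreatedBefore7) {
            deprecationLogger.accept(message); // stay lenient with already-existing mappings
        } else {
            throw new IllegalArgumentException(message); // strict for newly created indices
        }
    }

    public static void main(String[] args) {
        validateReference(true, false, System.err::println);  // warns only
        try {
            validateReference(false, false, System.err::println);
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```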
@ -18,6 +18,7 @@
|
|||
*/
|
||||
package org.elasticsearch.transport;
|
||||
|
||||
import java.util.function.Supplier;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
|
@ -48,9 +49,20 @@ public abstract class RemoteClusterAware extends AbstractComponent {
|
|||
/**
|
||||
* A list of initial seed nodes to discover eligible nodes from the remote cluster
|
||||
*/
|
||||
public static final Setting.AffixSetting<List<InetSocketAddress>> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting("search.remote.",
|
||||
"seeds", (key) -> Setting.listSetting(key, Collections.emptyList(), RemoteClusterAware::parseSeedAddress,
|
||||
Setting.Property.NodeScope, Setting.Property.Dynamic));
|
||||
public static final Setting.AffixSetting<List<String>> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting(
|
||||
"search.remote.",
|
||||
"seeds",
|
||||
key -> Setting.listSetting(
|
||||
key, Collections.emptyList(),
|
||||
s -> {
|
||||
// validate seed address
|
||||
parsePort(s);
|
||||
return s;
|
||||
},
|
||||
Setting.Property.NodeScope,
|
||||
Setting.Property.Dynamic
|
||||
)
|
||||
);
|
||||
public static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':';
|
||||
public static final String LOCAL_CLUSTER_GROUP_KEY = "";
|
||||
|
||||
|
@ -65,18 +77,20 @@ public abstract class RemoteClusterAware extends AbstractComponent {
|
|||
this.clusterNameResolver = new ClusterNameExpressionResolver(settings);
|
||||
}
|
||||
|
||||
protected static Map<String, List<DiscoveryNode>> buildRemoteClustersSeeds(Settings settings) {
|
||||
Stream<Setting<List<InetSocketAddress>>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings);
|
||||
protected static Map<String, List<Supplier<DiscoveryNode>>> buildRemoteClustersSeeds(Settings settings) {
|
||||
Stream<Setting<List<String>>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings);
|
||||
return allConcreteSettings.collect(
|
||||
Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> {
|
||||
String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting);
|
||||
List<DiscoveryNode> nodes = new ArrayList<>();
|
||||
for (InetSocketAddress address : concreteSetting.get(settings)) {
|
||||
TransportAddress transportAddress = new TransportAddress(address);
|
||||
DiscoveryNode node = new DiscoveryNode(clusterName + "#" + transportAddress.toString(),
|
||||
transportAddress,
|
||||
Version.CURRENT.minimumCompatibilityVersion());
|
||||
nodes.add(node);
|
||||
List<String> addresses = concreteSetting.get(settings);
|
||||
List<Supplier<DiscoveryNode>> nodes = new ArrayList<>(addresses.size());
|
||||
for (String address : addresses) {
|
||||
nodes.add(() -> {
|
||||
TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address));
|
||||
return new DiscoveryNode(clusterName + "#" + transportAddress.toString(),
|
||||
transportAddress,
|
||||
Version.CURRENT.minimumCompatibilityVersion());
|
||||
});
|
||||
}
|
||||
return nodes;
|
||||
}));
|
||||
|
@ -128,7 +142,7 @@ public abstract class RemoteClusterAware extends AbstractComponent {
|
|||
* Subclasses must implement this to receive information about updated cluster aliases. If the given address list is
|
||||
* empty the cluster alias is unregistered and should be removed.
|
||||
*/
|
||||
protected abstract void updateRemoteCluster(String clusterAlias, List<InetSocketAddress> addresses);
|
||||
protected abstract void updateRemoteCluster(String clusterAlias, List<String> addresses);
|
||||
|
||||
/**
|
||||
* Registers this instance to listen to updates on the cluster settings.
|
||||
|
@ -138,29 +152,37 @@ public abstract class RemoteClusterAware extends AbstractComponent {
|
|||
(namespace, value) -> {});
|
||||
}
|
||||
|
||||
private static InetSocketAddress parseSeedAddress(String remoteHost) {
|
||||
int portSeparator = remoteHost.lastIndexOf(':'); // in case we have an IPv6 address, e.g. [::1]:9300
|
||||
if (portSeparator == -1 || portSeparator == remoteHost.length()) {
|
||||
throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
|
||||
}
|
||||
String host = remoteHost.substring(0, portSeparator);
|
||||
protected static InetSocketAddress parseSeedAddress(String remoteHost) {
|
||||
String host = remoteHost.substring(0, indexOfPortSeparator(remoteHost));
|
||||
InetAddress hostAddress;
|
||||
try {
|
||||
hostAddress = InetAddress.getByName(host);
|
||||
} catch (UnknownHostException e) {
|
||||
throw new IllegalArgumentException("unknown host [" + host + "]", e);
|
||||
}
|
||||
return new InetSocketAddress(hostAddress, parsePort(remoteHost));
|
||||
}
|
||||
|
||||
private static int parsePort(String remoteHost) {
|
||||
try {
|
||||
int port = Integer.valueOf(remoteHost.substring(portSeparator + 1));
|
||||
int port = Integer.valueOf(remoteHost.substring(indexOfPortSeparator(remoteHost) + 1));
|
||||
if (port <= 0) {
|
||||
throw new IllegalArgumentException("port number must be > 0 but was: [" + port + "]");
|
||||
}
|
||||
return new InetSocketAddress(hostAddress, port);
|
||||
return port;
|
||||
} catch (NumberFormatException e) {
|
||||
throw new IllegalArgumentException("port must be a number", e);
|
||||
throw new IllegalArgumentException("failed to parse port", e);
|
||||
}
|
||||
}
|
||||
|
||||
private static int indexOfPortSeparator(String remoteHost) {
|
||||
int portSeparator = remoteHost.lastIndexOf(':'); // in case we have an IPv6 address, e.g. [::1]:9300
|
||||
if (portSeparator == -1 || portSeparator == remoteHost.length()) {
|
||||
throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
|
||||
}
|
||||
return portSeparator;
|
||||
}
|
||||
|
||||
public static String buildRemoteIndexName(String clusterAlias, String indexName) {
|
||||
return clusterAlias != null ? clusterAlias + REMOTE_CLUSTER_INDEX_SEPARATOR + indexName : indexName;
|
||||
}
|
||||
|
|
|
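The change above is the "lazy resolve DNS" part of this merge: the seed setting now keeps plain `host:port` strings (validating only the port eagerly) and wraps the actual `InetAddress` lookup in a `Supplier<DiscoveryNode>` that is invoked at connect time. A standalone sketch of that deferral pattern, independent of the Elasticsearch classes above:

```java
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Supplier;

// Standalone sketch of the lazy-resolution pattern introduced above: seed strings
// are split into host and port eagerly (so bad syntax still fails fast), but the
// DNS lookup is wrapped in a Supplier and deferred until it is actually needed.
public class LazySeedResolutionSketch {

    static List<Supplier<InetSocketAddress>> lazySeeds(List<String> hostPorts) {
        List<Supplier<InetSocketAddress>> seeds = new ArrayList<>(hostPorts.size());
        for (String hostPort : hostPorts) {
            int sep = hostPort.lastIndexOf(':'); // works for IPv6 literals such as [::1]:9300
            if (sep == -1) {
                throw new IllegalArgumentException("expected [host:port], found [" + hostPort + "]");
            }
            String host = hostPort.substring(0, sep);
            int port = Integer.parseInt(hostPort.substring(sep + 1));
            seeds.add(() -> {
                try {
                    // resolution happens here, not when the setting is parsed
                    return new InetSocketAddress(InetAddress.getByName(host), port);
                } catch (UnknownHostException e) {
                    throw new IllegalArgumentException("unknown host [" + host + "]", e);
                }
            });
        }
        return seeds;
    }

    public static void main(String[] args) {
        List<Supplier<InetSocketAddress>> seeds = lazySeeds(Arrays.asList("localhost:9300"));
        // no DNS lookup has happened yet; it only happens on get():
        System.out.println(seeds.get(0).get());
    }
}
```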
@ -18,6 +18,7 @@
|
|||
*/
|
||||
package org.elasticsearch.transport;
|
||||
|
||||
import java.util.function.Supplier;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.apache.lucene.util.SetOnce;
|
||||
|
@ -84,7 +85,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
|
|||
private final String clusterAlias;
|
||||
private final int maxNumRemoteConnections;
|
||||
private final Predicate<DiscoveryNode> nodePredicate;
|
||||
private volatile List<DiscoveryNode> seedNodes;
|
||||
private volatile List<Supplier<DiscoveryNode>> seedNodes;
|
||||
private volatile boolean skipUnavailable;
|
||||
private final ConnectHandler connectHandler;
|
||||
private SetOnce<ClusterName> remoteClusterName = new SetOnce<>();
|
||||
|
@ -99,7 +100,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
|
|||
* @param maxNumRemoteConnections the maximum number of connections to the remote cluster
|
||||
* @param nodePredicate a predicate to filter eligible remote nodes to connect to
|
||||
*/
|
||||
RemoteClusterConnection(Settings settings, String clusterAlias, List<DiscoveryNode> seedNodes,
|
||||
RemoteClusterConnection(Settings settings, String clusterAlias, List<Supplier<DiscoveryNode>> seedNodes,
|
||||
TransportService transportService, int maxNumRemoteConnections, Predicate<DiscoveryNode> nodePredicate) {
|
||||
super(settings);
|
||||
this.localClusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
|
||||
|
@ -127,7 +128,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
|
|||
/**
|
||||
* Updates the list of seed nodes for this cluster connection
|
||||
*/
|
||||
synchronized void updateSeedNodes(List<DiscoveryNode> seedNodes, ActionListener<Void> connectListener) {
|
||||
synchronized void updateSeedNodes(List<Supplier<DiscoveryNode>> seedNodes, ActionListener<Void> connectListener) {
|
||||
this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes));
|
||||
connectHandler.connect(connectListener);
|
||||
}
|
||||
|
@ -456,7 +457,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
|
|||
});
|
||||
}
|
||||
|
||||
void collectRemoteNodes(Iterator<DiscoveryNode> seedNodes,
|
||||
private void collectRemoteNodes(Iterator<Supplier<DiscoveryNode>> seedNodes,
|
||||
final TransportService transportService, ActionListener<Void> listener) {
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
listener.onFailure(new InterruptedException("remote connect thread got interrupted"));
|
||||
|
@ -464,7 +465,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
|
|||
try {
|
||||
if (seedNodes.hasNext()) {
|
||||
cancellableThreads.executeIO(() -> {
|
||||
final DiscoveryNode seedNode = seedNodes.next();
|
||||
final DiscoveryNode seedNode = seedNodes.next().get();
|
||||
final TransportService.HandshakeResponse handshakeResponse;
|
||||
Transport.Connection connection = transportService.openConnection(seedNode,
|
||||
ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null));
|
||||
|
@ -554,11 +555,11 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
|
|||
private final TransportService transportService;
|
||||
private final Transport.Connection connection;
|
||||
private final ActionListener<Void> listener;
|
||||
private final Iterator<DiscoveryNode> seedNodes;
|
||||
private final Iterator<Supplier<DiscoveryNode>> seedNodes;
|
||||
private final CancellableThreads cancellableThreads;
|
||||
|
||||
SniffClusterStateResponseHandler(TransportService transportService, Transport.Connection connection,
|
||||
ActionListener<Void> listener, Iterator<DiscoveryNode> seedNodes,
|
||||
ActionListener<Void> listener, Iterator<Supplier<DiscoveryNode>> seedNodes,
|
||||
CancellableThreads cancellableThreads) {
|
||||
this.transportService = transportService;
|
||||
this.connection = connection;
|
||||
|
@ -651,7 +652,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
|
|||
* Get the information about remote nodes to be rendered on {@code _remote/info} requests.
|
||||
*/
|
||||
public RemoteConnectionInfo getConnectionInfo() {
|
||||
List<TransportAddress> seedNodeAddresses = seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList());
|
||||
List<TransportAddress> seedNodeAddresses = seedNodes.stream().map(node -> node.get().getAddress()).collect(Collectors.toList());
|
||||
TimeValue initialConnectionTimeout = RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings);
|
||||
return new RemoteConnectionInfo(clusterAlias, seedNodeAddresses, maxNumRemoteConnections, connectedNodes.size(),
|
||||
initialConnectionTimeout, skipUnavailable);
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
*/
|
||||
package org.elasticsearch.transport;
|
||||
|
||||
import java.util.function.Supplier;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.OriginalIndices;
|
||||
|
@ -40,7 +41,6 @@ import org.elasticsearch.threadpool.ThreadPool;
|
|||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
|
@ -115,7 +115,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
|
|||
* @param seeds a cluster alias to discovery node mapping representing the remote clusters' seed nodes
|
||||
* @param connectionListener a listener invoked once every configured cluster has been connected to
|
||||
*/
|
||||
private synchronized void updateRemoteClusters(Map<String, List<DiscoveryNode>> seeds, ActionListener<Void> connectionListener) {
|
||||
private synchronized void updateRemoteClusters(Map<String, List<Supplier<DiscoveryNode>>> seeds,
|
||||
ActionListener<Void> connectionListener) {
|
||||
if (seeds.containsKey(LOCAL_CLUSTER_GROUP_KEY)) {
|
||||
throw new IllegalArgumentException("remote clusters must not have the empty string as its key");
|
||||
}
|
||||
|
@ -125,7 +126,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
|
|||
} else {
|
||||
CountDown countDown = new CountDown(seeds.size());
|
||||
remoteClusters.putAll(this.remoteClusters);
|
||||
for (Map.Entry<String, List<DiscoveryNode>> entry : seeds.entrySet()) {
|
||||
for (Map.Entry<String, List<Supplier<DiscoveryNode>>> entry : seeds.entrySet()) {
|
||||
RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey());
|
||||
if (entry.getValue().isEmpty()) { // with no seed nodes we just remove the connection
|
||||
try {
|
||||
|
@ -310,16 +311,17 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
|
|||
}
|
||||
}
|
||||
|
||||
protected void updateRemoteCluster(String clusterAlias, List<InetSocketAddress> addresses) {
|
||||
@Override
|
||||
protected void updateRemoteCluster(String clusterAlias, List<String> addresses) {
|
||||
updateRemoteCluster(clusterAlias, addresses, ActionListener.wrap((x) -> {}, (x) -> {}));
|
||||
}
|
||||
|
||||
void updateRemoteCluster(
|
||||
final String clusterAlias,
|
||||
final List<InetSocketAddress> addresses,
|
||||
final List<String> addresses,
|
||||
final ActionListener<Void> connectionListener) {
|
||||
final List<DiscoveryNode> nodes = addresses.stream().map(address -> {
|
||||
final TransportAddress transportAddress = new TransportAddress(address);
|
||||
final List<Supplier<DiscoveryNode>> nodes = addresses.stream().<Supplier<DiscoveryNode>>map(address -> () -> {
|
||||
final TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address));
|
||||
final String id = clusterAlias + "#" + transportAddress.toString();
|
||||
final Version version = Version.CURRENT.minimumCompatibilityVersion();
|
||||
return new DiscoveryNode(id, transportAddress, version);
|
||||
|
@ -334,7 +336,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
|
|||
void initializeRemoteClusters() {
|
||||
final TimeValue timeValue = REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings);
|
||||
final PlainActionFuture<Void> future = new PlainActionFuture<>();
|
||||
Map<String, List<DiscoveryNode>> seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings);
|
||||
Map<String, List<Supplier<DiscoveryNode>>> seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings);
|
||||
updateRemoteClusters(seeds, future);
|
||||
try {
|
||||
future.get(timeValue.millis(), TimeUnit.MILLISECONDS);
|
||||
|
|
|
@ -20,11 +20,9 @@
|
|||
package org.elasticsearch.action.admin;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse;
|
||||
import org.elasticsearch.common.settings.KeyStoreWrapper;
|
||||
import org.elasticsearch.common.settings.SecureSettings;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;

@ -44,11 +42,11 @@ import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.containsString;

public class ReloadSecureSettingsIT extends ESIntegTestCase {

@ -62,7 +60,7 @@ public class ReloadSecureSettingsIT extends ESIntegTestCase {
Files.deleteIfExists(KeyStoreWrapper.keystorePath(environment.configFile()));
final int initialReloadCount = mockReloadablePlugin.getReloadCount();
final CountDownLatch latch = new CountDownLatch(1);
client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute(
client().admin().cluster().prepareReloadSecureSettings().execute(
new ActionListener<NodesReloadSecureSettingsResponse>() {
@Override
public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) {
@ -96,44 +94,6 @@ public class ReloadSecureSettingsIT extends ESIntegTestCase {
assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount));
}

public void testNullKeystorePassword() throws Exception {
final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class)
.stream().findFirst().get();
final AtomicReference<AssertionError> reloadSettingsError = new AtomicReference<>();
final int initialReloadCount = mockReloadablePlugin.getReloadCount();
final CountDownLatch latch = new CountDownLatch(1);
client().admin().cluster().prepareReloadSecureSettings().execute(
new ActionListener<NodesReloadSecureSettingsResponse>() {
@Override
public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) {
try {
reloadSettingsError.set(new AssertionError("Null keystore password should fail"));
} finally {
latch.countDown();
}
}

@Override
public void onFailure(Exception e) {
try {
assertThat(e, instanceOf(ActionRequestValidationException.class));
assertThat(e.getMessage(), containsString("secure settings password cannot be null"));
} catch (final AssertionError ae) {
reloadSettingsError.set(ae);
} finally {
latch.countDown();
}
}
});
latch.await();
if (reloadSettingsError.get() != null) {
throw reloadSettingsError.get();
}
// in the null password case no reload should be triggered
assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount));
}

public void testInvalidKeystoreFile() throws Exception {
final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class)
@ -149,7 +109,7 @@ public class ReloadSecureSettingsIT extends ESIntegTestCase {
Files.copy(keystore, KeyStoreWrapper.keystorePath(environment.configFile()), StandardCopyOption.REPLACE_EXISTING);
}
final CountDownLatch latch = new CountDownLatch(1);
client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute(
client().admin().cluster().prepareReloadSecureSettings().execute(
new ActionListener<NodesReloadSecureSettingsResponse>() {
@Override
public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) {
@ -181,52 +141,6 @@ public class ReloadSecureSettingsIT extends ESIntegTestCase {
assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount));
}

public void testWrongKeystorePassword() throws Exception {
final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class)
.stream().findFirst().get();
final Environment environment = internalCluster().getInstance(Environment.class);
final AtomicReference<AssertionError> reloadSettingsError = new AtomicReference<>();
final int initialReloadCount = mockReloadablePlugin.getReloadCount();
// "some" keystore should be present in this case
writeEmptyKeystore(environment, new char[0]);
final CountDownLatch latch = new CountDownLatch(1);
client().admin()
.cluster()
.prepareReloadSecureSettings()
.setSecureStorePassword(new SecureString(new char[] { 'W', 'r', 'o', 'n', 'g' }))
.execute(new ActionListener<NodesReloadSecureSettingsResponse>() {
@Override
public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) {
try {
assertThat(nodesReloadResponse, notNullValue());
final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap();
assertThat(nodesMap.size(), equalTo(cluster().size()));
for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) {
assertThat(nodeResponse.reloadException(), notNullValue());
assertThat(nodeResponse.reloadException(), instanceOf(SecurityException.class));
}
} catch (final AssertionError e) {
reloadSettingsError.set(e);
} finally {
latch.countDown();
}
}

@Override
public void onFailure(Exception e) {
reloadSettingsError.set(new AssertionError("Nodes request failed", e));
latch.countDown();
}
});
latch.await();
if (reloadSettingsError.get() != null) {
throw reloadSettingsError.get();
}
// in the wrong password case no reload should be triggered
assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount));
}

public void testMisbehavingPlugin() throws Exception {
final Environment environment = internalCluster().getInstance(Environment.class);
final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
@ -247,7 +161,7 @@ public class ReloadSecureSettingsIT extends ESIntegTestCase {
.get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build())
.toString();
final CountDownLatch latch = new CountDownLatch(1);
client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute(
client().admin().cluster().prepareReloadSecureSettings().execute(
new ActionListener<NodesReloadSecureSettingsResponse>() {
@Override
public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) {
@ -314,7 +228,7 @@ public class ReloadSecureSettingsIT extends ESIntegTestCase {
private void successfulReloadCall() throws InterruptedException {
final AtomicReference<AssertionError> reloadSettingsError = new AtomicReference<>();
final CountDownLatch latch = new CountDownLatch(1);
client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute(
client().admin().cluster().prepareReloadSecureSettings().execute(
new ActionListener<NodesReloadSecureSettingsResponse>() {
@Override
public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) {

@ -19,6 +19,7 @@

package org.elasticsearch.cluster.ack;

import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
@ -50,6 +51,7 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;

@ClusterScope(minNumDataNodes = 2)
@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/32767")
public class AckIT extends ESIntegTestCase {

@Override

@ -69,7 +69,7 @@ public class DateTimeUnitTests extends ESTestCase {
public void testConversion() {
long millis = randomLongBetween(0, Instant.now().toEpochMilli());
DateTimeZone zone = randomDateTimeZone();
ZoneId zoneId = ZoneId.of(zone.getID());
ZoneId zoneId = zone.toTimeZone().toZoneId();

int offsetSeconds = zoneId.getRules().getOffset(Instant.ofEpochMilli(millis)).getTotalSeconds();
long parsedMillisJavaTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), zoneId)

@ -2400,8 +2400,7 @@ public class IndexShardTests extends IndexShardTestCase {
closeShards(sourceShard, targetShard);
}

@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32766")
public void testDocStats() throws IOException {
public void testDocStats() throws IOException, InterruptedException {
IndexShard indexShard = null;
try {
indexShard = newStartedShard(
@ -2460,15 +2459,6 @@ public class IndexShardTests extends IndexShardTestCase {
assertTrue(searcher.reader().numDocs() <= docStats.getCount());
}
assertThat(docStats.getCount(), equalTo(numDocs));
// Lucene will delete a segment if all docs are deleted from it;
// this means that we lose the deletes when deleting all docs.
// If soft-delete is enabled, each delete op will add a deletion marker.
final long deleteTombstones = indexShard.indexSettings.isSoftDeleteEnabled() ? numDocsToDelete : 0L;
if (numDocsToDelete == numDocs) {
assertThat(docStats.getDeleted(), equalTo(deleteTombstones));
} else {
assertThat(docStats.getDeleted(), equalTo(numDocsToDelete + deleteTombstones));
}
}

// merge them away

@ -44,7 +44,6 @@ import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.translog.SnapshotMatchers;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.test.junit.annotations.TestLogging;

import java.util.HashMap;
import java.util.List;
@ -75,7 +74,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
}
}

@TestLogging("_root:TRACE")
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32089")
public void testRetentionPolicyChangeDuringRecovery() throws Exception {
try (ReplicationGroup shards = createGroup(0)) {
shards.startPrimary();

@ -178,4 +178,18 @@ public class FiltersTests extends BaseAggregationTestCase<FiltersAggregationBuil
assertSame(rewritten,
rewritten.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L)));
}

public void testRewritePreservesOtherBucket() throws IOException {
FiltersAggregationBuilder originalFilters = new FiltersAggregationBuilder("my-agg", new BoolQueryBuilder());
originalFilters.otherBucket(randomBoolean());
originalFilters.otherBucketKey(randomAlphaOfLength(10));

AggregationBuilder rewritten = originalFilters.rewrite(new QueryRewriteContext(xContentRegistry(),
null, null, () -> 0L));
assertThat(rewritten, instanceOf(FiltersAggregationBuilder.class));

FiltersAggregationBuilder rewrittenFilters = (FiltersAggregationBuilder) rewritten;
assertEquals(originalFilters.otherBucket(), rewrittenFilters.otherBucket());
assertEquals(originalFilters.otherBucketKey(), rewrittenFilters.otherBucketKey());
}
}

@ -20,6 +20,7 @@
package org.elasticsearch.search.aggregations.bucket.histogram;

import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.rounding.DateTimeUnit;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
@ -28,7 +29,11 @@ import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHi
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

import java.time.Instant;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@ -39,6 +44,8 @@ import java.util.TreeMap;
import static org.elasticsearch.common.unit.TimeValue.timeValueHours;
import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import static org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.createRounding;
import static org.hamcrest.Matchers.equalTo;

public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregationTestCase<InternalAutoDateHistogram> {

@ -61,6 +68,7 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
int nbBuckets = randomNumberOfBuckets();
int targetBuckets = randomIntBetween(1, nbBuckets * 2 + 1);
List<InternalAutoDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);

long startingDate = System.currentTimeMillis();

long interval = randomIntBetween(1, 3);
@ -72,23 +80,41 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
}
InternalAggregations subAggregations = new InternalAggregations(Collections.emptyList());
BucketInfo bucketInfo = new BucketInfo(roundingInfos, randomIntBetween(0, roundingInfos.length - 1), subAggregations);


return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData);
}

/*
This test was added to reproduce a bug where getAppropriateRounding was only ever using the first innerIntervals
passed in, instead of using the interval associated with the loop.
*/
public void testGetAppropriateRoundingUsesCorrectIntervals() {
RoundingInfo[] roundings = new RoundingInfo[6];
DateTimeZone timeZone = DateTimeZone.UTC;
// Since we pass 0 as the starting index to getAppropriateRounding, we'll also use
// an innerInterval that is quite large, such that targetBuckets * roundings[i].getMaximumInnerInterval()
// will be larger than the estimate.
roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE, timeZone),
1000L, 1000);
roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR, timeZone),
60 * 1000L, 1, 5, 10, 30);
roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY, timeZone),
60 * 60 * 1000L, 1, 3, 12);

OffsetDateTime timestamp = Instant.parse("2018-01-01T00:00:01.000Z").atOffset(ZoneOffset.UTC);
// We want to pass a roundingIdx of zero, because in order to reproduce this bug, we need the function
// to increment the rounding (because the bug was that the function would not use the innerIntervals
// from the new rounding.
int result = InternalAutoDateHistogram.getAppropriateRounding(timestamp.toEpochSecond()*1000,
timestamp.plusDays(1).toEpochSecond()*1000, 0, roundings, 25);
assertThat(result, equalTo(2));
}

@Override
protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAutoDateHistogram> inputs) {
int roundingIdx = 0;
for (InternalAutoDateHistogram histogram : inputs) {
if (histogram.getBucketInfo().roundingIdx > roundingIdx) {
roundingIdx = histogram.getBucketInfo().roundingIdx;
}
}
RoundingInfo roundingInfo = roundingInfos[roundingIdx];

long lowest = Long.MAX_VALUE;
long highest = 0;

for (InternalAutoDateHistogram histogram : inputs) {
for (Histogram.Bucket bucket : histogram.getBuckets()) {
long bucketKey = ((DateTime) bucket.getKey()).getMillis();
@ -100,35 +126,72 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
}
}
}

int roundingIndex = reduced.getBucketInfo().roundingIdx;
RoundingInfo roundingInfo = roundingInfos[roundingIndex];

long normalizedDuration = (highest - lowest) / roundingInfo.getRoughEstimateDurationMillis();
long innerIntervalToUse = 0;
for (int interval : roundingInfo.innerIntervals) {
if (normalizedDuration / interval < maxNumberOfBuckets()) {
innerIntervalToUse = interval;
long innerIntervalToUse = roundingInfo.innerIntervals[0];
int innerIntervalIndex = 0;

// First, try to calculate the correct innerInterval using the normalizedDuration.
// This handles cases where highest and lowest are further apart than the interval being used.
if (normalizedDuration != 0) {
for (int j = roundingInfo.innerIntervals.length-1; j >= 0; j--) {
int interval = roundingInfo.innerIntervals[j];
if (normalizedDuration / interval < reduced.getBuckets().size()) {
innerIntervalToUse = interval;
innerIntervalIndex = j;
}
}
}

long intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis();
int bucketCount = getBucketCount(lowest, highest, roundingInfo, intervalInMillis);

//Next, if our bucketCount is still above what we need, we'll go back and determine the interval
// based on a size calculation.
if (bucketCount > reduced.getBuckets().size()) {
for (int i = innerIntervalIndex; i < roundingInfo.innerIntervals.length; i++) {
long newIntervalMillis = roundingInfo.innerIntervals[i] * roundingInfo.getRoughEstimateDurationMillis();
if (getBucketCount(lowest, highest, roundingInfo, newIntervalMillis) <= reduced.getBuckets().size()) {
innerIntervalToUse = roundingInfo.innerIntervals[i];
intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis();
}
}
}

Map<Long, Long> expectedCounts = new TreeMap<>();
long intervalInMillis = innerIntervalToUse*roundingInfo.getRoughEstimateDurationMillis();
for (long keyForBucket = roundingInfo.rounding.round(lowest);
keyForBucket <= highest;
keyForBucket <= roundingInfo.rounding.round(highest);
keyForBucket = keyForBucket + intervalInMillis) {
expectedCounts.put(keyForBucket, 0L);

// Iterate through the input buckets, and for each bucket, determine if it's inside
// the range of the bucket in the outer loop. if it is, add the doc count to the total
// for that bucket.

for (InternalAutoDateHistogram histogram : inputs) {
for (Histogram.Bucket bucket : histogram.getBuckets()) {
long bucketKey = ((DateTime) bucket.getKey()).getMillis();
long roundedBucketKey = roundingInfo.rounding.round(bucketKey);
long roundedBucketKey = roundingInfo.rounding.round(((DateTime) bucket.getKey()).getMillis());
long docCount = bucket.getDocCount();
if (roundedBucketKey >= keyForBucket
&& roundedBucketKey < keyForBucket + intervalInMillis) {
long count = bucket.getDocCount();
expectedCounts.compute(keyForBucket,
(key, oldValue) -> (oldValue == null ? 0 : oldValue) + count);
(key, oldValue) -> (oldValue == null ? 0 : oldValue) + docCount);
}
}
}
}

// If there is only a single bucket, and we haven't added it above, add a bucket with no documents.
// this step is necessary because of the roundedBucketKey < keyForBucket + intervalInMillis above.
if (roundingInfo.rounding.round(lowest) == roundingInfo.rounding.round(highest) && expectedCounts.isEmpty()) {
expectedCounts.put(roundingInfo.rounding.round(lowest), 0L);
}


// pick out the actual reduced values to the make the assertion more readable
Map<Long, Long> actualCounts = new TreeMap<>();
for (Histogram.Bucket bucket : reduced.getBuckets()) {
actualCounts.compute(((DateTime) bucket.getKey()).getMillis(),
@ -137,12 +200,16 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
assertEquals(expectedCounts, actualCounts);
}

@Override
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32215")
public void testReduceRandom() {
super.testReduceRandom();
private int getBucketCount(long lowest, long highest, RoundingInfo roundingInfo, long intervalInMillis) {
int bucketCount = 0;
for (long keyForBucket = roundingInfo.rounding.round(lowest);
keyForBucket <= roundingInfo.rounding.round(highest);
keyForBucket = keyForBucket + intervalInMillis) {
bucketCount++;
}
return bucketCount;
}


@Override
protected Writeable.Reader<InternalAutoDateHistogram> instanceReader() {
return InternalAutoDateHistogram::new;

@ -67,6 +67,7 @@ import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.notNullValue;
@ -90,42 +91,57 @@ public class ScriptedMetricIT extends ESIntegTestCase {
protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>();

scripts.put("_agg['count'] = 1", vars ->
aggScript(vars, agg -> ((Map<String, Object>) agg).put("count", 1)));
scripts.put("state['count'] = 1", vars ->
aggScript(vars, state -> state.put("count", 1)));

scripts.put("_agg.add(1)", vars ->
aggScript(vars, agg -> ((List) agg).add(1)));
scripts.put("state.list.add(1)", vars ->
aggScript(vars, state -> {
// Lazily populate state.list for tests without an init script
if (state.containsKey("list") == false) {
state.put("list", new ArrayList());
}

scripts.put("_agg[param1] = param2", vars ->
aggScript(vars, agg -> ((Map) agg).put(XContentMapValues.extractValue("params.param1", vars),
((List) state.get("list")).add(1);
}));

scripts.put("state[param1] = param2", vars ->
aggScript(vars, state -> state.put((String) XContentMapValues.extractValue("params.param1", vars),
XContentMapValues.extractValue("params.param2", vars))));

scripts.put("vars.multiplier = 3", vars ->
((Map<String, Object>) vars.get("vars")).put("multiplier", 3));

scripts.put("_agg.add(vars.multiplier)", vars ->
aggScript(vars, agg -> ((List) agg).add(XContentMapValues.extractValue("vars.multiplier", vars))));
scripts.put("state.list.add(vars.multiplier)", vars ->
aggScript(vars, state -> {
// Lazily populate state.list for tests without an init script
if (state.containsKey("list") == false) {
state.put("list", new ArrayList());
}

((List) state.get("list")).add(XContentMapValues.extractValue("vars.multiplier", vars));
}));

// Equivalent to:
//
// newaggregation = [];
// sum = 0;
//
// for (a in _agg) {
// sum += a
// for (s in state.list) {
// sum += s
// };
//
// newaggregation.add(sum);
// return newaggregation"
//
scripts.put("sum agg values as a new aggregation", vars -> {
scripts.put("sum state values as a new aggregation", vars -> {
List newAggregation = new ArrayList();
List<?> agg = (List<?>) vars.get("_agg");
Map<String, Object> state = (Map<String, Object>) vars.get("state");
List<?> list = (List<?>) state.get("list");

if (agg != null) {
if (list != null) {
Integer sum = 0;
for (Object a : (List) agg) {
sum += ((Number) a).intValue();
for (Object s : list) {
sum += ((Number) s).intValue();
}
newAggregation.add(sum);
}
@ -137,24 +153,41 @@ public class ScriptedMetricIT extends ESIntegTestCase {
// newaggregation = [];
// sum = 0;
//
// for (aggregation in _aggs) {
// for (a in aggregation) {
// sum += a
// for (state in states) {
// for (s in state) {
// sum += s
// }
// };
//
// newaggregation.add(sum);
// return newaggregation"
//
scripts.put("sum aggs of agg values as a new aggregation", vars -> {
scripts.put("sum all states (lists) values as a new aggregation", vars -> {
List newAggregation = new ArrayList();
Integer sum = 0;

List<?> aggs = (List<?>) vars.get("_aggs");
for (Object aggregation : (List) aggs) {
if (aggregation != null) {
for (Object a : (List) aggregation) {
sum += ((Number) a).intValue();
List<List<?>> states = (List<List<?>>) vars.get("states");
for (List<?> list : states) {
if (list != null) {
for (Object s : list) {
sum += ((Number) s).intValue();
}
}
}
newAggregation.add(sum);
return newAggregation;
});

scripts.put("sum all states' state.list values as a new aggregation", vars -> {
List newAggregation = new ArrayList();
Integer sum = 0;

List<Map<String, Object>> states = (List<Map<String, Object>>) vars.get("states");
for (Map<String, Object> state : states) {
List<?> list = (List<?>) state.get("list");
if (list != null) {
for (Object s : list) {
sum += ((Number) s).intValue();
}
}
}
@ -167,25 +200,25 @@ public class ScriptedMetricIT extends ESIntegTestCase {
// newaggregation = [];
// sum = 0;
//
// for (aggregation in _aggs) {
// for (a in aggregation) {
// sum += a
// for (state in states) {
// for (s in state) {
// sum += s
// }
// };
//
// newaggregation.add(sum * multiplier);
// return newaggregation"
//
scripts.put("multiplied sum aggs of agg values as a new aggregation", vars -> {
scripts.put("multiplied sum all states (lists) values as a new aggregation", vars -> {
Integer multiplier = (Integer) vars.get("multiplier");
List newAggregation = new ArrayList();
Integer sum = 0;

List<?> aggs = (List<?>) vars.get("_aggs");
for (Object aggregation : (List) aggs) {
if (aggregation != null) {
for (Object a : (List) aggregation) {
sum += ((Number) a).intValue();
List<List<?>> states = (List<List<?>>) vars.get("states");
for (List<?> list : states) {
if (list != null) {
for (Object s : list) {
sum += ((Number) s).intValue();
}
}
}
@ -193,53 +226,12 @@ public class ScriptedMetricIT extends ESIntegTestCase {
return newAggregation;
});

scripts.put("state.items = new ArrayList()", vars ->
aggContextScript(vars, state -> ((HashMap) state).put("items", new ArrayList())));

scripts.put("state.items.add(1)", vars ->
aggContextScript(vars, state -> {
HashMap stateMap = (HashMap) state;
List items = (List) stateMap.get("items");
items.add(1);
}));

scripts.put("sum context state values", vars -> {
int sum = 0;
HashMap state = (HashMap) vars.get("state");
List items = (List) state.get("items");

for (Object x : items) {
sum += (Integer)x;
}

return sum;
});

scripts.put("sum context states", vars -> {
Integer sum = 0;

List<?> states = (List<?>) vars.get("states");
for (Object state : states) {
sum += ((Number) state).intValue();
}

return sum;
});

return scripts;
}

static <T> Object aggScript(Map<String, Object> vars, Consumer<T> fn) {
return aggScript(vars, fn, "_agg");
}

static <T> Object aggContextScript(Map<String, Object> vars, Consumer<T> fn) {
return aggScript(vars, fn, "state");
}

@SuppressWarnings("unchecked")
private static <T> Object aggScript(Map<String, Object> vars, Consumer<T> fn, String stateVarName) {
T aggState = (T) vars.get(stateVarName);
static Map<String, Object> aggScript(Map<String, Object> vars, Consumer<Map<String, Object>> fn) {
Map<String, Object> aggState = (Map<String, Object>) vars.get("state");
fn.accept(aggState);
return aggState;
}
@ -285,17 +277,17 @@ public class ScriptedMetricIT extends ESIntegTestCase {
assertAcked(client().admin().cluster().preparePutStoredScript()
.setId("mapScript_stored")
.setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," +
" \"source\": \"_agg.add(vars.multiplier)\"} }"), XContentType.JSON));
" \"source\": \"state.list.add(vars.multiplier)\"} }"), XContentType.JSON));

assertAcked(client().admin().cluster().preparePutStoredScript()
.setId("combineScript_stored")
.setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," +
" \"source\": \"sum agg values as a new aggregation\"} }"), XContentType.JSON));
" \"source\": \"sum state values as a new aggregation\"} }"), XContentType.JSON));

assertAcked(client().admin().cluster().preparePutStoredScript()
.setId("reduceScript_stored")
.setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," +
" \"source\": \"sum aggs of agg values as a new aggregation\"} }"), XContentType.JSON));
" \"source\": \"sum all states (lists) values as a new aggregation\"} }"), XContentType.JSON));

indexRandom(true, builders);
ensureSearchable();
@ -315,9 +307,10 @@ public class ScriptedMetricIT extends ESIntegTestCase {
// the name of the file script is used in test method while the source of the file script
// must match a predefined script from CustomScriptPlugin.pluginScripts() method
Files.write(scripts.resolve("init_script.mockscript"), "vars.multiplier = 3".getBytes("UTF-8"));
Files.write(scripts.resolve("map_script.mockscript"), "_agg.add(vars.multiplier)".getBytes("UTF-8"));
Files.write(scripts.resolve("combine_script.mockscript"), "sum agg values as a new aggregation".getBytes("UTF-8"));
Files.write(scripts.resolve("reduce_script.mockscript"), "sum aggs of agg values as a new aggregation".getBytes("UTF-8"));
Files.write(scripts.resolve("map_script.mockscript"), "state.list.add(vars.multiplier)".getBytes("UTF-8"));
Files.write(scripts.resolve("combine_script.mockscript"), "sum state values as a new aggregation".getBytes("UTF-8"));
Files.write(scripts.resolve("reduce_script.mockscript"),
"sum all states (lists) values as a new aggregation".getBytes("UTF-8"));
} catch (IOException e) {
throw new RuntimeException("failed to create scripts");
}
@ -329,7 +322,7 @@ public class ScriptedMetricIT extends ESIntegTestCase {
}

public void testMap() {
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg['count'] = 1", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = 1", Collections.emptyMap());

SearchResponse response = client().prepareSearch("idx")
.setQuery(matchAllQuery())
@ -365,52 +358,12 @@ public class ScriptedMetricIT extends ESIntegTestCase {
assertThat(numShardsRun, greaterThan(0));
}

public void testExplicitAggParam() {
Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());

Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", Collections.emptyMap());

SearchResponse response = client().prepareSearch("idx")
.setQuery(matchAllQuery())
.addAggregation(scriptedMetric("scripted").params(params).mapScript(mapScript))
.get();
assertSearchResponse(response);
assertThat(response.getHits().getTotalHits(), equalTo(numDocs));

Aggregation aggregation = response.getAggregations().get("scripted");
assertThat(aggregation, notNullValue());
assertThat(aggregation, instanceOf(ScriptedMetric.class));
ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries));
long totalCount = 0;
for (Object object : aggregationList) {
assertThat(object, notNullValue());
assertThat(object, instanceOf(List.class));
List<?> list = (List<?>) object;
for (Object o : list) {
assertThat(o, notNullValue());
assertThat(o, instanceOf(Number.class));
Number numberValue = (Number) o;
assertThat(numberValue, equalTo((Number) 1));
totalCount += numberValue.longValue();
}
}
assertThat(totalCount, equalTo(numDocs));
}

public void testMapWithParamsAndImplicitAggMap() {
public void testMapWithParams() {
// Split the params up between the script and the aggregation.
// Don't put any _agg map in params.
Map<String, Object> scriptParams = Collections.singletonMap("param1", "12");
Map<String, Object> aggregationParams = Collections.singletonMap("param2", 1);

// The _agg hashmap will be available even if not declared in the params map
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg[param1] = param2", scriptParams);
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state[param1] = param2", scriptParams);

SearchResponse response = client().prepareSearch("idx")
.setQuery(matchAllQuery())
@ -454,7 +407,6 @@ public class ScriptedMetricIT extends ESIntegTestCase {
varsMap.put("multiplier", 1);

Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());
params.put("vars", varsMap);

SearchResponse response = client()
@ -466,7 +418,7 @@ public class ScriptedMetricIT extends ESIntegTestCase {
.initScript(
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()))
.mapScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"_agg.add(vars.multiplier)", Collections.emptyMap())))
"state.list.add(vars.multiplier)", Collections.emptyMap())))
.get();
assertSearchResponse(response);
assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
@ -483,8 +435,11 @@ public class ScriptedMetricIT extends ESIntegTestCase {
long totalCount = 0;
for (Object object : aggregationList) {
assertThat(object, notNullValue());
assertThat(object, instanceOf(List.class));
List<?> list = (List<?>) object;
assertThat(object, instanceOf(HashMap.class));
Map<String, Object> map = (Map<String, Object>) object;
assertThat(map, hasKey("list"));
assertThat(map.get("list"), instanceOf(List.class));
List<?> list = (List<?>) map.get("list");
for (Object o : list) {
assertThat(o, notNullValue());
assertThat(o, instanceOf(Number.class));
@ -501,12 +456,11 @@ public class ScriptedMetricIT extends ESIntegTestCase {
varsMap.put("multiplier", 1);

Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());
params.put("vars", varsMap);

Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(1)", Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap());
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap());

SearchResponse response = client()
.prepareSearch("idx")
@ -553,13 +507,13 @@ public class ScriptedMetricIT extends ESIntegTestCase {
varsMap.put("multiplier", 1);

Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());
params.put("vars", varsMap);

Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)",
Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap());
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap());

SearchResponse response = client()
.prepareSearch("idx")
@ -607,15 +561,15 @@ public class ScriptedMetricIT extends ESIntegTestCase {
varsMap.put("multiplier", 1);

Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());
params.put("vars", varsMap);

Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)",
Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap());
Script reduceScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap());
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"sum all states (lists) values as a new aggregation", Collections.emptyMap());

SearchResponse response = client()
.prepareSearch("idx")
@ -652,15 +606,15 @@ public class ScriptedMetricIT extends ESIntegTestCase {
varsMap.put("multiplier", 1);

Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());
params.put("vars", varsMap);

Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)",
Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap());
Script reduceScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap());
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"sum all states (lists) values as a new aggregation", Collections.emptyMap());

SearchResponse searchResponse = client()
.prepareSearch("idx")
@ -707,14 +661,14 @@ public class ScriptedMetricIT extends ESIntegTestCase {
varsMap.put("multiplier", 1);

Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());
params.put("vars", varsMap);

Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)",
Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap());
Script reduceScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap());
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"sum all states (lists) values as a new aggregation", Collections.emptyMap());

SearchResponse response = client()
.prepareSearch("idx")
@ -749,13 +703,13 @@ public class ScriptedMetricIT extends ESIntegTestCase {
varsMap.put("multiplier", 1);

Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());
params.put("vars", varsMap);

Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap());
Script reduceScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)",
Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"sum all states' state.list values as a new aggregation", Collections.emptyMap());

SearchResponse response = client()
.prepareSearch("idx")
@ -789,12 +743,12 @@ public class ScriptedMetricIT extends ESIntegTestCase {
Map<String, Object> varsMap = new HashMap<>();
varsMap.put("multiplier", 1);
Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());
params.put("vars", varsMap);

Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap());
Script reduceScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)",
Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"sum all states' state.list values as a new aggregation", Collections.emptyMap());

SearchResponse response = client()
.prepareSearch("idx")
@ -828,18 +782,18 @@ public class ScriptedMetricIT extends ESIntegTestCase {
varsMap.put("multiplier", 1);

Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());
params.put("vars", varsMap);

Map<String, Object> reduceParams = new HashMap<>();
reduceParams.put("multiplier", 4);

Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)",
Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap());
Script reduceScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "multiplied sum aggs of agg values as a new aggregation", reduceParams);
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"multiplied sum all states (lists) values as a new aggregation", reduceParams);

SearchResponse response = client()
.prepareSearch("idx")
@ -875,7 +829,6 @@ public class ScriptedMetricIT extends ESIntegTestCase {
varsMap.put("multiplier", 1);

Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());
params.put("vars", varsMap);

SearchResponse response = client()
@ -916,15 +869,15 @@ public class ScriptedMetricIT extends ESIntegTestCase {
varsMap.put("multiplier", 1);

Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());
params.put("vars", varsMap);

Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)",
Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap());
Script reduceScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap());
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"sum all states (lists) values as a new aggregation", Collections.emptyMap());

SearchResponse response = client()
.prepareSearch("idx")
@ -977,15 +930,15 @@ public class ScriptedMetricIT extends ESIntegTestCase {
varsMap.put("multiplier", 1);

Map<String, Object> params = new HashMap<>();
params.put("_agg", new ArrayList<>());
params.put("vars", varsMap);

Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)",
Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap());
Script reduceScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap());
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"sum all states (lists) values as a new aggregation", Collections.emptyMap());

SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
.setQuery(matchAllQuery())
@ -1021,7 +974,7 @@ public class ScriptedMetricIT extends ESIntegTestCase {
* not using a script does get cached.
*/
public void testDontCacheScripts() throws Exception {
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg['count'] = 1", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = 1", Collections.emptyMap());
assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long")
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
.get());
@ -1047,7 +1000,7 @@ public class ScriptedMetricIT extends ESIntegTestCase {

public void testConflictingAggAndScriptParams() {
Map<String, Object> params = Collections.singletonMap("param1", "12");
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", params);
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(1)", params);

SearchRequestBuilder builder = client().prepareSearch("idx")
.setQuery(matchAllQuery())
@ -1056,37 +1009,4 @@ public class ScriptedMetricIT extends ESIntegTestCase {
SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, builder::get);
assertThat(ex.getCause().getMessage(), containsString("Parameter name \"param1\" used in both aggregation and script parameters"));
}

public void testAggFromContext() {
Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.items = new ArrayList()", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.items.add(1)", Collections.emptyMap());
Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum context state values", Collections.emptyMap());
Script reduceScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum context states",
Collections.emptyMap());

SearchResponse response = client()
.prepareSearch("idx")
.setQuery(matchAllQuery())
.addAggregation(
scriptedMetric("scripted")
.initScript(initScript)
.mapScript(mapScript)
.combineScript(combineScript)
.reduceScript(reduceScript))
.get();

Aggregation aggregation = response.getAggregations().get("scripted");
assertThat(aggregation, notNullValue());
assertThat(aggregation, instanceOf(ScriptedMetric.class));

ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
assertThat(scriptedMetricAggregation.aggregation(), notNullValue());

assertThat(scriptedMetricAggregation.aggregation(), instanceOf(Integer.class));
Integer aggResult = (Integer) scriptedMetricAggregation.aggregation();
long totalAgg = aggResult.longValue();
assertThat(totalAgg, equalTo(numDocs));
}
}

@ -0,0 +1,109 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.search.aggregations.metrics.scripted;

import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.MockScriptEngine;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptedMetricAggContexts;
import org.elasticsearch.script.ScriptEngine;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.aggregations.Aggregation.CommonFields;
import org.elasticsearch.search.aggregations.ParsedAggregation;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.test.InternalAggregationTestCase;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;

import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.sameInstance;

/**
* This test verifies that the _aggs param is added correctly when the system property
* "es.aggregations.enable_scripted_metric_agg_param" is set to true.
*/
public class InternalScriptedMetricAggStateV6CompatTests extends InternalAggregationTestCase<InternalScriptedMetric> {

private static final String REDUCE_SCRIPT_NAME = "reduceScript";

@Override
protected InternalScriptedMetric createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData) {
Script reduceScript = new Script(ScriptType.INLINE, MockScriptEngine.NAME, REDUCE_SCRIPT_NAME, Collections.emptyMap());
return new InternalScriptedMetric(name, "agg value", reduceScript, pipelineAggregators, metaData);
}

/**
* Mock of the script service. The script that is run looks at the
* "_aggs" parameter to verify that it was put in place by InternalScriptedMetric.
*/
@Override
protected ScriptService mockScriptService() {
Function<Map<String, Object>, Object> script = params -> {
Object aggs = params.get("_aggs");
Object states = params.get("states");
assertThat(aggs, instanceOf(List.class));
assertThat(aggs, sameInstance(states));
return aggs;
};

@SuppressWarnings("unchecked")
MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME,
Collections.singletonMap(REDUCE_SCRIPT_NAME, script));
Map<String, ScriptEngine> engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine);
return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS);
}

@Override
protected void assertReduced(InternalScriptedMetric reduced, List<InternalScriptedMetric> inputs) {
assertWarnings(ScriptedMetricAggContexts.AGG_PARAM_DEPRECATION_WARNING);
}

@Override
protected Reader<InternalScriptedMetric> instanceReader() {
return InternalScriptedMetric::new;
}

@Override
protected void assertFromXContent(InternalScriptedMetric aggregation, ParsedAggregation parsedAggregation) {}

@Override
protected Predicate<String> excludePathsFromXContentInsertion() {
return path -> path.contains(CommonFields.VALUE.getPreferredName());
}

@Override
protected InternalScriptedMetric mutateInstance(InternalScriptedMetric instance) {
String name = instance.getName();
Object value = instance.aggregation();
Script reduceScript = instance.reduceScript;
List<PipelineAggregator> pipelineAggregators = instance.pipelineAggregators();
Map<String, Object> metaData = instance.getMetaData();
return new InternalScriptedMetric(name + randomAlphaOfLength(5), value, reduceScript, pipelineAggregators,
metaData);
}
}

@ -107,7 +107,7 @@ public class InternalScriptedMetricTests extends InternalAggregationTestCase<Int

/**
* Mock of the script service. The script that is run looks at the
* "_aggs" parameter visible when executing the script and simply returns the count.
* "states" context variable visible when executing the script and simply returns the count.
* This should be equal to the number of input InternalScriptedMetrics that are reduced
* in total.
*/
@ -116,7 +116,7 @@ public class InternalScriptedMetricTests extends InternalAggregationTestCase<Int
// mock script always retuns the size of the input aggs list as result
@SuppressWarnings("unchecked")
MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME,
Collections.singletonMap(REDUCE_SCRIPT_NAME, script -> ((List<Object>) script.get("_aggs")).size()));
Collections.singletonMap(REDUCE_SCRIPT_NAME, script -> ((List<Object>) script.get("states")).size()));
Map<String, ScriptEngine> engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine);
return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS);
}

@ -0,0 +1,180 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.search.aggregations.metrics.scripted;

import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.Directory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.script.MockScriptEngine;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptedMetricAggContexts;
import org.elasticsearch.script.ScriptEngine;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.junit.BeforeClass;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

import static java.util.Collections.singleton;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.sameInstance;

/**
* This test verifies that the _agg param is added correctly when the system property
* "es.aggregations.enable_scripted_metric_agg_param" is set to true.
*/
public class ScriptedMetricAggregatorAggStateV6CompatTests extends AggregatorTestCase {

private static final String AGG_NAME = "scriptedMetric";
private static final Script INIT_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "initScript", Collections.emptyMap());
private static final Script MAP_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "mapScript", Collections.emptyMap());
private static final Script COMBINE_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScript",
Collections.emptyMap());

private static final Script INIT_SCRIPT_EXPLICIT_AGG = new Script(ScriptType.INLINE, MockScriptEngine.NAME,
"initScriptExplicitAgg", Collections.emptyMap());
private static final Script MAP_SCRIPT_EXPLICIT_AGG = new Script(ScriptType.INLINE, MockScriptEngine.NAME,
"mapScriptExplicitAgg", Collections.emptyMap());
private static final Script COMBINE_SCRIPT_EXPLICIT_AGG = new Script(ScriptType.INLINE, MockScriptEngine.NAME,
|
||||
"combineScriptExplicitAgg", Collections.emptyMap());
|
||||
private static final String EXPLICIT_AGG_OBJECT = "Explicit agg object";
|
||||
|
||||
private static final Map<String, Function<Map<String, Object>, Object>> SCRIPTS = new HashMap<>();
|
||||
|
||||
@BeforeClass
|
||||
@SuppressWarnings("unchecked")
|
||||
public static void initMockScripts() {
|
||||
// If _agg is provided implicitly, it should be the same objects as "state" from the context.
|
||||
SCRIPTS.put("initScript", params -> {
|
||||
Object agg = params.get("_agg");
|
||||
Object state = params.get("state");
|
||||
assertThat(agg, instanceOf(Map.class));
|
||||
assertThat(agg, sameInstance(state));
|
||||
return agg;
|
||||
});
|
||||
SCRIPTS.put("mapScript", params -> {
|
||||
Object agg = params.get("_agg");
|
||||
Object state = params.get("state");
|
||||
assertThat(agg, instanceOf(Map.class));
|
||||
assertThat(agg, sameInstance(state));
|
||||
return agg;
|
||||
});
|
||||
SCRIPTS.put("combineScript", params -> {
|
||||
Object agg = params.get("_agg");
|
||||
Object state = params.get("state");
|
||||
assertThat(agg, instanceOf(Map.class));
|
||||
assertThat(agg, sameInstance(state));
|
||||
return agg;
|
||||
});
|
||||
|
||||
SCRIPTS.put("initScriptExplicitAgg", params -> {
|
||||
Object agg = params.get("_agg");
|
||||
assertThat(agg, equalTo(EXPLICIT_AGG_OBJECT));
|
||||
return agg;
|
||||
});
|
||||
SCRIPTS.put("mapScriptExplicitAgg", params -> {
|
||||
Object agg = params.get("_agg");
|
||||
assertThat(agg, equalTo(EXPLICIT_AGG_OBJECT));
|
||||
return agg;
|
||||
});
|
||||
SCRIPTS.put("combineScriptExplicitAgg", params -> {
|
||||
Object agg = params.get("_agg");
|
||||
assertThat(agg, equalTo(EXPLICIT_AGG_OBJECT));
|
||||
return agg;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Test that the _agg param is implicitly added
|
||||
*/
|
||||
public void testWithImplicitAggParam() throws IOException {
|
||||
try (Directory directory = newDirectory()) {
|
||||
Integer numDocs = 10;
|
||||
try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i)));
|
||||
}
|
||||
}
|
||||
try (IndexReader indexReader = DirectoryReader.open(directory)) {
|
||||
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
|
||||
aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT).combineScript(COMBINE_SCRIPT);
|
||||
search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder);
|
||||
}
|
||||
}
|
||||
|
||||
assertWarnings(ScriptedMetricAggContexts.AGG_PARAM_DEPRECATION_WARNING);
|
||||
}
|
||||
|
||||
/**
|
||||
* Test that an explicitly added _agg param is honored
|
||||
*/
|
||||
public void testWithExplicitAggParam() throws IOException {
|
||||
try (Directory directory = newDirectory()) {
|
||||
Integer numDocs = 10;
|
||||
try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i)));
|
||||
}
|
||||
}
|
||||
|
||||
Map<String, Object> aggParams = new HashMap<>();
|
||||
aggParams.put("_agg", EXPLICIT_AGG_OBJECT);
|
||||
|
||||
try (IndexReader indexReader = DirectoryReader.open(directory)) {
|
||||
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
|
||||
aggregationBuilder
|
||||
.params(aggParams)
|
||||
.initScript(INIT_SCRIPT_EXPLICIT_AGG)
|
||||
.mapScript(MAP_SCRIPT_EXPLICIT_AGG)
|
||||
.combineScript(COMBINE_SCRIPT_EXPLICIT_AGG);
|
||||
search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder);
|
||||
}
|
||||
}
|
||||
|
||||
assertWarnings(ScriptedMetricAggContexts.AGG_PARAM_DEPRECATION_WARNING);
|
||||
}
|
||||
|
||||
/**
|
||||
* We cannot use Mockito for mocking QueryShardContext in this case because
|
||||
* script-related methods (e.g. QueryShardContext#getLazyExecutableScript)
|
||||
* is final and cannot be mocked
|
||||
*/
|
||||
@Override
|
||||
protected QueryShardContext queryShardContextMock(MapperService mapperService) {
|
||||
MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, SCRIPTS);
|
||||
Map<String, ScriptEngine> engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine);
|
||||
ScriptService scriptService = new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS);
|
||||
return new QueryShardContext(0, mapperService.getIndexSettings(), null, null, mapperService, null, scriptService,
|
||||
xContentRegistry(), writableRegistry(), null, null, System::currentTimeMillis, null);
|
||||
}
|
||||
}
|
|
@ -83,72 +83,72 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
|
|||
@SuppressWarnings("unchecked")
|
||||
public static void initMockScripts() {
|
||||
SCRIPTS.put("initScript", params -> {
|
||||
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
|
||||
agg.put("collector", new ArrayList<Integer>());
|
||||
return agg;
|
||||
});
|
||||
Map<String, Object> state = (Map<String, Object>) params.get("state");
|
||||
state.put("collector", new ArrayList<Integer>());
|
||||
return state;
|
||||
});
|
||||
SCRIPTS.put("mapScript", params -> {
|
||||
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
|
||||
((List<Integer>) agg.get("collector")).add(1); // just add 1 for each doc the script is run on
|
||||
return agg;
|
||||
Map<String, Object> state = (Map<String, Object>) params.get("state");
|
||||
((List<Integer>) state.get("collector")).add(1); // just add 1 for each doc the script is run on
|
||||
return state;
|
||||
});
|
||||
SCRIPTS.put("combineScript", params -> {
|
||||
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
|
||||
return ((List<Integer>) agg.get("collector")).stream().mapToInt(Integer::intValue).sum();
|
||||
Map<String, Object> state = (Map<String, Object>) params.get("state");
|
||||
return ((List<Integer>) state.get("collector")).stream().mapToInt(Integer::intValue).sum();
|
||||
});
|
||||
|
||||
SCRIPTS.put("initScriptScore", params -> {
|
||||
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
|
||||
agg.put("collector", new ArrayList<Double>());
|
||||
return agg;
|
||||
});
|
||||
Map<String, Object> state = (Map<String, Object>) params.get("state");
|
||||
state.put("collector", new ArrayList<Double>());
|
||||
return state;
|
||||
});
|
||||
SCRIPTS.put("mapScriptScore", params -> {
|
||||
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
|
||||
((List<Double>) agg.get("collector")).add(((Number) params.get("_score")).doubleValue());
|
||||
return agg;
|
||||
Map<String, Object> state = (Map<String, Object>) params.get("state");
|
||||
((List<Double>) state.get("collector")).add(((Number) params.get("_score")).doubleValue());
|
||||
return state;
|
||||
});
|
||||
SCRIPTS.put("combineScriptScore", params -> {
|
||||
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
|
||||
return ((List<Double>) agg.get("collector")).stream().mapToDouble(Double::doubleValue).sum();
|
||||
Map<String, Object> state = (Map<String, Object>) params.get("state");
|
||||
return ((List<Double>) state.get("collector")).stream().mapToDouble(Double::doubleValue).sum();
|
||||
});
|
||||
|
||||
SCRIPTS.put("initScriptParams", params -> {
|
||||
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
|
||||
Map<String, Object> state = (Map<String, Object>) params.get("state");
|
||||
Integer initialValue = (Integer)params.get("initialValue");
|
||||
ArrayList<Integer> collector = new ArrayList<>();
|
||||
collector.add(initialValue);
|
||||
agg.put("collector", collector);
|
||||
return agg;
|
||||
state.put("collector", collector);
|
||||
return state;
|
||||
});
|
||||
SCRIPTS.put("mapScriptParams", params -> {
|
||||
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
|
||||
Map<String, Object> state = (Map<String, Object>) params.get("state");
|
||||
Integer itemValue = (Integer) params.get("itemValue");
|
||||
((List<Integer>) agg.get("collector")).add(itemValue);
|
||||
return agg;
|
||||
((List<Integer>) state.get("collector")).add(itemValue);
|
||||
return state;
|
||||
});
|
||||
SCRIPTS.put("combineScriptParams", params -> {
|
||||
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
|
||||
Map<String, Object> state = (Map<String, Object>) params.get("state");
|
||||
int divisor = ((Integer) params.get("divisor"));
|
||||
return ((List<Integer>) agg.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum();
|
||||
return ((List<Integer>) state.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum();
|
||||
});
|
||||
|
||||
SCRIPTS.put("initScriptSelfRef", params -> {
|
||||
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
|
||||
agg.put("collector", new ArrayList<Integer>());
|
||||
agg.put("selfRef", agg);
|
||||
return agg;
|
||||
Map<String, Object> state = (Map<String, Object>) params.get("state");
|
||||
state.put("collector", new ArrayList<Integer>());
|
||||
state.put("selfRef", state);
|
||||
return state;
|
||||
});
|
||||
|
||||
SCRIPTS.put("mapScriptSelfRef", params -> {
|
||||
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
|
||||
agg.put("selfRef", agg);
|
||||
return agg;
|
||||
Map<String, Object> state = (Map<String, Object>) params.get("state");
|
||||
state.put("selfRef", state);
|
||||
return state;
|
||||
});
|
||||
|
||||
SCRIPTS.put("combineScriptSelfRef", params -> {
|
||||
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
|
||||
agg.put("selfRef", agg);
|
||||
return agg;
|
||||
Map<String, Object> state = (Map<String, Object>) params.get("state");
|
||||
state.put("selfRef", state);
|
||||
return state;
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -170,7 +170,7 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
    }

    /**
     * without combine script, the "_aggs" map should contain a list of the size of the number of documents matched
     * without combine script, the "states" map should contain a list of the size of the number of documents matched
     */
    public void testScriptedMetricWithoutCombine() throws IOException {
        try (Directory directory = newDirectory()) {

@ -21,6 +21,7 @@ package org.elasticsearch.search.scroll;

import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;

import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;

@ -198,6 +199,8 @@ public class DuelScrollIT extends ESIntegTestCase {
        }
        // no replicas, as they might be ordered differently
        settings.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0);
        // we need to control refreshes as they might take different merges into account
        settings.put("index.refresh_interval", -1);

        assertAcked(prepareCreate("test").setSettings(settings.build()).get());
        final int numDocs = randomIntBetween(10, 200);

@ -493,15 +493,24 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase {
|
|||
}
|
||||
|
||||
public void testGeoField() throws Exception {
|
||||
// Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5);
|
||||
// Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
|
||||
XContentBuilder mapping = jsonBuilder();
|
||||
mapping.startObject();
|
||||
mapping.startObject(TYPE);
|
||||
mapping.startObject("properties");
|
||||
mapping.startObject("location");
|
||||
mapping.startObject("properties");
|
||||
mapping.startObject("pin");
|
||||
mapping.field("type", "geo_point");
|
||||
// Enable store and disable indexing sometimes
|
||||
if (randomBoolean()) {
|
||||
mapping.field("store", "true");
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
mapping.field("index", "false");
|
||||
}
|
||||
mapping.endObject(); // pin
|
||||
mapping.endObject();
|
||||
mapping.endObject(); // location
|
||||
mapping.startObject(FIELD);
|
||||
mapping.field("type", "completion");
|
||||
mapping.field("analyzer", "simple");
|
||||
|
@ -510,7 +519,7 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase {
|
|||
mapping.startObject();
|
||||
mapping.field("name", "st");
|
||||
mapping.field("type", "geo");
|
||||
mapping.field("path", "pin");
|
||||
mapping.field("path", "location.pin");
|
||||
mapping.field("precision", 5);
|
||||
mapping.endObject();
|
||||
mapping.endArray();
|
||||
|
@ -524,7 +533,9 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase {
|
|||
|
||||
XContentBuilder source1 = jsonBuilder()
|
||||
.startObject()
|
||||
.startObject("location")
|
||||
.latlon("pin", 52.529172, 13.407333)
|
||||
.endObject()
|
||||
.startObject(FIELD)
|
||||
.array("input", "Hotel Amsterdam in Berlin")
|
||||
.endObject()
|
||||
|
@ -533,7 +544,9 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase {
|
|||
|
||||
XContentBuilder source2 = jsonBuilder()
|
||||
.startObject()
|
||||
.startObject("location")
|
||||
.latlon("pin", 52.363389, 4.888695)
|
||||
.endObject()
|
||||
.startObject(FIELD)
|
||||
.array("input", "Hotel Berlin in Amsterdam")
|
||||
.endObject()
|
||||
|
@ -600,6 +613,7 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase {
|
|||
private void createIndexAndMapping(CompletionMappingBuilder completionMappingBuilder) throws IOException {
|
||||
createIndexAndMappingAndSettings(Settings.EMPTY, completionMappingBuilder);
|
||||
}
|
||||
|
||||
private void createIndexAndMappingAndSettings(Settings settings, CompletionMappingBuilder completionMappingBuilder) throws IOException {
|
||||
XContentBuilder mapping = jsonBuilder().startObject()
|
||||
.startObject(TYPE).startObject("properties")
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.search.suggest.completion;
|
||||
|
||||
import org.apache.lucene.index.IndexableField;
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
@ -200,6 +201,70 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase {
|
|||
assertContextSuggestFields(fields, 3);
|
||||
}
|
||||
|
||||
public void testMalformedGeoField() throws Exception {
|
||||
XContentBuilder mapping = jsonBuilder();
|
||||
mapping.startObject();
|
||||
mapping.startObject("type1");
|
||||
mapping.startObject("properties");
|
||||
mapping.startObject("pin");
|
||||
String type = randomFrom("text", "keyword", "long");
|
||||
mapping.field("type", type);
|
||||
mapping.endObject();
|
||||
mapping.startObject("suggestion");
|
||||
mapping.field("type", "completion");
|
||||
mapping.field("analyzer", "simple");
|
||||
|
||||
mapping.startArray("contexts");
|
||||
mapping.startObject();
|
||||
mapping.field("name", "st");
|
||||
mapping.field("type", "geo");
|
||||
mapping.field("path", "pin");
|
||||
mapping.field("precision", 5);
|
||||
mapping.endObject();
|
||||
mapping.endArray();
|
||||
|
||||
mapping.endObject();
|
||||
|
||||
mapping.endObject();
|
||||
mapping.endObject();
|
||||
mapping.endObject();
|
||||
|
||||
ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class,
|
||||
() -> createIndex("test", Settings.EMPTY, "type1", mapping));
|
||||
|
||||
assertThat(ex.getMessage(), equalTo("field [pin] referenced in context [st] must be mapped to geo_point, found [" + type + "]"));
|
||||
}
|
||||
|
||||
public void testMissingGeoField() throws Exception {
|
||||
XContentBuilder mapping = jsonBuilder();
|
||||
mapping.startObject();
|
||||
mapping.startObject("type1");
|
||||
mapping.startObject("properties");
|
||||
mapping.startObject("suggestion");
|
||||
mapping.field("type", "completion");
|
||||
mapping.field("analyzer", "simple");
|
||||
|
||||
mapping.startArray("contexts");
|
||||
mapping.startObject();
|
||||
mapping.field("name", "st");
|
||||
mapping.field("type", "geo");
|
||||
mapping.field("path", "pin");
|
||||
mapping.field("precision", 5);
|
||||
mapping.endObject();
|
||||
mapping.endArray();
|
||||
|
||||
mapping.endObject();
|
||||
|
||||
mapping.endObject();
|
||||
mapping.endObject();
|
||||
mapping.endObject();
|
||||
|
||||
ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class,
|
||||
() -> createIndex("test", Settings.EMPTY, "type1", mapping));
|
||||
|
||||
assertThat(ex.getMessage(), equalTo("field [pin] referenced in context [st] is not defined in the mapping"));
|
||||
}
|
||||
|
||||
public void testParsingQueryContextBasic() throws Exception {
|
||||
XContentBuilder builder = jsonBuilder().value("ezs42e44yx96");
|
||||
XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder));
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
*/
|
||||
package org.elasticsearch.transport;
|
||||
|
||||
import java.util.function.Supplier;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
|
@ -158,8 +159,8 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
updateSeedNodes(connection, Arrays.asList(seedNode));
|
||||
Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
updateSeedNodes(connection, Arrays.asList(() -> seedNode));
|
||||
assertTrue(service.nodeConnected(seedNode));
|
||||
assertTrue(service.nodeConnected(discoverableNode));
|
||||
assertTrue(connection.assertNoRunningConnections());
|
||||
|
@ -198,8 +199,8 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
updateSeedNodes(connection, Arrays.asList(seedNode));
|
||||
Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
updateSeedNodes(connection, Arrays.asList(() -> seedNode));
|
||||
assertTrue(service.nodeConnected(seedNode));
|
||||
assertTrue(service.nodeConnected(discoverableNode));
|
||||
assertTrue(connection.assertNoRunningConnections());
|
||||
|
@ -254,8 +255,8 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
updateSeedNodes(connection, Arrays.asList(seedNode));
|
||||
Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
updateSeedNodes(connection, Arrays.asList(() -> seedNode));
|
||||
assertTrue(service.nodeConnected(seedNode));
|
||||
assertTrue(service.nodeConnected(discoverableNode));
|
||||
assertTrue(connection.assertNoRunningConnections());
|
||||
|
@ -276,7 +277,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
knownNodes.add(discoverableTransport.getLocalDiscoNode());
|
||||
knownNodes.add(incompatibleTransport.getLocalDiscoNode());
|
||||
Collections.shuffle(knownNodes, random());
|
||||
List<DiscoveryNode> seedNodes = Arrays.asList(incompatibleSeedNode, seedNode);
|
||||
List<Supplier<DiscoveryNode>> seedNodes = Arrays.asList(() -> incompatibleSeedNode, () -> seedNode);
|
||||
Collections.shuffle(seedNodes, random());
|
||||
|
||||
try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
|
||||
|
@ -310,8 +311,8 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
updateSeedNodes(connection, Arrays.asList(seedNode));
|
||||
Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
updateSeedNodes(connection, Arrays.asList(() -> seedNode));
|
||||
assertTrue(service.nodeConnected(seedNode));
|
||||
assertTrue(service.nodeConnected(discoverableNode));
|
||||
assertFalse(service.nodeConnected(spareNode));
|
||||
|
@ -359,8 +360,8 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false)) {
|
||||
updateSeedNodes(connection, Arrays.asList(seedNode));
|
||||
Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false)) {
|
||||
updateSeedNodes(connection, Arrays.asList(() -> seedNode));
|
||||
if (rejectedNode.equals(seedNode)) {
|
||||
assertFalse(service.nodeConnected(seedNode));
|
||||
assertTrue(service.nodeConnected(discoverableNode));
|
||||
|
@ -374,7 +375,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
private void updateSeedNodes(RemoteClusterConnection connection, List<DiscoveryNode> seedNodes) throws Exception {
|
||||
private void updateSeedNodes(RemoteClusterConnection connection, List<Supplier<DiscoveryNode>> seedNodes) throws Exception {
|
||||
CountDownLatch latch = new CountDownLatch(1);
|
||||
AtomicReference<Exception> exceptionAtomicReference = new AtomicReference<>();
|
||||
ActionListener<Void> listener = ActionListener.wrap(x -> latch.countDown(), x -> {
|
||||
|
@ -398,8 +399,8 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
expectThrows(Exception.class, () -> updateSeedNodes(connection, Arrays.asList(seedNode)));
|
||||
Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
expectThrows(Exception.class, () -> updateSeedNodes(connection, Arrays.asList(() -> seedNode)));
|
||||
assertFalse(service.nodeConnected(seedNode));
|
||||
assertTrue(connection.assertNoRunningConnections());
|
||||
}
|
||||
|
@ -461,7 +462,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
connection.addConnectedNode(seedNode);
|
||||
for (DiscoveryNode node : knownNodes) {
|
||||
final Transport.Connection transportConnection = connection.getConnection(node);
|
||||
|
@ -504,7 +505,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
CountDownLatch listenerCalled = new CountDownLatch(1);
|
||||
AtomicReference<Exception> exceptionReference = new AtomicReference<>();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
ActionListener<Void> listener = ActionListener.wrap(x -> {
|
||||
listenerCalled.countDown();
|
||||
fail("expected exception");
|
||||
|
@ -512,7 +513,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
exceptionReference.set(x);
|
||||
listenerCalled.countDown();
|
||||
});
|
||||
connection.updateSeedNodes(Arrays.asList(seedNode), listener);
|
||||
connection.updateSeedNodes(Arrays.asList(() -> seedNode), listener);
|
||||
acceptedLatch.await();
|
||||
connection.close(); // now close it, this should trigger an interrupt on the socket and we can move on
|
||||
assertTrue(connection.assertNoRunningConnections());
|
||||
|
@ -539,7 +540,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
|
||||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
List<DiscoveryNode> nodes = Collections.singletonList(seedNode);
|
||||
List<Supplier<DiscoveryNode>> nodes = Collections.singletonList(() -> seedNode);
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
nodes, service, Integer.MAX_VALUE, n -> true)) {
|
||||
if (randomBoolean()) {
|
||||
|
@ -579,7 +580,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
|
||||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
List<DiscoveryNode> nodes = Collections.singletonList(seedNode);
|
||||
List<Supplier<DiscoveryNode>> nodes = Collections.singletonList(() -> seedNode);
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
nodes, service, Integer.MAX_VALUE, n -> true)) {
|
||||
SearchRequest request = new SearchRequest("test-index");
|
||||
|
@ -635,7 +636,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Collections.singletonList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
Collections.singletonList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
|
||||
SearchRequest request = new SearchRequest("test-index");
|
||||
ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index")
|
||||
|
@ -738,7 +739,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
knownNodes.add(discoverableTransport.getLocalDiscoNode());
|
||||
knownNodes.add(seedTransport1.getLocalDiscoNode());
|
||||
Collections.shuffle(knownNodes, random());
|
||||
List<DiscoveryNode> seedNodes = Arrays.asList(seedNode1, seedNode);
|
||||
List<Supplier<DiscoveryNode>> seedNodes = Arrays.asList(() -> seedNode1, () -> seedNode);
|
||||
Collections.shuffle(seedNodes, random());
|
||||
|
||||
try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
|
||||
|
@ -816,7 +817,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
knownNodes.add(discoverableTransport.getLocalDiscoNode());
|
||||
knownNodes.add(seedTransport1.getLocalDiscoNode());
|
||||
Collections.shuffle(knownNodes, random());
|
||||
List<DiscoveryNode> seedNodes = Arrays.asList(seedNode1, seedNode);
|
||||
List<Supplier<DiscoveryNode>> seedNodes = Arrays.asList(() -> seedNode1, () -> seedNode);
|
||||
Collections.shuffle(seedNodes, random());
|
||||
|
||||
try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
|
||||
|
@ -904,7 +905,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
knownNodes.add(transport3.getLocalDiscoNode());
|
||||
knownNodes.add(transport2.getLocalDiscoNode());
|
||||
Collections.shuffle(knownNodes, random());
|
||||
List<DiscoveryNode> seedNodes = Arrays.asList(node3, node1, node2);
|
||||
List<Supplier<DiscoveryNode>> seedNodes = Arrays.asList(() -> node3, () -> node1, () -> node2);
|
||||
Collections.shuffle(seedNodes, random());
|
||||
|
||||
try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
|
||||
|
@ -1059,7 +1060,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
assertFalse(service.nodeConnected(seedNode));
|
||||
assertFalse(service.nodeConnected(discoverableNode));
|
||||
assertTrue(connection.assertNoRunningConnections());
|
||||
|
@ -1108,9 +1109,9 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
if (randomBoolean()) {
|
||||
updateSeedNodes(connection, Arrays.asList(seedNode));
|
||||
updateSeedNodes(connection, Arrays.asList(() -> seedNode));
|
||||
}
|
||||
CountDownLatch responseLatch = new CountDownLatch(1);
|
||||
AtomicReference<Function<String, DiscoveryNode>> reference = new AtomicReference<>();
|
||||
|
@ -1142,14 +1143,14 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
List<MockTransportService> discoverableTransports = new CopyOnWriteArrayList<>();
|
||||
try {
|
||||
final int numDiscoverableNodes = randomIntBetween(5, 20);
|
||||
List<DiscoveryNode> discoverableNodes = new ArrayList<>(numDiscoverableNodes);
|
||||
for (int i = 0; i < numDiscoverableNodes; i++) {
|
||||
List<Supplier<DiscoveryNode>> discoverableNodes = new ArrayList<>(numDiscoverableNodes);
|
||||
for (int i = 0; i < numDiscoverableNodes; i++ ) {
|
||||
MockTransportService transportService = startTransport("discoverable_node" + i, knownNodes, Version.CURRENT);
|
||||
discoverableNodes.add(transportService.getLocalDiscoNode());
|
||||
discoverableNodes.add(transportService::getLocalDiscoNode);
|
||||
discoverableTransports.add(transportService);
|
||||
}
|
||||
|
||||
List<DiscoveryNode> seedNodes = randomSubsetOf(discoverableNodes);
|
||||
List<Supplier<DiscoveryNode>> seedNodes = randomSubsetOf(discoverableNodes);
|
||||
Collections.shuffle(seedNodes, random());
|
||||
|
||||
try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
|
||||
|
@ -1198,7 +1199,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
discoverableTransports.add(transportService);
|
||||
connection.addConnectedNode(transportService.getLocalDiscoNode());
|
||||
} else {
|
||||
DiscoveryNode node = randomFrom(discoverableNodes);
|
||||
DiscoveryNode node = randomFrom(discoverableNodes).get();
|
||||
connection.onNodeDisconnected(node);
|
||||
}
|
||||
}
|
||||
|
@ -1246,12 +1247,13 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
updateSeedNodes(connection, Arrays.asList(seedNode));
|
||||
Arrays.asList( () -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
updateSeedNodes(connection, Arrays.asList(() -> seedNode));
|
||||
assertTrue(service.nodeConnected(seedNode));
|
||||
assertTrue(service.nodeConnected(discoverableNode));
|
||||
assertTrue(connection.assertNoRunningConnections());
|
||||
List<DiscoveryNode> discoveryNodes = Arrays.asList(otherClusterTransport.getLocalDiscoNode(), seedNode);
|
||||
List<Supplier<DiscoveryNode>> discoveryNodes =
|
||||
Arrays.asList(() -> otherClusterTransport.getLocalDiscoNode(), () -> seedNode);
|
||||
Collections.shuffle(discoveryNodes, random());
|
||||
updateSeedNodes(connection, discoveryNodes);
|
||||
assertTrue(service.nodeConnected(seedNode));
|
||||
|
@ -1262,7 +1264,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
assertTrue(service.nodeConnected(discoverableNode));
|
||||
assertTrue(connection.assertNoRunningConnections());
|
||||
IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () ->
|
||||
updateSeedNodes(connection, Arrays.asList(otherClusterTransport.getLocalDiscoNode())));
|
||||
updateSeedNodes(connection, Arrays.asList(() -> otherClusterTransport.getLocalDiscoNode())));
|
||||
assertThat(illegalStateException.getMessage(),
|
||||
startsWith("handshake failed, mismatched cluster name [Cluster [otherCluster]]" +
|
||||
" - {other_cluster_discoverable_node}"));
|
||||
|
@ -1325,7 +1327,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Collections.singletonList(connectedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
Collections.singletonList(() -> connectedNode), service, Integer.MAX_VALUE, n -> true)) {
|
||||
connection.addConnectedNode(connectedNode);
|
||||
for (int i = 0; i < 10; i++) {
|
||||
//always a direct connection as the remote node is already connected
|
||||
|
@ -1348,4 +1350,34 @@ public class RemoteClusterConnectionTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testLazyResolveTransportAddress() throws Exception {
|
||||
List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
|
||||
try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT);
|
||||
MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) {
|
||||
DiscoveryNode seedNode = seedTransport.getLocalDiscoNode();
|
||||
knownNodes.add(seedTransport.getLocalDiscoNode());
|
||||
knownNodes.add(discoverableTransport.getLocalDiscoNode());
|
||||
Collections.shuffle(knownNodes, random());
|
||||
|
||||
try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
|
||||
service.start();
|
||||
service.acceptIncomingRequests();
|
||||
CountDownLatch multipleResolveLatch = new CountDownLatch(2);
|
||||
Supplier<DiscoveryNode> seedSupplier = () -> {
|
||||
multipleResolveLatch.countDown();
|
||||
return seedNode;
|
||||
};
|
||||
try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
|
||||
Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true)) {
|
||||
updateSeedNodes(connection, Arrays.asList(seedSupplier));
|
||||
// Closing connections leads to RemoteClusterConnection.ConnectHandler.collectRemoteNodes
|
||||
// being called again so we try to resolve the same seed node's host twice
|
||||
discoverableTransport.close();
|
||||
seedTransport.close();
|
||||
assertTrue(multipleResolveLatch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
*/
|
||||
package org.elasticsearch.transport;
|
||||
|
||||
import java.util.function.Supplier;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
|
@ -103,10 +104,19 @@ public class RemoteClusterServiceTests extends ESTestCase {
|
|||
.put("search.remote.foo.seeds", "192.168.0.1").build();
|
||||
expectThrows(IllegalArgumentException.class, () ->
|
||||
RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings).forEach(setting -> setting.get(brokenSettings)));
|
||||
|
||||
Settings brokenPortSettings = Settings.builder()
|
||||
.put("search.remote.foo.seeds", "192.168.0.1:123456789123456789").build();
|
||||
Exception e = expectThrows(
|
||||
IllegalArgumentException.class,
|
||||
() -> RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings)
|
||||
.forEach(setting -> setting.get(brokenPortSettings))
|
||||
);
|
||||
assertEquals("failed to parse port", e.getMessage());
|
||||
}
|
||||
|
||||
public void testBuiltRemoteClustersSeeds() throws Exception {
|
||||
Map<String, List<DiscoveryNode>> map = RemoteClusterService.buildRemoteClustersSeeds(
|
||||
Map<String, List<Supplier<DiscoveryNode>>> map = RemoteClusterService.buildRemoteClustersSeeds(
|
||||
Settings.builder().put("search.remote.foo.seeds", "192.168.0.1:8080").put("search.remote.bar.seeds", "[::1]:9090").build());
|
||||
assertEquals(2, map.size());
|
||||
assertTrue(map.containsKey("foo"));
|
||||
|
@ -114,13 +124,13 @@ public class RemoteClusterServiceTests extends ESTestCase {
|
|||
assertEquals(1, map.get("foo").size());
|
||||
assertEquals(1, map.get("bar").size());
|
||||
|
||||
DiscoveryNode foo = map.get("foo").get(0);
|
||||
DiscoveryNode foo = map.get("foo").get(0).get();
|
||||
|
||||
assertEquals(foo.getAddress(), new TransportAddress(new InetSocketAddress(InetAddress.getByName("192.168.0.1"), 8080)));
|
||||
assertEquals(foo.getId(), "foo#192.168.0.1:8080");
|
||||
assertEquals(foo.getVersion(), Version.CURRENT.minimumCompatibilityVersion());
|
||||
|
||||
DiscoveryNode bar = map.get("bar").get(0);
|
||||
DiscoveryNode bar = map.get("bar").get(0).get();
|
||||
assertEquals(bar.getAddress(), new TransportAddress(new InetSocketAddress(InetAddress.getByName("[::1]"), 9090)));
|
||||
assertEquals(bar.getId(), "bar#[::1]:9090");
|
||||
assertEquals(bar.getVersion(), Version.CURRENT.minimumCompatibilityVersion());
|
||||
|
@ -194,10 +204,10 @@ public class RemoteClusterServiceTests extends ESTestCase {
|
|||
assertFalse(service.isCrossClusterSearchEnabled());
|
||||
service.initializeRemoteClusters();
|
||||
assertFalse(service.isCrossClusterSearchEnabled());
|
||||
service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().address()));
|
||||
service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().toString()));
|
||||
assertTrue(service.isCrossClusterSearchEnabled());
|
||||
assertTrue(service.isRemoteClusterRegistered("cluster_1"));
|
||||
service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().address()));
|
||||
service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().toString()));
|
||||
assertTrue(service.isCrossClusterSearchEnabled());
|
||||
assertTrue(service.isRemoteClusterRegistered("cluster_1"));
|
||||
assertTrue(service.isRemoteClusterRegistered("cluster_2"));
|
||||
|
@ -252,22 +262,17 @@ public class RemoteClusterServiceTests extends ESTestCase {
|
|||
service.initializeRemoteClusters();
|
||||
assertFalse(service.isCrossClusterSearchEnabled());
|
||||
|
||||
final InetSocketAddress c1N1Address = c1N1Node.getAddress().address();
|
||||
final InetSocketAddress c1N2Address = c1N2Node.getAddress().address();
|
||||
final InetSocketAddress c2N1Address = c2N1Node.getAddress().address();
|
||||
final InetSocketAddress c2N2Address = c2N2Node.getAddress().address();
|
||||
|
||||
final CountDownLatch firstLatch = new CountDownLatch(1);
|
||||
service.updateRemoteCluster(
|
||||
"cluster_1",
|
||||
Arrays.asList(c1N1Address, c1N2Address),
|
||||
Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()),
|
||||
connectionListener(firstLatch));
|
||||
firstLatch.await();
|
||||
|
||||
final CountDownLatch secondLatch = new CountDownLatch(1);
|
||||
service.updateRemoteCluster(
|
||||
"cluster_2",
|
||||
Arrays.asList(c2N1Address, c2N2Address),
|
||||
Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()),
|
||||
connectionListener(secondLatch));
|
||||
secondLatch.await();
|
||||
|
||||
|
@ -321,22 +326,17 @@ public class RemoteClusterServiceTests extends ESTestCase {
|
|||
service.initializeRemoteClusters();
|
||||
assertFalse(service.isCrossClusterSearchEnabled());
|
||||
|
||||
final InetSocketAddress c1N1Address = c1N1Node.getAddress().address();
|
||||
final InetSocketAddress c1N2Address = c1N2Node.getAddress().address();
|
||||
final InetSocketAddress c2N1Address = c2N1Node.getAddress().address();
|
||||
final InetSocketAddress c2N2Address = c2N2Node.getAddress().address();
|
||||
|
||||
final CountDownLatch firstLatch = new CountDownLatch(1);
|
||||
service.updateRemoteCluster(
|
||||
"cluster_1",
|
||||
Arrays.asList(c1N1Address, c1N2Address),
|
||||
Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()),
|
||||
connectionListener(firstLatch));
|
||||
firstLatch.await();
|
||||
|
||||
final CountDownLatch secondLatch = new CountDownLatch(1);
|
||||
service.updateRemoteCluster(
|
||||
"cluster_2",
|
||||
Arrays.asList(c2N1Address, c2N2Address),
|
||||
Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()),
|
||||
connectionListener(secondLatch));
|
||||
secondLatch.await();
|
||||
|
||||
|
@ -398,22 +398,17 @@ public class RemoteClusterServiceTests extends ESTestCase {
|
|||
service.initializeRemoteClusters();
|
||||
assertFalse(service.isCrossClusterSearchEnabled());
|
||||
|
||||
final InetSocketAddress c1N1Address = c1N1Node.getAddress().address();
|
||||
final InetSocketAddress c1N2Address = c1N2Node.getAddress().address();
|
||||
final InetSocketAddress c2N1Address = c2N1Node.getAddress().address();
|
||||
final InetSocketAddress c2N2Address = c2N2Node.getAddress().address();
|
||||
|
||||
final CountDownLatch firstLatch = new CountDownLatch(1);
|
||||
service.updateRemoteCluster(
|
||||
"cluster_1",
|
||||
Arrays.asList(c1N1Address, c1N2Address),
|
||||
Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()),
|
||||
connectionListener(firstLatch));
|
||||
firstLatch.await();
|
||||
|
||||
final CountDownLatch secondLatch = new CountDownLatch(1);
|
||||
service.updateRemoteCluster(
|
||||
"cluster_2",
|
||||
Arrays.asList(c2N1Address, c2N2Address),
|
||||
Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()),
|
||||
connectionListener(secondLatch));
|
||||
secondLatch.await();
|
||||
CountDownLatch latch = new CountDownLatch(1);
|
||||
|
|
|
@ -722,3 +722,30 @@ setups['sensor_prefab_data'] = '''
{"node.terms.value":"c","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.0,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516294800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}

'''
setups['admin_role'] = '''
  - do:
      xpack.security.put_role:
        name: "my_admin_role"
        body: >
            {
              "cluster": ["all"],
              "indices": [
                {"names": ["index1", "index2" ], "privileges": ["all"], "field_security" : {"grant" : [ "title", "body" ]}}
              ],
              "run_as": [ "other_user" ],
              "metadata" : {"version": 1}
            }
'''
setups['jacknich_user'] = '''
  - do:
      xpack.security.put_user:
        username: "jacknich"
        body: >
            {
              "password" : "test-password",
              "roles" : [ "admin", "other_role1" ],
              "full_name" : "Jack Nicholson",
              "email" : "jacknich@example.com",
              "metadata" : { "intelligence" : 7 }
            }
'''

@ -16,6 +16,7 @@
=== Data

* <<rollup-get-rollup-caps,Get Rollup Capabilities>>
* <<rollup-get-rollup-index-caps,Get Rollup Index Capabilities>>

[float]
[[rollup-search-endpoint]]

@ -31,5 +32,6 @@ include::rollup/put-job.asciidoc[]
include::rollup/start-job.asciidoc[]
include::rollup/stop-job.asciidoc[]
include::rollup/rollup-caps.asciidoc[]
include::rollup/rollup-index-caps.asciidoc[]
include::rollup/rollup-search.asciidoc[]
include::rollup/rollup-job-config.asciidoc[]

@ -27,8 +27,8 @@ live?
==== Path Parameters

`index`::
(string) Index, indices or index-pattern to return rollup capabilities for. If omitted (or `_all` is used) all available
rollup job capabilities will be returned
(string) Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch
rollup capabilities from all jobs
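
For orientation only (this snippet is not part of the change set), a lookup across all jobs might look like the sketch below; the `_xpack/rollup/data` endpoint and the `_all` shortcut are assumptions inferred from the parameter description above, so verify them against the full rollup-caps page.

[source,js]
--------------------------------------------------
# illustrative sketch only -- endpoint assumed, not taken from this diff
GET _xpack/rollup/data/_all
--------------------------------------------------
// NOTCONSOLE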

==== Request Body

@ -26,15 +26,13 @@ This API will allow you to determine:
`index`::
(string) Index or index-pattern of concrete rollup indices to check for capabilities.

==== Request Body

There is no request body for the Get Jobs API.

==== Authorization

You must have `monitor`, `monitor_rollup`, `manage` or `manage_rollup` cluster privileges to use this API.
You must have the `read` index privilege on the index that stores the rollup results.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].

@ -82,6 +82,12 @@ In the above example, there are several pieces of logistical configuration for t
will tend to execute faster, but will require more memory during processing. This has no effect on how the data is rolled up; it is
merely used for tweaking the speed/memory cost of the indexer.

[NOTE]
The `index_pattern` cannot be a pattern that would also match the destination `rollup_index`. For example, the pattern
`"foo-*"` would match the rollup index `"foo-rollup"`. This causes problems because the rollup job would attempt
to roll up its own data at runtime. If you attempt to configure a pattern that matches the `rollup_index`, an exception
will be thrown to prevent this behavior, as illustrated in the sketch below.
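
To make the constraint concrete, here is a hypothetical job sketch (the job id, field names, and schedule are illustrative and not taken from this change): the `index_pattern` `"sensor-*"` does not match the `rollup_index` `"sensor_rollup"`, so the job can never attempt to roll up its own output.

[source,js]
--------------------------------------------------
# illustrative sketch only -- names and values are assumptions, not from this commit
PUT _xpack/rollup/job/sensor
{
  "index_pattern": "sensor-*", <1>
  "rollup_index": "sensor_rollup", <2>
  "cron": "*/30 * * * * ?",
  "page_size": 1000,
  "groups": {
    "date_histogram": {
      "field": "timestamp",
      "interval": "1h"
    }
  }
}
--------------------------------------------------
// NOTCONSOLE
<1> Pattern of source indices to roll up (illustrative).
<2> Deliberately not matched by `sensor-*`, so the exception described above is never triggered.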

[[rollup-groups-config]]
==== Grouping Config

@ -34,7 +34,7 @@ or using `_all`, is not permitted

The request body supports a subset of features from the regular Search API. It supports:

- `query` param for specifying a DSL query, subject to some limitations
- `query` param for specifying a DSL query, subject to some limitations (see <<rollup-search-limitations>> and <<rollup-agg-limitations>>)
- `aggregations` param for specifying aggregations

Functionality that is not available:

@ -2,21 +2,59 @@
[[security-api]]
== Security APIs

You can use the following APIs to perform {security} activities.

* <<security-api-authenticate>>
* <<security-api-clear-cache>>
* <<security-api-privileges>>
* <<security-api-roles>>
* <<security-api-role-mapping>>
* <<security-api-ssl>>
* <<security-api-tokens>>
* <<security-api-users>>

[float]
[[security-role-apis]]
=== Roles

You can use the following APIs to add, remove, and retrieve roles in the native realm:

* <<security-api-put-role,Create role>>, <<security-api-delete-role,Delete role>>
* <<security-api-clear-role-cache,Clear roles cache>>
* <<security-api-get-role,Get roles>>

[float]
[[security-token-apis]]
=== Tokens

You can use the following APIs to create and invalidate bearer tokens for access
without requiring basic authentication:

* <<security-api-get-token,Get token>>, <<security-api-invalidate-token,Invalidate token>>

[float]
[[security-user-apis]]
=== Users

You can use the following APIs to create, read, update, and delete users from the
native realm:

* <<security-api-put-user,Create users>>, <<security-api-delete-user,Delete users>>
* <<security-api-enable-user,Enable users>>, <<security-api-disable-user,Disable users>>
* <<security-api-change-password,Change passwords>>
* <<security-api-get-user,Get users>>

include::security/authenticate.asciidoc[]
include::security/change-password.asciidoc[]
include::security/clear-cache.asciidoc[]
include::security/clear-roles-cache.asciidoc[]
include::security/create-roles.asciidoc[]
include::security/create-users.asciidoc[]
include::security/delete-roles.asciidoc[]
include::security/delete-tokens.asciidoc[]
include::security/delete-users.asciidoc[]
include::security/disable-users.asciidoc[]
include::security/enable-users.asciidoc[]
include::security/get-roles.asciidoc[]
include::security/get-tokens.asciidoc[]
include::security/get-users.asciidoc[]
include::security/privileges.asciidoc[]
include::security/roles.asciidoc[]
include::security/role-mapping.asciidoc[]
include::security/ssl.asciidoc[]
include::security/tokens.asciidoc[]
include::security/users.asciidoc[]

@ -1,9 +1,8 @@
[role="xpack"]
[[security-api-change-password]]
=== Change Password API
=== Change passwords API

The Change Password API enables you to submit a request to change the password
of a user.
Changes the passwords of users in the native realm.

==== Request

@ -12,6 +11,15 @@ of a user.
`POST _xpack/security/user/<username>/_password`

==== Description

You can use the <<security-api-put-user,create user API>> to update everything
but a user's `username` and `password`. This API changes a user's password.

For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`username`::

@ -33,16 +41,17 @@ privilege can change passwords of other users.

==== Examples

The following example updates the password for the `elastic` user:
The following example updates the password for the `jacknich` user:

[source,js]
--------------------------------------------------
POST _xpack/security/user/elastic/_password
POST /_xpack/security/user/jacknich/_password
{
  "password": "x-pack-test-password"
  "password" : "s3cr3t"
}
--------------------------------------------------
// CONSOLE
// TEST[setup:jacknich_user]

A successful call returns an empty JSON structure.

@ -0,0 +1,39 @@
[role="xpack"]
[[security-api-clear-role-cache]]
=== Clear roles cache API

Evicts roles from the native role cache.

==== Request

`POST /_xpack/security/role/<name>/_clear_cache`

==== Description

For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`name`::
(string) The name of the role.

//==== Request Body

==== Authorization

To use this API, you must have at least the `manage_security` cluster
privilege.

==== Examples

The clear roles cache API evicts roles from the native role cache. For example,
to clear the cache for `my_admin_role`:

[source,js]
--------------------------------------------------
POST /_xpack/security/role/my_admin_role/_clear_cache
--------------------------------------------------
// CONSOLE

@ -0,0 +1,102 @@
[role="xpack"]
[[security-api-put-role]]
=== Create roles API

Adds roles in the native realm.

==== Request

`POST /_xpack/security/role/<name>` +

`PUT /_xpack/security/role/<name>`

==== Description

The role API is generally the preferred way to manage roles, rather than using
file-based role management. For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`name`::
(string) The name of the role.

==== Request Body

The following parameters can be specified in the body of a PUT or POST request
and pertain to adding a role:

`cluster`:: (list) A list of cluster privileges. These privileges define the
cluster level actions that users with this role are able to execute.

`indices`:: (list) A list of indices permissions entries.
`field_security`::: (list) The document fields that the owners of the role have
read access to. For more information, see
{stack-ov}/field-and-document-access-control.html[Setting up field and document level security].
`names` (required)::: (list) A list of indices (or index name patterns) to which the
permissions in this entry apply.
`privileges` (required)::: (list) The index level privileges that the owners of the role
have on the specified indices.
`query`::: A search query that defines the documents the owners of the role have
read access to. A document within the specified indices must match this query in
order for it to be accessible by the owners of the role.

`metadata`:: (object) Optional meta-data. Within the `metadata` object, keys
that begin with `_` are reserved for system usage.

`run_as`:: (list) A list of users that the owners of this role can impersonate.
For more information, see
{stack-ov}/run-as-privilege.html[Submitting requests on behalf of other users].

For more information, see {stack-ov}/defining-roles.html[Defining roles].

==== Authorization

To use this API, you must have at least the `manage_security` cluster
privilege.

==== Examples

The following example adds a role called `my_admin_role`:

[source,js]
--------------------------------------------------
POST /_xpack/security/role/my_admin_role
{
  "cluster": ["all"],
  "indices": [
    {
      "names": [ "index1", "index2" ],
      "privileges": ["all"],
      "field_security" : { // optional
        "grant" : [ "title", "body" ]
      },
      "query": "{\"match\": {\"title\": \"foo\"}}" // optional
    }
  ],
  "run_as": [ "other_user" ], // optional
  "metadata" : { // optional
    "version" : 1
  }
}
--------------------------------------------------
// CONSOLE

A successful call returns a JSON structure that shows whether the role has been
created or updated.

[source,js]
--------------------------------------------------
{
  "role": {
    "created": true <1>
  }
}
--------------------------------------------------
// TESTRESPONSE
<1> When an existing role is updated, `created` is set to false.

@@ -0,0 +1,107 @@
[role="xpack"]
[[security-api-put-user]]
=== Create users API

Creates and updates users in the native realm. These users are commonly referred
to as _native users_.


==== Request

`POST /_xpack/security/user/<username>` +

`PUT /_xpack/security/user/<username>`


==== Description

When updating a user, you can update everything but its `username` and `password`.
To change a user's password, use the
<<security-api-change-password, change password API>>.

For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`username` (required)::
(string) An identifier for the user.
+
--
[[username-validation]]
NOTE: Usernames must be at least 1 and no more than 1024 characters. They can
contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, punctuation, and
printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. Leading or trailing whitespace is not allowed.

--


==== Request Body

The following parameters can be specified in the body of a POST or PUT request:

`enabled`::
(boolean) Specifies whether the user is enabled. The default value is `true`.

`email`::
(string) The email of the user.

`full_name`::
(string) The full name of the user.

`metadata`::
(object) Arbitrary metadata that you want to associate with the user.

`password` (required)::
(string) The user's password. Passwords must be at least 6 characters long.

`roles` (required)::
(list) A set of roles the user has. The roles determine the user's access
permissions. To create a user without any roles, specify an empty list: `[]`.


==== Authorization

To use this API, you must have at least the `manage_security` cluster privilege.


==== Examples

The following example creates a user `jacknich`:

[source,js]
--------------------------------------------------
POST /_xpack/security/user/jacknich
{
  "password" : "j@rV1s",
  "roles" : [ "admin", "other_role1" ],
  "full_name" : "Jack Nicholson",
  "email" : "jacknich@example.com",
  "metadata" : {
    "intelligence" : 7
  }
}
--------------------------------------------------
// CONSOLE

A successful call returns a JSON structure that shows whether the user has been
created or updated.

[source,js]
--------------------------------------------------
{
  "user": {
    "created" : true <1>
  }
}
--------------------------------------------------
// TESTRESPONSE
<1> When an existing user is updated, `created` is set to false.

After you add a user, requests from that user can be authenticated. For example:

[source,shell]
--------------------------------------------------
curl -u jacknich:j@rV1s http://localhost:9200/_cluster/health
--------------------------------------------------
// NOTCONSOLE
@@ -0,0 +1,53 @@
[role="xpack"]
[[security-api-delete-role]]
=== Delete roles API

Removes roles in the native realm.

==== Request

`DELETE /_xpack/security/role/<name>`


==== Description

The Roles API is generally the preferred way to manage roles, rather than using
file-based role management. For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.


==== Path Parameters

`name`::
(string) The name of the role.

//==== Request Body

==== Authorization

To use this API, you must have at least the `manage_security` cluster
privilege.


==== Examples

The following example deletes a `my_admin_role` role:

[source,js]
--------------------------------------------------
DELETE /_xpack/security/role/my_admin_role
--------------------------------------------------
// CONSOLE
// TEST[setup:admin_role]

If the role is successfully deleted, the request returns `{"found": true}`.
Otherwise, `found` is set to false.

[source,js]
--------------------------------------------------
{
  "found" : true
}
--------------------------------------------------
// TESTRESPONSE
@@ -0,0 +1,54 @@
[role="xpack"]
[[security-api-invalidate-token]]
=== Delete token API

Invalidates a bearer token for access without requiring basic authentication.

==== Request

`DELETE /_xpack/security/oauth2/token`

==== Description

The tokens returned by the <<security-api-get-token,get token API>> have a
finite period of time for which they are valid and after that time period, they
can no longer be used. That time period is defined by the
`xpack.security.authc.token.timeout` setting. For more information, see
<<token-service-settings>>.

If you want to invalidate a token immediately, use this delete token API.


==== Request Body

The following parameters can be specified in the body of a DELETE request and
pertain to deleting a token:

`token` (required)::
(string) An access token.

==== Examples

The following example invalidates the specified token immediately:

[source,js]
--------------------------------------------------
DELETE /_xpack/security/oauth2/token
{
  "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ=="
}
--------------------------------------------------
// NOTCONSOLE

A successful call returns a JSON structure that indicates whether the token
has already been invalidated.

[source,js]
--------------------------------------------------
{
  "created" : true <1>
}
--------------------------------------------------
// NOTCONSOLE

<1> When a token has already been invalidated, `created` is set to false.
@@ -0,0 +1,48 @@
[role="xpack"]
[[security-api-delete-user]]
=== Delete users API

Deletes users from the native realm.

==== Request

`DELETE /_xpack/security/user/<username>`

==== Description

For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`username` (required)::
(string) An identifier for the user.

//==== Request Body

==== Authorization

To use this API, you must have at least the `manage_security` cluster privilege.


==== Examples

The following example deletes the user `jacknich`:

[source,js]
--------------------------------------------------
DELETE /_xpack/security/user/jacknich
--------------------------------------------------
// CONSOLE
// TEST[setup:jacknich_user]

If the user is successfully deleted, the request returns `{"found": true}`.
Otherwise, `found` is set to false.

[source,js]
--------------------------------------------------
{
  "found" : true
}
--------------------------------------------------
// TESTRESPONSE
@@ -0,0 +1,43 @@
[role="xpack"]
[[security-api-disable-user]]
=== Disable users API

Disables users in the native realm.


==== Request

`PUT /_xpack/security/user/<username>/_disable`


==== Description

By default, when you create users, they are enabled. You can use this API to
revoke a user's access to {es}. To re-enable a user, use the
<<security-api-enable-user,enable users API>>.

For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`username` (required)::
(string) An identifier for the user.

//==== Request Body

==== Authorization

To use this API, you must have at least the `manage_security` cluster privilege.


==== Examples

The following example disables the user `jacknich`:

[source,js]
--------------------------------------------------
PUT /_xpack/security/user/jacknich/_disable
--------------------------------------------------
// CONSOLE
// TEST[setup:jacknich_user]
@@ -0,0 +1,42 @@
[role="xpack"]
[[security-api-enable-user]]
=== Enable users API

Enables users in the native realm.


==== Request

`PUT /_xpack/security/user/<username>/_enable`


==== Description

By default, when you create users, they are enabled. You can use this enable
users API and the <<security-api-disable-user,disable users API>> to change that attribute.

For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`username` (required)::
(string) An identifier for the user.

//==== Request Body

==== Authorization

To use this API, you must have at least the `manage_security` cluster privilege.


==== Examples

The following example enables the user `jacknich`:

[source,js]
--------------------------------------------------
PUT /_xpack/security/user/jacknich/_enable
--------------------------------------------------
// CONSOLE
// TEST[setup:jacknich_user]
@@ -0,0 +1,85 @@
[role="xpack"]
[[security-api-get-role]]
=== Get roles API

Retrieves roles in the native realm.

==== Request

`GET /_xpack/security/role` +

`GET /_xpack/security/role/<name>` +

==== Description

For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`name`::
(string) The name of the role. You can specify multiple roles as a
comma-separated list. If you do not specify this parameter, the API
returns information about all roles.

//==== Request Body

==== Authorization

To use this API, you must have at least the `manage_security` cluster
privilege.


==== Examples

The following example retrieves information about the `my_admin_role` role in
the native realm:

[source,js]
--------------------------------------------------
GET /_xpack/security/role/my_admin_role
--------------------------------------------------
// CONSOLE
// TEST[setup:admin_role]

A successful call returns an array of roles with the JSON representation of the
role. If the role is not defined in the native realm, the request returns 404.

[source,js]
--------------------------------------------------
{
  "my_admin_role": {
    "cluster" : [ "all" ],
    "indices" : [
      {
        "names" : [ "index1", "index2" ],
        "privileges" : [ "all" ],
        "field_security" : {
          "grant" : [ "title", "body" ]}
      }
    ],
    "applications" : [ ],
    "run_as" : [ "other_user" ],
    "metadata" : {
      "version" : 1
    },
    "transient_metadata": {
      "enabled": true
    }
  }
}
--------------------------------------------------
// TESTRESPONSE

To retrieve all roles, omit the role name:

[source,js]
--------------------------------------------------
GET /_xpack/security/role
--------------------------------------------------
// CONSOLE
// TEST[continued]

NOTE: If a single role is requested, that role is returned as the response. When
requesting multiple roles, an object is returned holding the found roles, each
keyed by the relevant role name.
@@ -1,15 +1,12 @@
[role="xpack"]
[[security-api-tokens]]
=== Token Management APIs
[[security-api-get-token]]
=== Get token API

The `token` API enables you to create and invalidate bearer tokens for access
without requiring basic authentication.
Creates a bearer token for access without requiring basic authentication.

==== Request

`POST /_xpack/security/oauth2/token` +

`DELETE /_xpack/security/oauth2/token`
`POST /_xpack/security/oauth2/token`

==== Description

@@ -19,20 +16,20 @@ you can explicitly enable the `xpack.security.authc.token.enabled` setting. When
you are running in production mode, a bootstrap check prevents you from enabling
the token service unless you also enable TLS on the HTTP interface.

The Get Token API takes the same parameters as a typical OAuth 2.0 token API
The get token API takes the same parameters as a typical OAuth 2.0 token API
except for the use of a JSON request body.

A successful Get Token API call returns a JSON structure that contains the access
A successful get token API call returns a JSON structure that contains the access
token, the amount of time (seconds) that the token expires in, the type, and the
scope if available.

The tokens returned by the Get Token API have a finite period of time for which
The tokens returned by the get token API have a finite period of time for which
they are valid and after that time period, they can no longer be used. That time
period is defined by the `xpack.security.authc.token.timeout` setting. For more
information, see <<token-service-settings>>.

If you want to invalidate a token immediately, you can do so by using the Delete
Token API.
If you want to invalidate a token immediately, you can do so by using the
<<security-api-invalidate-token,delete token API>>.


==== Request Body

@@ -41,28 +38,28 @@ The following parameters can be specified in the body of a POST request and
pertain to creating a token:

`grant_type`::
(string) The type of grant. Currently only the `password` grant type is supported.
(string) The type of grant. Valid grant types are: `password` and `refresh_token`.

`password` (required)::
(string) The user's password.
`password`::
(string) The user's password. If you specify the `password` grant type, this
parameter is required.

`refresh_token`::
(string) If you specify the `refresh_token` grant type, this parameter is
required. It contains the string that was returned when you created the token
and enables you to extend its life.

`scope`::
(string) The scope of the token. Currently tokens are only issued for a scope of
`FULL` regardless of the value sent with the request.

`username` (required)::
(string) The username that identifies the user.

The following parameters can be specified in the body of a DELETE request and
pertain to deleting a token:

`token`::
(string) An access token.
`username`::
(string) The username that identifies the user. If you specify the `password`
grant type, this parameter is required.

==== Examples
[[security-api-get-token]]
To obtain a token, submit a POST request to the `/_xpack/security/oauth2/token`
endpoint.

The following example obtains a token for the `test_admin` user:

[source,js]
--------------------------------------------------
@@ -101,8 +98,8 @@ curl -H "Authorization: Bearer dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvb
// NOTCONSOLE

[[security-api-refresh-token]]
To extend the life of an existing token, the token api may be called again with the refresh
token within 24 hours of the token's creation.
To extend the life of an existing token, you can call the API again with the
refresh token within 24 hours of the token's creation. For example:

[source,js]
--------------------------------------------------
@@ -116,7 +113,8 @@ POST /_xpack/security/oauth2/token
// TEST[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/]
// TEST[continued]

The API will return a new token and refresh token. Each refresh token may only be used one time.
The API will return a new token and refresh token. Each refresh token may only
be used one time.

[source,js]
--------------------------------------------------
@@ -128,32 +126,4 @@ The API will return a new token and refresh token. Each refresh token may only b
}
--------------------------------------------------
// TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/]
// TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/]

[[security-api-invalidate-token]]
If a token must be invalidated immediately, you can do so by submitting a DELETE
request to `/_xpack/security/oauth2/token`. For example:

[source,js]
--------------------------------------------------
DELETE /_xpack/security/oauth2/token
{
  "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ=="
}
--------------------------------------------------
// CONSOLE
// TEST[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/]
// TEST[continued]

A successful call returns a JSON structure that indicates whether the token
has already been invalidated.

[source,js]
--------------------------------------------------
{
  "created" : true <1>
}
--------------------------------------------------
// TESTRESPONSE

<1> When a token has already been invalidated, `created` is set to false.
// TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/]
@@ -0,0 +1,74 @@
[role="xpack"]
[[security-api-get-user]]
=== Get users API

Retrieves information about users in the native realm.


==== Request

`GET /_xpack/security/user` +

`GET /_xpack/security/user/<username>`

==== Description

For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`username`::
(string) An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves
information about all users.

//==== Request Body

==== Authorization

To use this API, you must have at least the `manage_security` cluster privilege.


==== Examples

To retrieve a native user, submit a GET request to the `/_xpack/security/user/<username>`
endpoint:

[source,js]
--------------------------------------------------
GET /_xpack/security/user/jacknich
--------------------------------------------------
// CONSOLE
// TEST[setup:jacknich_user]

A successful call returns an array of users with the JSON representation of the
user. Note that user passwords are not included.

[source,js]
--------------------------------------------------
{
  "jacknich": {
    "username": "jacknich",
    "roles": [
      "admin", "other_role1"
    ],
    "full_name": "Jack Nicholson",
    "email": "jacknich@example.com",
    "metadata": { "intelligence" : 7 },
    "enabled": true
  }
}
--------------------------------------------------
// CONSOLE
// TESTRESPONSE

If the user is not defined in the `native` realm, the request 404s.

Omit the username to retrieve all users:

[source,js]
--------------------------------------------------
GET /_xpack/security/user
--------------------------------------------------
// CONSOLE
// TEST[continued]
@@ -22,7 +22,7 @@ Role mappings have _rules_ that identify users and a list of _roles_ that are
granted to those users.

NOTE: This API does not create roles. Rather, it maps users to existing roles.
Roles can be created by using <<security-api-roles, Role Management APIs>> or
Roles can be created by using <<security-role-apis,role management APIs>> or
{xpack-ref}/defining-roles.html#roles-management-file[roles files].

The role mapping rule is a logical condition that is expressed using a JSON DSL.
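As a rough sketch of what such a rule can look like (the mapping name
`administrators`, the `superuser` role, and the group DN below are illustrative
values, not taken from this page), a mapping that grants a role to members of
an LDAP group might be expressed as:

[source,js]
--------------------------------------------------
PUT /_xpack/security/role_mapping/administrators
{
  "roles": [ "superuser" ],
  "enabled": true,
  "rules": {
    "field": { "groups": "cn=admins,dc=example,dc=com" }
  }
}
--------------------------------------------------
// NOTCONSOLE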
@@ -1,205 +0,0 @@
[role="xpack"]
[[security-api-roles]]
=== Role Management APIs

The Roles API enables you to add, remove, and retrieve roles in the `native`
realm.

==== Request

`GET /_xpack/security/role` +

`GET /_xpack/security/role/<name>` +

`DELETE /_xpack/security/role/<name>` +

`POST /_xpack/security/role/<name>/_clear_cache` +

`POST /_xpack/security/role/<name>` +

`PUT /_xpack/security/role/<name>`


==== Description

The Roles API is generally the preferred way to manage roles, rather than using
file-based role management. For more information, see
{xpack-ref}/authorization.html[Configuring Role-based Access Control].


==== Path Parameters

`name`::
(string) The name of the role. If you do not specify this parameter, the
Get Roles API returns information about all roles.


==== Request Body

The following parameters can be specified in the body of a PUT or POST request
and pertain to adding a role:

`cluster`:: (list) A list of cluster privileges. These privileges define the
cluster level actions that users with this role are able to execute.

`indices`:: (list) A list of indices permissions entries.
`field_security`::: (list) The document fields that the owners of the role have
read access to. For more information, see
{xpack-ref}/field-and-document-access-control.html[Setting Up Field and Document Level Security].
`names` (required)::: (list) A list of indices (or index name patterns) to which the
permissions in this entry apply.
`privileges`(required)::: (list) The index level privileges that the owners of the role
have on the specified indices.
`query`::: A search query that defines the documents the owners of the role have
read access to. A document within the specified indices must match this query in
order for it to be accessible by the owners of the role.

`metadata`:: (object) Optional meta-data. Within the `metadata` object, keys
that begin with `_` are reserved for system usage.

`run_as`:: (list) A list of users that the owners of this role can impersonate.
For more information, see
{xpack-ref}/run-as-privilege.html[Submitting Requests on Behalf of Other Users].

For more information, see {xpack-ref}/defining-roles.html[Defining Roles].


==== Authorization

To use this API, you must have at least the `manage_security` cluster
privilege.


==== Examples

[[security-api-put-role]]
To add a role, submit a PUT or POST request to the `/_xpack/security/role/<rolename>`
endpoint:

[source,js]
--------------------------------------------------
POST /_xpack/security/role/my_admin_role
{
  "cluster": ["all"],
  "indices": [
    {
      "names": [ "index1", "index2" ],
      "privileges": ["all"],
      "field_security" : { // optional
        "grant" : [ "title", "body" ]
      },
      "query": "{\"match\": {\"title\": \"foo\"}}" // optional
    }
  ],
  "run_as": [ "other_user" ], // optional
  "metadata" : { // optional
    "version" : 1
  }
}
--------------------------------------------------
// CONSOLE

A successful call returns a JSON structure that shows whether the role has been
created or updated.

[source,js]
--------------------------------------------------
{
  "role": {
    "created": true <1>
  }
}
--------------------------------------------------
// TESTRESPONSE
<1> When an existing role is updated, `created` is set to false.

[[security-api-get-role]]
To retrieve a role from the `native` Security realm, issue a GET request to the
`/_xpack/security/role/<rolename>` endpoint:

[source,js]
--------------------------------------------------
GET /_xpack/security/role/my_admin_role
--------------------------------------------------
// CONSOLE
// TEST[continued]

A successful call returns an array of roles with the JSON representation of the
role. If the role is not defined in the `native` realm, the request 404s.

[source,js]
--------------------------------------------------
{
  "my_admin_role": {
    "cluster" : [ "all" ],
    "indices" : [ {
      "names" : [ "index1", "index2" ],
      "privileges" : [ "all" ],
      "field_security" : {
        "grant" : [ "title", "body" ]
      },
      "query" : "{\"match\": {\"title\": \"foo\"}}"
    } ],
    "applications" : [ ],
    "run_as" : [ "other_user" ],
    "metadata" : {
      "version" : 1
    },
    "transient_metadata": {
      "enabled": true
    }
  }
}
--------------------------------------------------
// TESTRESPONSE

You can specify multiple roles as a comma-separated list. To retrieve all roles,
omit the role name.

[source,js]
--------------------------------------------------
# Retrieve roles "r1", "r2", and "my_admin_role"
GET /_xpack/security/role/r1,r2,my_admin_role

# Retrieve all roles
GET /_xpack/security/role
--------------------------------------------------
// CONSOLE
// TEST[continued]

NOTE: If single role is requested, that role is returned as the response. When
requesting multiple roles, an object is returned holding the found roles, each
keyed by the relevant role name.

[[security-api-delete-role]]
To delete a role, submit a DELETE request to the `/_xpack/security/role/<rolename>`
endpoint:

[source,js]
--------------------------------------------------
DELETE /_xpack/security/role/my_admin_role
--------------------------------------------------
// CONSOLE
// TEST[continued]

If the role is successfully deleted, the request returns `{"found": true}`.
Otherwise, `found` is set to false.

[source,js]
--------------------------------------------------
{
  "found" : true
}
--------------------------------------------------
// TESTRESPONSE

[[security-api-clear-role-cache]]
The Clear Roles Cache API evicts roles from the native role cache. To clear the
cache for a role, submit a POST request `/_xpack/security/role/<rolename>/_clear_cache`
endpoint:

[source,js]
--------------------------------------------------
POST /_xpack/security/role/my_admin_role/_clear_cache
--------------------------------------------------
// CONSOLE
@@ -1,226 +0,0 @@
[role="xpack"]
[[security-api-users]]
=== User Management APIs

The `user` API enables you to create, read, update, and delete users from the
`native` realm. These users are commonly referred to as *native users*.


==== Request

`GET /_xpack/security/user` +

`GET /_xpack/security/user/<username>` +

`DELETE /_xpack/security/user/<username>` +

`POST /_xpack/security/user/<username>` +

`PUT /_xpack/security/user/<username>` +

`PUT /_xpack/security/user/<username>/_disable` +

`PUT /_xpack/security/user/<username>/_enable` +

`PUT /_xpack/security/user/<username>/_password`


==== Description

You can use the PUT user API to create or update users. When updating a user,
you can update everything but its `username` and `password`. To change a user's
password, use the <<security-api-reset-user-password, reset password API>>.

[[username-validation]]
NOTE: Usernames must be at least 1 and no more than 1024 characters. They can
contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, punctuation, and
printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block].
Leading or trailing whitespace is not allowed.

==== Path Parameters

`username`::
(string) An identifier for the user. If you omit this parameter from a Get
User API request, it retrieves information about all users.


==== Request Body

The following parameters can be specified in the body of a POST or PUT request
and pertain to creating a user:

`enabled`::
(boolean) Specifies whether the user is enabled. The default value is `true`.

`email`::
(string) The email of the user.

`full_name`::
(string) The full name of the user.

`metadata`::
(object) Arbitrary metadata that you want to associate with the user.

`password` (required)::
(string) The user's password. Passwords must be at least 6 characters long.

`roles` (required)::
(list) A set of roles the user has. The roles determine the user's access
permissions. To create a user without any roles, specify an empty list: `[]`.

==== Authorization

To use this API, you must have at least the `manage_security` cluster privilege.


==== Examples

[[security-api-put-user]]
To add a user, submit a PUT or POST request to the `/_xpack/security/user/<username>`
endpoint.

[source,js]
--------------------------------------------------
POST /_xpack/security/user/jacknich
{
  "password" : "j@rV1s",
  "roles" : [ "admin", "other_role1" ],
  "full_name" : "Jack Nicholson",
  "email" : "jacknich@example.com",
  "metadata" : {
    "intelligence" : 7
  }
}
--------------------------------------------------
// CONSOLE

A successful call returns a JSON structure that shows whether the user has been
created or updated.

[source,js]
--------------------------------------------------
{
  "user": {
    "created" : true <1>
  }
}
--------------------------------------------------
// TESTRESPONSE
<1> When an existing user is updated, `created` is set to false.

After you add a user through the Users API, requests from that user can be
authenticated. For example:

[source,shell]
--------------------------------------------------
curl -u jacknich:j@rV1s http://localhost:9200/_cluster/health
--------------------------------------------------
// NOTCONSOLE

[[security-api-get-user]]
To retrieve a native user, submit a GET request to the `/_xpack/security/user/<username>`
endpoint:

[source,js]
--------------------------------------------------
GET /_xpack/security/user/jacknich
--------------------------------------------------
// CONSOLE
// TEST[continued]

A successful call returns an array of users with the JSON representation of the
user. Note that user passwords are not included.

[source,js]
--------------------------------------------------
{
  "jacknich": { <1>
    "username" : "jacknich",
    "roles" : [ "admin", "other_role1" ],
    "full_name" : "Jack Nicholson",
    "email" : "jacknich@example.com",
    "enabled": true,
    "metadata" : {
      "intelligence" : 7
    }
  }
}
--------------------------------------------------
// TESTRESPONSE
<1> If the user is not defined in the `native` realm, the request 404s.

You can specify multiple usernames as a comma-separated list:

[source,js]
--------------------------------------------------
GET /_xpack/security/user/jacknich,rdinero
--------------------------------------------------
// CONSOLE
// TEST[continued]

Omit the username to retrieve all users:

[source,js]
--------------------------------------------------
GET /_xpack/security/user
--------------------------------------------------
// CONSOLE
// TEST[continued]

[[security-api-reset-user-password]]
To reset the password for a user, submit a PUT request to the
`/_xpack/security/user/<username>/_password` endpoint:

[source,js]
--------------------------------------------------
PUT /_xpack/security/user/jacknich/_password
{
  "password" : "s3cr3t"
}
--------------------------------------------------
// CONSOLE
// TEST[continued]

[[security-api-disable-user]]
To disable a user, submit a PUT request to the
`/_xpack/security/user/<username>/_disable` endpoint:

[source,js]
--------------------------------------------------
PUT /_xpack/security/user/jacknich/_disable
--------------------------------------------------
// CONSOLE
// TEST[continued]

[[security-api-enable-user]]
To enable a user, submit a PUT request to the
`/_xpack/security/user/<username>/_enable` endpoint:

[source,js]
--------------------------------------------------
PUT /_xpack/security/user/jacknich/_enable
--------------------------------------------------
// CONSOLE
// TEST[continued]

[[security-api-delete-user]]
To delete a user, submit a DELETE request to the `/_xpack/security/user/<username>`
endpoint:

[source,js]
--------------------------------------------------
DELETE /_xpack/security/user/jacknich
--------------------------------------------------
// CONSOLE
// TEST[continued]

If the user is successfully deleted, the request returns `{"found": true}`.
Otherwise, `found` is set to false.

[source,js]
--------------------------------------------------
{
  "found" : true
}
--------------------------------------------------
// TESTRESPONSE
@@ -15,18 +15,19 @@ Most {rollup} endpoints have the following base:
[[rollup-api-jobs]]
=== /job/

* {ref}/rollup-put-job.html[PUT /job/<job_id+++>+++]: Create a job
* {ref}/rollup-get-job.html[GET /job]: List jobs
* {ref}/rollup-get-job.html[GET /job/<job_id+++>+++]: Get job details
* {ref}/rollup-start-job.html[POST /job/<job_id>/_start]: Start a job
* {ref}/rollup-stop-job.html[POST /job/<job_id>/_stop]: Stop a job
* {ref}/rollup-delete-job.html[DELETE /job/<job_id+++>+++]: Delete a job
* {ref}/rollup-put-job.html[PUT /_xpack/rollup/job/<job_id+++>+++]: Create a job
* {ref}/rollup-get-job.html[GET /_xpack/rollup/job]: List jobs
* {ref}/rollup-get-job.html[GET /_xpack/rollup/job/<job_id+++>+++]: Get job details
* {ref}/rollup-start-job.html[POST /_xpack/rollup/job/<job_id>/_start]: Start a job
* {ref}/rollup-stop-job.html[POST /_xpack/rollup/job/<job_id>/_stop]: Stop a job
* {ref}/rollup-delete-job.html[DELETE /_xpack/rollup/job/<job_id+++>+++]: Delete a job
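As a quick, informal illustration of the full base path (the job id `sensor` is
a made-up example, not taken from this page), checking on a job and then
starting it would look like:

[source,js]
--------------------------------------------------
GET /_xpack/rollup/job/sensor

POST /_xpack/rollup/job/sensor/_start
--------------------------------------------------
// NOTCONSOLE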
[float]
[[rollup-api-data]]
=== /data/

* {ref}/rollup-get-rollup-caps.html[GET /data/<index_name+++>/_rollup_caps+++]: Get Rollup Capabilities
* {ref}/rollup-get-rollup-caps.html[GET /_xpack/rollup/data/<index_pattern+++>/_rollup_caps+++]: Get Rollup Capabilities
* {ref}/rollup-get-rollup-index-caps.html[GET /<index_name+++>/_rollup/data/+++]: Get Rollup Index Capabilities
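For instance, retrieving the rollup capabilities for indices matching a
hypothetical `sensor-*` pattern would use the first endpoint above:

[source,js]
--------------------------------------------------
GET /_xpack/rollup/data/sensor-*/_rollup_caps
--------------------------------------------------
// NOTCONSOLE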
[float]
[[rollup-api-index]]
@@ -20,6 +20,7 @@ So while the cost of storing a millisecond of sensor data from ten years ago is
reading often diminishes with time. It's not useless -- it could easily contribute to a useful analysis -- but its reduced
value often leads to deletion rather than paying the fixed storage cost.

[float]
=== Rollup stores historical data at reduced granularity

That's where Rollup comes into play. The Rollup functionality summarizes old, high-granularity data into a reduced

@@ -35,6 +36,7 @@ automates this process of summarizing historical data.

Details about setting up and configuring Rollup are covered in <<rollup-put-job,Create Job API>>

[float]
=== Rollup uses standard query DSL

The Rollup feature exposes a new search endpoint (`/_rollup_search` vs the standard `/_search`) which knows how to search

@@ -48,6 +50,7 @@ are covered more in <<rollup-search-limitations, Rollup Search limitations>>.
But if your queries, aggregations and dashboards only use the available functionality, redirecting them to historical
data is trivial.

[float]
=== Rollup merges "live" and "rolled" data

A useful feature of Rollup is the ability to query both "live", realtime data in addition to historical "rolled" data

@@ -61,6 +64,7 @@ would only see data older than a month. The RollupSearch endpoint, however, sup
It will take the results from both data sources and merge them together. If there is overlap between the "live" and
"rolled" data, live data is preferred to increase accuracy.
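As a sketch of what that combined query can look like (the `sensor-*` live
indices, the `sensor_rollup` rollup index, and the `temperature` field are all
hypothetical names), a single RollupSearch request can span both kinds of data:

[source,js]
--------------------------------------------------
GET /sensor-*,sensor_rollup/_rollup_search
{
  "size": 0,
  "aggregations": {
    "max_temperature": {
      "max": {
        "field": "temperature"
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE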
[float]
=== Rollup is multi-interval aware

Finally, Rollup is capable of intelligently utilizing the best interval available. If you've worked with summarizing
Some files were not shown because too many files have changed in this diff.