diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties index 9df8850c301..ac8682d9769 100644 --- a/.ci/java-versions.properties +++ b/.ci/java-versions.properties @@ -7,5 +7,4 @@ ES_BUILD_JAVA=openjdk12 ES_RUNTIME_JAVA=java8 GRADLE_TASK=build - - +GRADLE_EXTRA_ARGS=--no-parallel diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index 9cc03dd7e37..af9d3cd4da9 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -229,6 +229,7 @@ public class ElasticsearchCluster implements TestClusterConfiguration { if (Version.fromString(node.getVersion()).getMajor() >= 7) { node.defaultConfig.put("cluster.initial_master_nodes", "[" + nodeNames + "]"); node.defaultConfig.put("discovery.seed_providers", "file"); + node.defaultConfig.put("discovery.seed_hosts", "[]"); } } node.start(); @@ -286,14 +287,13 @@ public class ElasticsearchCluster implements TestClusterConfiguration { } public void waitForAllConditions() { - long startedAt = System.currentTimeMillis(); LOGGER.info("Waiting for nodes"); nodes.forEach(ElasticsearchNode::waitForAllConditions); writeUnicastHostsFiles(); LOGGER.info("Starting to wait for cluster to form"); - waitForConditions(waitConditions, startedAt, CLUSTER_UP_TIMEOUT, CLUSTER_UP_TIMEOUT_UNIT, this); + waitForConditions(waitConditions, System.currentTimeMillis(), CLUSTER_UP_TIMEOUT, CLUSTER_UP_TIMEOUT_UNIT, this); } @Override diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 1641ef3dac4..8c13c66e0f1 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -37,6 +37,8 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -65,8 +67,10 @@ public class ElasticsearchNode implements TestClusterConfiguration { private static final Logger LOGGER = Logging.getLogger(ElasticsearchNode.class); private static final int ES_DESTROY_TIMEOUT = 20; private static final TimeUnit ES_DESTROY_TIMEOUT_UNIT = TimeUnit.SECONDS; - private static final int NODE_UP_TIMEOUT = 60; - private static final TimeUnit NODE_UP_TIMEOUT_UNIT = TimeUnit.SECONDS; + private static final int NODE_UP_TIMEOUT = 2; + private static final TimeUnit NODE_UP_TIMEOUT_UNIT = TimeUnit.MINUTES; + private static final int ADDITIONAL_CONFIG_TIMEOUT = 15; + private static final TimeUnit ADDITIONAL_CONFIG_TIMEOUT_UNIT = TimeUnit.SECONDS; private static final List OVERRIDABLE_SETTINGS = Arrays.asList( "path.repo", "discovery.seed_providers" @@ -310,6 +314,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { try { if (isWorkingDirConfigured == false) { + logToProcessStdout("Configuring working directory: " + workingDir); // Only configure working dir once so we don't loose data on restarts isWorkingDirConfigured = true; createWorkingDir(distroArtifact); @@ -319,12 +324,16 @@ public class ElasticsearchNode implements 
TestClusterConfiguration { } createConfiguration(); - plugins.forEach(plugin -> runElaticsearchBinScript( - "elasticsearch-plugin", - "install", "--batch", plugin.toString()) - ); + if(plugins.isEmpty() == false) { + logToProcessStdout("Installing " + plugins.size() + " plugins"); + plugins.forEach(plugin -> runElaticsearchBinScript( + "elasticsearch-plugin", + "install", "--batch", plugin.toString()) + ); + } if (keystoreSettings.isEmpty() == false || keystoreFiles.isEmpty() == false) { + logToProcessStdout("Adding " + keystoreSettings.size() + " keystore settings and " + keystoreFiles.size() + " keystore files"); runElaticsearchBinScript("elasticsearch-keystore", "create"); checkSuppliers("Keystore", keystoreSettings.values()); @@ -347,6 +356,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { copyExtraConfigFiles(); if (isSettingMissingOrTrue("xpack.security.enabled")) { + logToProcessStdout("Setting up " + credentials.size() + " users"); if (credentials.isEmpty()) { user(Collections.emptyMap()); } @@ -358,9 +368,25 @@ public class ElasticsearchNode implements TestClusterConfiguration { )); } + logToProcessStdout("Starting Elasticsearch process"); startElasticsearchProcess(); } + private void logToProcessStdout(String message) { + try { + if (Files.exists(esStdoutFile.getParent()) == false) { + Files.createDirectories(esStdoutFile.getParent()); + } + Files.write( + esStdoutFile, + ("[" + Instant.now().toString() + "] [BUILD] " + message + "\n").getBytes(StandardCharsets.UTF_8), + StandardOpenOption.CREATE, StandardOpenOption.APPEND + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + @Override public void restart() { LOGGER.info("Restarting {}", this); @@ -380,6 +406,9 @@ public class ElasticsearchNode implements TestClusterConfiguration { } private void copyExtraConfigFiles() { + if (extraConfigFiles.isEmpty() == false) { + logToProcessStdout("Setting up " + extraConfigFiles.size() + " additional config files"); + } extraConfigFiles.forEach((destination, from) -> { if (Files.exists(from.toPath()) == false) { throw new TestClustersException("Can't create extra config file from " + from + " for " + this + @@ -398,6 +427,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private void installModules() { if (distribution == Distribution.INTEG_TEST) { + logToProcessStdout("Installing " + modules.size() + "modules"); for (File module : modules) { Path destination = workingDir.resolve("modules").resolve(module.getName().replace(".zip", "").replace("-" + version, "")); @@ -843,7 +873,23 @@ public class ElasticsearchNode implements TestClusterConfiguration { } void waitForAllConditions() { - waitForConditions(waitConditions, System.currentTimeMillis(), NODE_UP_TIMEOUT, NODE_UP_TIMEOUT_UNIT, this); + waitForConditions( + waitConditions, + System.currentTimeMillis(), + NODE_UP_TIMEOUT_UNIT.toMillis(NODE_UP_TIMEOUT) + + // Installing plugins at config time and loading them when nods start requires additional time we need to + // account for + ADDITIONAL_CONFIG_TIMEOUT_UNIT.toMillis(ADDITIONAL_CONFIG_TIMEOUT * + ( + plugins.size() + + keystoreFiles.size() + + keystoreSettings.size() + + credentials.size() + ) + ), + TimeUnit.MILLISECONDS, + this + ); } @Override diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterCleanupOnShutdown.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterCleanupOnShutdown.java new file mode 100644 index 00000000000..0381cece108 --- /dev/null 
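For reference, the wait budget computed by the reworked waitForAllConditions above is the 2 minute NODE_UP_TIMEOUT plus 15 seconds of ADDITIONAL_CONFIG_TIMEOUT for every configured plugin, keystore file, keystore setting and credential entry. A minimal stand-alone sketch of that arithmetic, with a made-up node configuration and a hypothetical class name:

    import java.util.concurrent.TimeUnit;

    class WaitBudgetSketch {
        public static void main(String[] args) {
            long nodeUpMillis = TimeUnit.MINUTES.toMillis(2);    // NODE_UP_TIMEOUT in this diff
            long perItemMillis = TimeUnit.SECONDS.toMillis(15);  // ADDITIONAL_CONFIG_TIMEOUT in this diff
            // hypothetical node: 2 plugins, 1 keystore file, 3 keystore settings, 1 credential entry
            int configuredItems = 2 + 1 + 3 + 1;
            long totalWaitMillis = nodeUpMillis + perItemMillis * configuredItems;
            System.out.println(totalWaitMillis + " ms"); // 225000 ms, i.e. 3 minutes 45 seconds
        }
    }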
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterCleanupOnShutdown.java @@ -0,0 +1,59 @@ +package org.elasticsearch.gradle.testclusters; + +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Set; + +/** + * Keep an inventory of all running Clusters and stop them when interrupted + * + * This takes advantage of the fact that Gradle interrupts all the threads in the daemon when the build completes. + */ +public class TestClusterCleanupOnShutdown implements Runnable { + + private final Logger logger = Logging.getLogger(TestClusterCleanupOnShutdown.class); + + private Set clustersToWatch = new HashSet<>(); + + public void watch(Collection cluster) { + synchronized (clustersToWatch) { + clustersToWatch.addAll(clustersToWatch); + } + } + + public void unWatch(Collection cluster) { + synchronized (clustersToWatch) { + clustersToWatch.removeAll(clustersToWatch); + } + } + + @Override + public void run() { + try { + while (true) { + Thread.sleep(Long.MAX_VALUE); + } + } catch (InterruptedException interrupted) { + synchronized (clustersToWatch) { + if (clustersToWatch.isEmpty()) { + return; + } + logger.info("Cleanup thread was interrupted, shutting down all clusters"); + Iterator iterator = clustersToWatch.iterator(); + while (iterator.hasNext()) { + ElasticsearchCluster cluster = iterator.next(); + iterator.remove(); + try { + cluster.stop(false); + } catch (Exception e) { + logger.warn("Could not shut down {}", cluster, e); + } + } + } + } + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersCleanupExtension.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersCleanupExtension.java new file mode 100644 index 00000000000..14bdfa952db --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersCleanupExtension.java @@ -0,0 +1,74 @@ +package org.elasticsearch.gradle.testclusters; + +import org.gradle.api.Project; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +/** + * This extensions was meant to be used internally by testclusters + * + * It holds synchronization primitives needed to implement the rate limiting. + * This is tricky because we can't use Gradle workers as there's no way to make sure that tests and their clusters are + * allocated atomically, so we could be in a situation where all workers are tests waiting for clusters to start up. + * + * Also auto configures cleanup of executors to make sure we don't leak threads in the daemon. 
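The cleanup thread introduced above works by parking until the Gradle daemon interrupts its threads at the end of the build. The same interrupt-driven pattern, reduced to an illustrative stand-alone sketch with hypothetical names (AutoCloseable stands in for ElasticsearchCluster):

    import java.util.ArrayList;
    import java.util.List;

    class SleeperCleanupSketch implements Runnable {
        private final List<AutoCloseable> watched = new ArrayList<>();

        synchronized void watch(AutoCloseable resource) {
            watched.add(resource);
        }

        @Override
        public void run() {
            try {
                Thread.sleep(Long.MAX_VALUE);              // park until the daemon interrupts us
            } catch (InterruptedException interrupted) {
                synchronized (this) {
                    for (AutoCloseable resource : watched) {
                        try {
                            resource.close();              // best-effort shutdown, keep going on failure
                        } catch (Exception e) {
                            System.err.println("could not shut down " + resource + ": " + e);
                        }
                    }
                    watched.clear();
                }
            }
        }
    }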
+ */ +public class TestClustersCleanupExtension { + + private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1; + private static final TimeUnit EXECUTOR_SHUTDOWN_TIMEOUT_UNIT = TimeUnit.MINUTES; + + private static final Logger logger = Logging.getLogger(TestClustersCleanupExtension.class); + + private final ExecutorService executorService; + private final TestClusterCleanupOnShutdown cleanupThread; + + public TestClustersCleanupExtension() { + executorService = Executors.newSingleThreadExecutor(); + cleanupThread = new TestClusterCleanupOnShutdown(); + executorService.submit(cleanupThread); + } + + + public static void createExtension(Project project) { + if (project.getRootProject().getExtensions().findByType(TestClustersCleanupExtension.class) != null) { + return; + } + // Configure the extension on the root project so we have a single instance per run + TestClustersCleanupExtension ext = project.getRootProject().getExtensions().create( + "__testclusters_rate_limit", + TestClustersCleanupExtension.class + ); + Thread shutdownHook = new Thread(ext.cleanupThread::run); + Runtime.getRuntime().addShutdownHook(shutdownHook); + project.getGradle().buildFinished(buildResult -> { + ext.executorService.shutdownNow(); + try { + if (ext.executorService.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, EXECUTOR_SHUTDOWN_TIMEOUT_UNIT) == false) { + throw new IllegalStateException( + "Failed to shut down executor service after " + + EXECUTOR_SHUTDOWN_TIMEOUT + " " + EXECUTOR_SHUTDOWN_TIMEOUT_UNIT + ); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + try { + if (false == Runtime.getRuntime().removeShutdownHook(shutdownHook)) { + logger.warn("Trying to deregister shutdown hook when it was not registered."); + } + } catch (IllegalStateException ese) { + // Thrown when shutdown is in progress + logger.warn("Can't remove shutdown hook", ese); + } + }); + } + + public TestClusterCleanupOnShutdown getCleanupThread() { + return cleanupThread; + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 3f2a7b4dcc7..47f2eb675b1 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -43,13 +43,9 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; public class TestClustersPlugin implements Plugin { @@ -58,18 +54,14 @@ public class TestClustersPlugin implements Plugin { public static final String EXTENSION_NAME = "testClusters"; private static final String HELPER_CONFIGURATION_PREFIX = "testclusters"; private static final String SYNC_ARTIFACTS_TASK_NAME = "syncTestClustersArtifacts"; - private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1; - private static final TimeUnit EXECUTOR_SHUTDOWN_TIMEOUT_UNIT = TimeUnit.MINUTES; private static final Logger logger = Logging.getLogger(TestClustersPlugin.class); private static final String TESTCLUSTERS_INSPECT_FAILURE = "testclusters.inspect.failure"; private final Map> usedClusters = new HashMap<>(); private final Map claimsInventory = new HashMap<>(); - private 
final Set runningClusters =new HashSet<>(); - private final Thread shutdownHook = new Thread(this::shutDownAllClusters); + private final Set runningClusters = new HashSet<>(); private final Boolean allowClusterToSurvive = Boolean.valueOf(System.getProperty(TESTCLUSTERS_INSPECT_FAILURE, "false")); - private ExecutorService executorService = Executors.newSingleThreadExecutor(); public static String getHelperConfigurationName(String version) { return HELPER_CONFIGURATION_PREFIX + "-" + version; @@ -82,6 +74,8 @@ public class TestClustersPlugin implements Plugin { // enable the DSL to describe clusters NamedDomainObjectContainer container = createTestClustersContainerExtension(project); + TestClustersCleanupExtension.createExtension(project); + // provide a task to be able to list defined clusters. createListClustersTask(project, container); @@ -100,9 +94,6 @@ public class TestClustersPlugin implements Plugin { // After each task we determine if there are clusters that are no longer needed. configureStopClustersHook(project); - // configure hooks to make sure no test cluster processes survive the build - configureCleanupHooks(project); - // Since we have everything modeled in the DSL, add all the required dependencies e.x. the distribution to the // configuration so the user doesn't have to repeat this. autoConfigureClusterDependencies(project, rootProject, container); @@ -196,8 +187,19 @@ public class TestClustersPlugin implements Plugin { @Override public void beforeActions(Task task) { // we only start the cluster before the actions, so we'll not start it if the task is up-to-date - usedClusters.getOrDefault(task, Collections.emptyList()).stream() + List neededButNotRunning = usedClusters.getOrDefault( + task, + Collections.emptyList() + ) + .stream() .filter(cluster -> runningClusters.contains(cluster) == false) + .collect(Collectors.toList()); + + project.getRootProject().getExtensions() + .getByType(TestClustersCleanupExtension.class) + .getCleanupThread() + .watch(neededButNotRunning); + neededButNotRunning .forEach(elasticsearchCluster -> { elasticsearchCluster.start(); runningClusters.add(elasticsearchCluster); @@ -220,22 +222,36 @@ public class TestClustersPlugin implements Plugin { task, Collections.emptyList() ); + if (clustersUsedByTask.isEmpty()) { + return; + } + logger.info("Clusters were used, stopping and releasing permits"); + final int permitsToRelease; if (state.getFailure() != null) { // If the task fails, and other tasks use this cluster, the other task will likely never be - // executed at all, so we will never get to un-claim and terminate it. + // executed at all, so we will never be called again to un-claim and terminate it. 
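The claims bookkeeping used in beforeActions here and in the task-completion hook below roughly amounts to per-cluster reference counting: start a cluster when it is first needed, stop it once the last claim is released. An illustrative, simplified reduction with hypothetical names:

    import java.util.HashMap;
    import java.util.Map;

    class ClaimsInventorySketch {
        private final Map<String, Integer> claims = new HashMap<>();

        void claim(String cluster) {
            int count = claims.merge(cluster, 1, Integer::sum);
            if (count == 1) {
                System.out.println("starting " + cluster);   // first user of the cluster starts it
            }
        }

        void release(String cluster) {
            Integer remaining = claims.computeIfPresent(cluster, (name, count) -> count - 1);
            if (remaining != null && remaining == 0) {
                claims.remove(cluster);
                System.out.println("stopping " + cluster);   // last user of the cluster stops it
            }
        }
    }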
clustersUsedByTask.forEach(cluster -> stopCluster(cluster, true)); + permitsToRelease = clustersUsedByTask.stream() + .map(cluster -> cluster.getNumberOfNodes()) + .reduce(Integer::sum).get(); } else { clustersUsedByTask.forEach( cluster -> claimsInventory.put(cluster, claimsInventory.getOrDefault(cluster, 0) - 1) ); - claimsInventory.entrySet().stream() + List stoppingClusers = claimsInventory.entrySet().stream() .filter(entry -> entry.getValue() == 0) .filter(entry -> runningClusters.contains(entry.getKey())) .map(Map.Entry::getKey) - .forEach(cluster -> { - stopCluster(cluster, false); - runningClusters.remove(cluster); - }); + .collect(Collectors.toList()); + stoppingClusers.forEach(cluster -> { + stopCluster(cluster, false); + runningClusters.remove(cluster); + }); + + project.getRootProject().getExtensions() + .getByType(TestClustersCleanupExtension.class) + .getCleanupThread() + .unWatch(stoppingClusers); } } @Override @@ -406,62 +422,4 @@ public class TestClustersPlugin implements Plugin { }))); } - private void configureCleanupHooks(Project project) { - // When the Gradle daemon is used, it will interrupt all threads when the build concludes. - // This is our signal to clean up - executorService.submit(() -> { - while (true) { - try { - Thread.sleep(Long.MAX_VALUE); - } catch (InterruptedException interrupted) { - shutDownAllClusters(); - Thread.currentThread().interrupt(); - return; - } - } - }); - - // When the Daemon is not used, or runs into issues, rely on a shutdown hook - // When the daemon is used, but does not work correctly and eventually dies off (e.x. due to non interruptible - // thread in the build) process will be stopped eventually when the daemon dies. - Runtime.getRuntime().addShutdownHook(shutdownHook); - - // When we don't run into anything out of the ordinary, and the build completes, makes sure to clean up - project.getGradle().buildFinished(buildResult -> { - shutdownExecutorService(); - if (false == Runtime.getRuntime().removeShutdownHook(shutdownHook)) { - logger.info("Trying to deregister shutdown hook when it was not registered."); - } - }); - } - - private void shutdownExecutorService() { - executorService.shutdownNow(); - try { - if (executorService.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, EXECUTOR_SHUTDOWN_TIMEOUT_UNIT) == false) { - throw new IllegalStateException( - "Failed to shut down executor service after " + - EXECUTOR_SHUTDOWN_TIMEOUT + " " + EXECUTOR_SHUTDOWN_TIMEOUT_UNIT - ); - } - } catch (InterruptedException e) { - logger.info("Wait for testclusters shutdown interrupted", e); - Thread.currentThread().interrupt(); - } - } - - private void shutDownAllClusters() { - synchronized (runningClusters) { - if (runningClusters.isEmpty()) { - return; - } - Iterator iterator = runningClusters.iterator(); - while (iterator.hasNext()) { - ElasticsearchCluster next = iterator.next(); - iterator.remove(); - next.stop(false); - } - } - } - } diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index 04edabda285..e5e7441d3e9 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -5.4.1 \ No newline at end of file +5.5 \ No newline at end of file diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 0d138fa3255..c75cea2a32b 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,4 +1,4 @@ -elasticsearch = 7.3.0 +elasticsearch = 7.4.0 lucene = 8.1.0 bundled_jdk = 
12.0.1+12@69cfe15208a647278a19ef0990eea691 diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java index 40c65aee700..e6412099fee 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.plugin.noop.action.bulk; -import org.elasticsearch.action.StreamableResponseAction; +import org.elasticsearch.action.StreamableResponseActionType; import org.elasticsearch.action.bulk.BulkResponse; -public class NoopBulkAction extends StreamableResponseAction { +public class NoopBulkAction extends StreamableResponseActionType { public static final String NAME = "mock:data/write/bulk"; public static final NoopBulkAction INSTANCE = new NoopBulkAction(); diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java index aa316ae435a..fb83bda148b 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java @@ -18,11 +18,11 @@ */ package org.elasticsearch.plugin.noop.action.search; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.io.stream.Writeable; -public class NoopSearchAction extends Action { +public class NoopSearchAction extends ActionType { public static final NoopSearchAction INSTANCE = new NoopSearchAction(); public static final String NAME = "mock:data/read/search"; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index a5a57e4d6b8..584bdad7450 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -61,6 +61,8 @@ import org.elasticsearch.client.indices.GetMappingsResponse; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersResponse; import org.elasticsearch.client.indices.UnfreezeIndexRequest; import org.elasticsearch.client.indices.rollover.RolloverRequest; import org.elasticsearch.client.indices.rollover.RolloverResponse; @@ -1328,4 +1330,28 @@ public final class IndicesClient { restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::deleteTemplate, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } + + /** + * Synchronously calls the _reload_search_analyzers API + * + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public ReloadAnalyzersResponse reloadAnalyzers(ReloadAnalyzersRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndicesRequestConverters::reloadAnalyzers, options, + ReloadAnalyzersResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously calls the _reload_search_analyzers API + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void reloadAnalyzersAsync(ReloadAnalyzersRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::reloadAnalyzers, options, + ReloadAnalyzersResponse::fromXContent, listener, emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java index 7a67fe71348..5c655e56495 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -50,6 +50,7 @@ import org.elasticsearch.client.indices.GetMappingsRequest; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersRequest; import org.elasticsearch.client.indices.UnfreezeIndexRequest; import org.elasticsearch.client.indices.rollover.RolloverRequest; import org.elasticsearch.common.Strings; @@ -646,4 +647,13 @@ final class IndicesRequestConverters { request.addParameters(params.asMap()); return request; } + + static Request reloadAnalyzers(ReloadAnalyzersRequest reloadAnalyzersRequest) { + String endpoint = RequestConverters.endpoint(reloadAnalyzersRequest.getIndices(), "_reload_search_analyzers"); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + RequestConverters.Params parameters = new RequestConverters.Params(); + parameters.withIndicesOptions(reloadAnalyzersRequest.indicesOptions()); + request.addParameters(parameters.asMap()); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index e5a98b46324..651851e345d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -676,6 +676,9 @@ final class MLRequestConverters { params.putParam( StopDataFrameAnalyticsRequest.ALLOW_NO_MATCH.getPreferredName(), Boolean.toString(stopRequest.getAllowNoMatch())); } + if (stopRequest.getForce() != null) { + params.putParam(StopDataFrameAnalyticsRequest.FORCE.getPreferredName(), Boolean.toString(stopRequest.getForce())); + } request.addParameters(params.asMap()); return request; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java index 
fefb5771dc8..8c29cfaae54 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -43,6 +43,8 @@ import org.elasticsearch.client.security.DisableUserRequest; import org.elasticsearch.client.security.EnableUserRequest; import org.elasticsearch.client.security.GetApiKeyRequest; import org.elasticsearch.client.security.GetApiKeyResponse; +import org.elasticsearch.client.security.GetBuiltinPrivilegesRequest; +import org.elasticsearch.client.security.GetBuiltinPrivilegesResponse; import org.elasticsearch.client.security.GetPrivilegesRequest; import org.elasticsearch.client.security.GetPrivilegesResponse; import org.elasticsearch.client.security.GetRoleMappingsRequest; @@ -751,6 +753,34 @@ public final class SecurityClient { InvalidateTokenResponse::fromXContent, listener, emptySet()); } + /** + * Synchronously get builtin (cluster & index) privilege(s). + * See + * the docs for more. + * + * @param options the request options (e.g. headers), use + * {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response from the get builtin privileges call + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public GetBuiltinPrivilegesResponse getBuiltinPrivileges(final RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(GetBuiltinPrivilegesRequest.INSTANCE, + GetBuiltinPrivilegesRequest::getRequest, options, GetBuiltinPrivilegesResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously get builtin (cluster & index) privilege(s). + * See + * the docs for more. + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getBuiltinPrivilegesAsync(final RequestOptions options, final ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(GetBuiltinPrivilegesRequest.INSTANCE, + GetBuiltinPrivilegesRequest::getRequest, options, GetBuiltinPrivilegesResponse::fromXContent, listener, emptySet()); + } + /** * Synchronously get application privilege(s). 
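A usage sketch for the new builtin-privileges calls above, assuming an already constructed RestHighLevelClient named client (exception handling omitted):

    GetBuiltinPrivilegesResponse response = client.security().getBuiltinPrivileges(RequestOptions.DEFAULT);
    Set<String> clusterPrivileges = response.getClusterPrivileges();
    Set<String> indexPrivileges = response.getIndexPrivileges();

    client.security().getBuiltinPrivilegesAsync(RequestOptions.DEFAULT,
        new ActionListener<GetBuiltinPrivilegesResponse>() {
            @Override
            public void onResponse(GetBuiltinPrivilegesResponse builtinPrivileges) {
                // inspect builtinPrivileges.getClusterPrivileges() / getIndexPrivileges()
            }

            @Override
            public void onFailure(Exception e) {
                // handle the failure
            }
        });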
* See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java index 3665ba5bf50..35ce0f55d71 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java @@ -45,7 +45,7 @@ public class BroadcastResponse { return shards; } - BroadcastResponse(final Shards shards) { + protected BroadcastResponse(final Shards shards) { this.shards = Objects.requireNonNull(shards); } @@ -56,7 +56,7 @@ public class BroadcastResponse { a -> new BroadcastResponse((Shards) a[0])); static { - PARSER.declareObject(ConstructingObjectParser.constructorArg(), Shards.SHARDS_PARSER, SHARDS_FIELD); + declareShardsField(PARSER); } /** @@ -70,6 +70,10 @@ public class BroadcastResponse { return PARSER.parse(parser, null); } + protected static void declareShardsField(ConstructingObjectParser PARSER) { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), Shards.SHARDS_PARSER, SHARDS_FIELD); + } + /** * Represents the results of a collection of shards on which a request was executed against. */ diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponse.java index 5d5f67dd65e..40e87b5768b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponse.java @@ -29,23 +29,32 @@ import java.util.Objects; public class PreviewDataFrameTransformResponse { private static final String PREVIEW = "preview"; + private static final String MAPPINGS = "mappings"; @SuppressWarnings("unchecked") public static PreviewDataFrameTransformResponse fromXContent(final XContentParser parser) throws IOException { - Object previewDocs = parser.map().get(PREVIEW); - return new PreviewDataFrameTransformResponse((List>) previewDocs); + Map previewMap = parser.mapOrdered(); + Object previewDocs = previewMap.get(PREVIEW); + Object mappings = previewMap.get(MAPPINGS); + return new PreviewDataFrameTransformResponse((List>) previewDocs, (Map) mappings); } private List> docs; + private Map mappings; - public PreviewDataFrameTransformResponse(List> docs) { + public PreviewDataFrameTransformResponse(List> docs, Map mappings) { this.docs = docs; + this.mappings = mappings; } public List> getDocs() { return docs; } + public Map getMappings() { + return mappings; + } + @Override public boolean equals(Object obj) { if (obj == this) { @@ -57,12 +66,12 @@ public class PreviewDataFrameTransformResponse { } PreviewDataFrameTransformResponse other = (PreviewDataFrameTransformResponse) obj; - return Objects.equals(other.docs, docs); + return Objects.equals(other.docs, docs) && Objects.equals(other.mappings, mappings); } @Override public int hashCode() { - return Objects.hashCode(docs); + return Objects.hash(docs, mappings); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java index 352cbfb67fc..186c67bf42c 100644 --- 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java @@ -43,6 +43,7 @@ public class DataFrameTransformState { private static final ParseField CHECKPOINT = new ParseField("checkpoint"); private static final ParseField REASON = new ParseField("reason"); private static final ParseField PROGRESS = new ParseField("progress"); + private static final ParseField NODE = new ParseField("node"); @SuppressWarnings("unchecked") public static final ConstructingObjectParser PARSER = @@ -52,7 +53,8 @@ public class DataFrameTransformState { (Map) args[2], (long) args[3], (String) args[4], - (DataFrameTransformProgress) args[5])); + (DataFrameTransformProgress) args[5], + (NodeAttributes) args[6])); static { PARSER.declareField(constructorArg(), p -> DataFrameTransformTaskState.fromString(p.text()), TASK_STATE, ValueType.STRING); @@ -61,6 +63,7 @@ public class DataFrameTransformState { PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), CHECKPOINT); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), REASON); PARSER.declareField(optionalConstructorArg(), DataFrameTransformProgress::fromXContent, PROGRESS, ValueType.OBJECT); + PARSER.declareField(optionalConstructorArg(), NodeAttributes.PARSER::apply, NODE, ValueType.OBJECT); } public static DataFrameTransformState fromXContent(XContentParser parser) throws IOException { @@ -73,19 +76,22 @@ public class DataFrameTransformState { private final Map currentPosition; private final String reason; private final DataFrameTransformProgress progress; + private final NodeAttributes node; public DataFrameTransformState(DataFrameTransformTaskState taskState, IndexerState indexerState, @Nullable Map position, long checkpoint, @Nullable String reason, - @Nullable DataFrameTransformProgress progress) { + @Nullable DataFrameTransformProgress progress, + @Nullable NodeAttributes node) { this.taskState = taskState; this.indexerState = indexerState; this.currentPosition = position == null ? 
null : Collections.unmodifiableMap(new LinkedHashMap<>(position)); this.checkpoint = checkpoint; this.reason = reason; this.progress = progress; + this.node = node; } public IndexerState getIndexerState() { @@ -115,6 +121,11 @@ public class DataFrameTransformState { return progress; } + @Nullable + public NodeAttributes getNode() { + return node; + } + @Override public boolean equals(Object other) { if (this == other) { @@ -132,12 +143,13 @@ public class DataFrameTransformState { Objects.equals(this.currentPosition, that.currentPosition) && Objects.equals(this.progress, that.progress) && this.checkpoint == that.checkpoint && + Objects.equals(this.node, that.node) && Objects.equals(this.reason, that.reason); } @Override public int hashCode() { - return Objects.hash(taskState, indexerState, currentPosition, checkpoint, reason, progress); + return Objects.hash(taskState, indexerState, currentPosition, checkpoint, reason, progress, node); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/NodeAttributes.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/NodeAttributes.java new file mode 100644 index 00000000000..85c2b9644c2 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/NodeAttributes.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +/** + * A Pojo class containing an Elastic Node's attributes + */ +public class NodeAttributes implements ToXContentObject { + + public static final ParseField ID = new ParseField("id"); + public static final ParseField NAME = new ParseField("name"); + public static final ParseField EPHEMERAL_ID = new ParseField("ephemeral_id"); + public static final ParseField TRANSPORT_ADDRESS = new ParseField("transport_address"); + public static final ParseField ATTRIBUTES = new ParseField("attributes"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("node", true, + (a) -> { + int i = 0; + String id = (String) a[i++]; + String name = (String) a[i++]; + String ephemeralId = (String) a[i++]; + String transportAddress = (String) a[i++]; + Map attributes = (Map) a[i]; + return new NodeAttributes(id, name, ephemeralId, transportAddress, attributes); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME); + PARSER.declareString(ConstructingObjectParser.constructorArg(), EPHEMERAL_ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), TRANSPORT_ADDRESS); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p, c) -> p.mapStrings(), + ATTRIBUTES, + ObjectParser.ValueType.OBJECT); + } + + private final String id; + private final String name; + private final String ephemeralId; + private final String transportAddress; + private final Map attributes; + + public NodeAttributes(String id, String name, String ephemeralId, String transportAddress, Map attributes) { + this.id = id; + this.name = name; + this.ephemeralId = ephemeralId; + this.transportAddress = transportAddress; + this.attributes = Collections.unmodifiableMap(attributes); + } + + /** + * The unique identifier of the node. + */ + public String getId() { + return id; + } + + /** + * The node name. + */ + public String getName() { + return name; + } + + /** + * The ephemeral id of the node. + */ + public String getEphemeralId() { + return ephemeralId; + } + + /** + * The host and port where transport HTTP connections are accepted. 
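To make the shape of this POJO concrete: it is the object that now backs the optional node field added to DataFrameTransformState above. A small sketch with made-up values:

    Map<String, String> attributes = Collections.singletonMap("ml.machine_memory", "17179869184");
    NodeAttributes node = new NodeAttributes("1sdKQt1FQ9WKWFBxN4tdzg", "node-0", "ephemeral-id",
        "127.0.0.1:9300", attributes);
    String summary = node.getName() + " @ " + node.getTransportAddress(); // "node-0 @ 127.0.0.1:9300"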
+ */ + public String getTransportAddress() { + return transportAddress; + } + + /** + * Additional attributes related to this node + */ + public Map getAttributes() { + return attributes; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ID.getPreferredName(), id); + builder.field(NAME.getPreferredName(), name); + builder.field(EPHEMERAL_ID.getPreferredName(), ephemeralId); + builder.field(TRANSPORT_ADDRESS.getPreferredName(), transportAddress); + builder.field(ATTRIBUTES.getPreferredName(), attributes); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id, name, ephemeralId, transportAddress, attributes); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + NodeAttributes that = (NodeAttributes) other; + return Objects.equals(id, that.id) && + Objects.equals(name, that.name) && + Objects.equals(ephemeralId, that.ephemeralId) && + Objects.equals(transportAddress, that.transportAddress) && + Objects.equals(attributes, that.attributes); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/ReloadAnalyzersRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/ReloadAnalyzersRequest.java new file mode 100644 index 00000000000..e815d91bbe8 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/ReloadAnalyzersRequest.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Validatable; + +import java.util.Objects; + +/** + * Request for the _reload_search_analyzers API + */ +public final class ReloadAnalyzersRequest implements Validatable { + + private final String[] indices; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + + /** + * Creates a new reload analyzers request + * @param indices the index for which to reload analyzers + */ + public ReloadAnalyzersRequest(String... indices) { + this.indices = Objects.requireNonNull(indices); + } + + /** + * Returns the indices + */ + public String[] getIndices() { + return indices; + } + + /** + * Specifies what type of requested indices to ignore and how to deal with wildcard expressions. + * For example indices that don't exist. 
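Tying the new request, converter and response pieces together, a usage sketch against the high-level client (client is an already constructed RestHighLevelClient, the index name is made up, and IOException handling is omitted):

    ReloadAnalyzersRequest request = new ReloadAnalyzersRequest("my-index");
    request.setIndicesOptions(IndicesOptions.strictExpandOpen());      // the default, shown for illustration
    ReloadAnalyzersResponse response = client.indices().reloadAnalyzers(request, RequestOptions.DEFAULT);
    for (ReloadAnalyzersResponse.ReloadDetails details : response.getReloadedDetails().values()) {
        String index = details.getIndexName();
        Set<String> reloadedAnalyzers = details.getReloadedAnalyzers();   // analyzer names that were reloaded
        Set<String> nodeIds = details.getReloadedIndicesNodes();          // nodes that performed the reload
    }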
+ * + * @return the current behaviour when it comes to index names and wildcard indices expressions + */ + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + /** + * Specifies what type of requested indices to ignore and how to deal with wildcard expressions. + * For example indices that don't exist. + * + * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions + */ + public void setIndicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/ReloadAnalyzersResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/ReloadAnalyzersResponse.java new file mode 100644 index 00000000000..e2c39d0a7ae --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/ReloadAnalyzersResponse.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indices; + +import org.elasticsearch.client.core.BroadcastResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * The response object that will be returned when reloading analyzers + */ +public class ReloadAnalyzersResponse extends BroadcastResponse { + + private final Map reloadDetails; + + ReloadAnalyzersResponse(final Shards shards, Map reloadDetails) { + super(shards); + this.reloadDetails = reloadDetails; + } + + @SuppressWarnings({ "unchecked" }) + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("reload_analyzer", + true, arg -> { + Shards shards = (Shards) arg[0]; + List> results = (List>) arg[1]; + Map reloadDetails = new HashMap<>(); + for (Tuple result : results) { + reloadDetails.put(result.v1(), result.v2()); + } + return new ReloadAnalyzersResponse(shards, reloadDetails); + }); + + @SuppressWarnings({ "unchecked" }) + private static final ConstructingObjectParser, Void> ENTRY_PARSER = new ConstructingObjectParser<>( + "reload_analyzer.entry", true, arg -> { + String index = (String) arg[0]; + Set nodeIds = new HashSet<>((List) arg[1]); + Set analyzers = new HashSet<>((List) arg[2]); + return new Tuple<>(index, new ReloadDetails(index, nodeIds, analyzers)); + }); + + static { + declareShardsField(PARSER); + PARSER.declareObjectArray(constructorArg(), ENTRY_PARSER, new 
ParseField("reload_details")); + ENTRY_PARSER.declareString(constructorArg(), new ParseField("index")); + ENTRY_PARSER.declareStringArray(constructorArg(), new ParseField("reloaded_node_ids")); + ENTRY_PARSER.declareStringArray(constructorArg(), new ParseField("reloaded_analyzers")); + } + + public static ReloadAnalyzersResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public Map getReloadedDetails() { + return reloadDetails; + } + + public static class ReloadDetails { + + private final String indexName; + private final Set reloadedIndicesNodes; + private final Set reloadedAnalyzers; + + public ReloadDetails(String name, Set reloadedIndicesNodes, Set reloadedAnalyzers) { + this.indexName = name; + this.reloadedIndicesNodes = reloadedIndicesNodes; + this.reloadedAnalyzers = reloadedAnalyzers; + } + + public String getIndexName() { + return indexName; + } + + public Set getReloadedIndicesNodes() { + return reloadedIndicesNodes; + } + + public Set getReloadedAnalyzers() { + return reloadedAnalyzers; + } + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/StopDataFrameAnalyticsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/StopDataFrameAnalyticsRequest.java index 9608d40fc7d..4ba6af852f6 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/StopDataFrameAnalyticsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/StopDataFrameAnalyticsRequest.java @@ -31,10 +31,12 @@ import java.util.Optional; public class StopDataFrameAnalyticsRequest implements Validatable { public static final ParseField ALLOW_NO_MATCH = new ParseField("allow_no_match"); + public static final ParseField FORCE = new ParseField("force"); private final String id; - private TimeValue timeout; private Boolean allowNoMatch; + private Boolean force; + private TimeValue timeout; public StopDataFrameAnalyticsRequest(String id) { this.id = id; @@ -62,6 +64,15 @@ public class StopDataFrameAnalyticsRequest implements Validatable { return this; } + public Boolean getForce() { + return force; + } + + public StopDataFrameAnalyticsRequest setForce(boolean force) { + this.force = force; + return this; + } + @Override public Optional validate() { if (id == null) { @@ -78,11 +89,12 @@ public class StopDataFrameAnalyticsRequest implements Validatable { StopDataFrameAnalyticsRequest other = (StopDataFrameAnalyticsRequest) o; return Objects.equals(id, other.id) && Objects.equals(timeout, other.timeout) - && Objects.equals(allowNoMatch, other.allowNoMatch); + && Objects.equals(allowNoMatch, other.allowNoMatch) + && Objects.equals(force, other.force); } @Override public int hashCode() { - return Objects.hash(id, timeout, allowNoMatch); + return Objects.hash(id, timeout, allowNoMatch, force); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStats.java index 5c652f33edb..4e04204e650 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStats.java @@ -41,6 +41,7 @@ public class DataFrameAnalyticsStats { static final ParseField ID = new ParseField("id"); static final ParseField STATE = new ParseField("state"); + static final ParseField FAILURE_REASON = new 
ParseField("failure_reason"); static final ParseField PROGRESS_PERCENT = new ParseField("progress_percent"); static final ParseField NODE = new ParseField("node"); static final ParseField ASSIGNMENT_EXPLANATION = new ParseField("assignment_explanation"); @@ -50,9 +51,10 @@ public class DataFrameAnalyticsStats { args -> new DataFrameAnalyticsStats( (String) args[0], (DataFrameAnalyticsState) args[1], - (Integer) args[2], - (NodeAttributes) args[3], - (String) args[4])); + (String) args[2], + (Integer) args[3], + (NodeAttributes) args[4], + (String) args[5])); static { PARSER.declareString(constructorArg(), ID); @@ -62,6 +64,7 @@ public class DataFrameAnalyticsStats { } throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); }, STATE, ObjectParser.ValueType.STRING); + PARSER.declareString(optionalConstructorArg(), FAILURE_REASON); PARSER.declareInt(optionalConstructorArg(), PROGRESS_PERCENT); PARSER.declareObject(optionalConstructorArg(), NodeAttributes.PARSER, NODE); PARSER.declareString(optionalConstructorArg(), ASSIGNMENT_EXPLANATION); @@ -69,14 +72,17 @@ public class DataFrameAnalyticsStats { private final String id; private final DataFrameAnalyticsState state; + private final String failureReason; private final Integer progressPercent; private final NodeAttributes node; private final String assignmentExplanation; - public DataFrameAnalyticsStats(String id, DataFrameAnalyticsState state, @Nullable Integer progressPercent, - @Nullable NodeAttributes node, @Nullable String assignmentExplanation) { + public DataFrameAnalyticsStats(String id, DataFrameAnalyticsState state, @Nullable String failureReason, + @Nullable Integer progressPercent, @Nullable NodeAttributes node, + @Nullable String assignmentExplanation) { this.id = id; this.state = state; + this.failureReason = failureReason; this.progressPercent = progressPercent; this.node = node; this.assignmentExplanation = assignmentExplanation; @@ -90,6 +96,10 @@ public class DataFrameAnalyticsStats { return state; } + public String getFailureReason() { + return failureReason; + } + public Integer getProgressPercent() { return progressPercent; } @@ -110,6 +120,7 @@ public class DataFrameAnalyticsStats { DataFrameAnalyticsStats other = (DataFrameAnalyticsStats) o; return Objects.equals(id, other.id) && Objects.equals(state, other.state) + && Objects.equals(failureReason, other.failureReason) && Objects.equals(progressPercent, other.progressPercent) && Objects.equals(node, other.node) && Objects.equals(assignmentExplanation, other.assignmentExplanation); @@ -117,7 +128,7 @@ public class DataFrameAnalyticsStats { @Override public int hashCode() { - return Objects.hash(id, state, progressPercent, node, assignmentExplanation); + return Objects.hash(id, state, failureReason, progressPercent, node, assignmentExplanation); } @Override @@ -125,6 +136,7 @@ public class DataFrameAnalyticsStats { return new ToStringBuilder(getClass()) .add("id", id) .add("state", state) + .add("failureReason", failureReason) .add("progressPercent", progressPercent) .add("node", node) .add("assignmentExplanation", assignmentExplanation) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenRequest.java index 6c1b394355e..4f1302533d9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenRequest.java +++ 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenRequest.java @@ -40,6 +40,7 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject { private final String username; private final char[] password; private final String refreshToken; + private final char[] kerberosTicket; /** * General purpose constructor. This constructor is typically not useful, and one of the following factory methods should be used @@ -48,10 +49,11 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject { *
* <li>{@link #passwordGrant(String, char[])}</li>
* <li>{@link #refreshTokenGrant(String)}</li>
* <li>{@link #clientCredentialsGrant()}</li>
+ * <li>{@link #kerberosGrant(char[])}</li>
  • * */ public CreateTokenRequest(String grantType, @Nullable String scope, @Nullable String username, @Nullable char[] password, - @Nullable String refreshToken) { + @Nullable String refreshToken, @Nullable char[] kerberosTicket) { if (Strings.isNullOrEmpty(grantType)) { throw new IllegalArgumentException("grant_type is required"); } @@ -60,6 +62,7 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject { this.password = password; this.scope = scope; this.refreshToken = refreshToken; + this.kerberosTicket = kerberosTicket; } public static CreateTokenRequest passwordGrant(String username, char[] password) { @@ -69,18 +72,25 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject { if (password == null || password.length == 0) { throw new IllegalArgumentException("password is required"); } - return new CreateTokenRequest("password", null, username, password, null); + return new CreateTokenRequest("password", null, username, password, null, null); } public static CreateTokenRequest refreshTokenGrant(String refreshToken) { if (Strings.isNullOrEmpty(refreshToken)) { throw new IllegalArgumentException("refresh_token is required"); } - return new CreateTokenRequest("refresh_token", null, null, null, refreshToken); + return new CreateTokenRequest("refresh_token", null, null, null, refreshToken, null); } public static CreateTokenRequest clientCredentialsGrant() { - return new CreateTokenRequest("client_credentials", null, null, null, null); + return new CreateTokenRequest("client_credentials", null, null, null, null, null); + } + + public static CreateTokenRequest kerberosGrant(char[] kerberosTicket) { + if (kerberosTicket == null || kerberosTicket.length == 0) { + throw new IllegalArgumentException("kerberos ticket is required"); + } + return new CreateTokenRequest("_kerberos", null, null, null, null, kerberosTicket); } public String getGrantType() { @@ -103,6 +113,10 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject { return refreshToken; } + public char[] getKerberosTicket() { + return kerberosTicket; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject() @@ -124,6 +138,14 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject { if (refreshToken != null) { builder.field("refresh_token", refreshToken); } + if (kerberosTicket != null) { + byte[] kerberosTicketBytes = CharArrays.toUtf8Bytes(kerberosTicket); + try { + builder.field("kerberos_ticket").utf8Value(kerberosTicketBytes, 0, kerberosTicketBytes.length); + } finally { + Arrays.fill(kerberosTicketBytes, (byte) 0); + } + } return builder.endObject(); } @@ -140,13 +162,15 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject { Objects.equals(scope, that.scope) && Objects.equals(username, that.username) && Arrays.equals(password, that.password) && - Objects.equals(refreshToken, that.refreshToken); + Objects.equals(refreshToken, that.refreshToken) && + Arrays.equals(kerberosTicket, that.kerberosTicket); } @Override public int hashCode() { int result = Objects.hash(grantType, scope, username, refreshToken); result = 31 * result + Arrays.hashCode(password); + result = 31 * result + Arrays.hashCode(kerberosTicket); return result; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetBuiltinPrivilegesRequest.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetBuiltinPrivilegesRequest.java new file mode 100644 index 00000000000..54366558075 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetBuiltinPrivilegesRequest.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Validatable; + +/** + * Request object to retrieve the privilege that are builtin to the Elasticsearch cluster. + */ +public final class GetBuiltinPrivilegesRequest implements Validatable { + + public static final GetBuiltinPrivilegesRequest INSTANCE = new GetBuiltinPrivilegesRequest(); + + private GetBuiltinPrivilegesRequest() { + } + + public Request getRequest() { + return new Request(HttpGet.METHOD_NAME, "/_security/privilege/_builtin"); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetBuiltinPrivilegesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetBuiltinPrivilegesResponse.java new file mode 100644 index 00000000000..e7dd223b391 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetBuiltinPrivilegesResponse.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Get builtin privileges response + */ +public final class GetBuiltinPrivilegesResponse { + + private final Set clusterPrivileges; + private final Set indexPrivileges; + + public GetBuiltinPrivilegesResponse(Collection cluster, Collection index) { + this.clusterPrivileges = Collections.unmodifiableSet(new HashSet<>(cluster)); + this.indexPrivileges = Collections.unmodifiableSet(new HashSet<>(index)); + } + + public Set getClusterPrivileges() { + return clusterPrivileges; + } + + public Set getIndexPrivileges() { + return indexPrivileges; + } + + public static GetBuiltinPrivilegesResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetBuiltinPrivilegesResponse that = (GetBuiltinPrivilegesResponse) o; + return Objects.equals(this.clusterPrivileges, that.clusterPrivileges) + && Objects.equals(this.indexPrivileges, that.indexPrivileges); + } + + @Override + public int hashCode() { + return Objects.hash(clusterPrivileges, indexPrivileges); + } + + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_builtin_privileges", true, + args -> new GetBuiltinPrivilegesResponse((Collection) args[0], (Collection) args[1])); + + static { + PARSER.declareStringArray(constructorArg(), new ParseField("cluster")); + PARSER.declareStringArray(constructorArg(), new ParseField("index")); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java index d6b466c0694..9aac6fba284 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java @@ -299,10 +299,12 @@ public final class Role { public static final String NONE = "none"; public static final String ALL = "all"; public static final String MONITOR = "monitor"; + public static final String MONITOR_DATA_FRAME_TRANSFORMS = "monitor_data_frame_transforms"; public static final String MONITOR_ML = "monitor_ml"; public static final String MONITOR_WATCHER = "monitor_watcher"; public static final String MONITOR_ROLLUP = "monitor_rollup"; public static final String MANAGE = "manage"; + public static final String MANAGE_DATA_FRAME_TRANSFORMS = "manage_data_frame_transforms"; public static final String MANAGE_ML = "manage_ml"; public static final String MANAGE_WATCHER = "manage_watcher"; public static final String MANAGE_ROLLUP = "manage_rollup"; @@ -319,8 +321,9 @@ public final class Role { public static final String MANAGE_ILM = "manage_ilm"; public static final String READ_ILM = "read_ilm"; public static final String MANAGE_ENRICH = "manage_enrich"; - public static final String[] ALL_ARRAY = new String[] { NONE, ALL, 
MONITOR, MONITOR_ML, MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE, - MANAGE_ML, MANAGE_WATCHER, MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES, MANAGE_INGEST_PIPELINES, TRANSPORT_CLIENT, + public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_DATA_FRAME_TRANSFORMS, MONITOR_ML, + MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE, MANAGE_DATA_FRAME_TRANSFORMS, + MANAGE_ML, MANAGE_WATCHER, MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES, MANAGE_INGEST_PIPELINES, TRANSPORT_CLIENT, MANAGE_SECURITY, MANAGE_SAML, MANAGE_OIDC, MANAGE_TOKEN, MANAGE_PIPELINE, MANAGE_CCR, READ_CCR, MANAGE_ILM, READ_ILM, MANAGE_ENRICH }; } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index 5ec2265d045..71b6cfe3337 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -60,6 +60,7 @@ import org.junit.After; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -71,6 +72,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.oneOf; @@ -277,6 +279,7 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { assertThat(taskState, is(DataFrameTransformTaskState.STOPPED)); } + @SuppressWarnings("unchecked") public void testPreview() throws IOException { String sourceIndex = "transform-source"; createIndex(sourceIndex); @@ -298,6 +301,12 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { Optional> michel = docs.stream().filter(doc -> "michel".equals(doc.get("reviewer"))).findFirst(); assertTrue(michel.isPresent()); assertEquals(3.6d, (double) michel.get().get("avg_rating"), 0.1d); + + Map mappings = preview.getMappings(); + assertThat(mappings, hasKey("properties")); + Map fields = (Map)mappings.get("properties"); + assertThat(fields.get("reviewer"), equalTo(Collections.singletonMap("type", "keyword"))); + assertThat(fields.get("avg_rating"), equalTo(Collections.singletonMap("type", "double"))); } private DataFrameTransformConfig validDataFrameTransformConfig(String id, String source, String destination) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 458e6371010..59d76142566 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -73,6 +73,8 @@ import org.elasticsearch.client.indices.IndexTemplateMetaData; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersResponse; import org.elasticsearch.client.indices.UnfreezeIndexRequest; import 
org.elasticsearch.client.indices.rollover.RolloverRequest; import org.elasticsearch.client.indices.rollover.RolloverResponse; @@ -1877,4 +1879,14 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { assertTrue(unfreeze.isShardsAcknowledged()); assertTrue(unfreeze.isAcknowledged()); } + + public void testReloadAnalyzer() throws IOException { + createIndex("test", Settings.EMPTY); + RestHighLevelClient client = highLevelClient(); + + ReloadAnalyzersResponse reloadResponse = execute(new ReloadAnalyzersRequest("test"), client.indices()::reloadAnalyzers, + client.indices()::reloadAnalyzersAsync); + assertNotNull(reloadResponse.shards()); + assertTrue(reloadResponse.getReloadedDetails().containsKey("test")); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java index 8f52dd7b00b..d0f41776357 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java @@ -54,6 +54,7 @@ import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; import org.elasticsearch.client.indices.RandomCreateIndexGenerator; +import org.elasticsearch.client.indices.ReloadAnalyzersRequest; import org.elasticsearch.client.indices.rollover.RolloverRequest; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Strings; @@ -1215,4 +1216,21 @@ public class IndicesRequestConvertersTests extends ESTestCase { Assert.assertThat(request.getParameters(), equalTo(expectedParams)); Assert.assertThat(request.getEntity(), nullValue()); } + + public void testReloadAnalyzers() { + String[] indices = RequestConvertersTests.randomIndicesNames(1, 5); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + if (indices != null && indices.length > 0) { + endpoint.add(String.join(",", indices)); + } + ReloadAnalyzersRequest reloadRequest = new ReloadAnalyzersRequest(indices); + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomIndicesOptions(reloadRequest::setIndicesOptions, reloadRequest::indicesOptions, + expectedParams); + Request request = IndicesRequestConverters.reloadAnalyzers(reloadRequest); + Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + Assert.assertThat(request.getEndpoint(), equalTo(endpoint + "/_reload_search_analyzers")); + Assert.assertThat(request.getParameters(), equalTo(expectedParams)); + Assert.assertThat(request.getEntity(), nullValue()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index 36d71df5f91..01258d12960 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -758,11 +758,15 @@ public class MLRequestConvertersTests extends ESTestCase { public void testStopDataFrameAnalytics_WithParams() { StopDataFrameAnalyticsRequest stopRequest = new StopDataFrameAnalyticsRequest(randomAlphaOfLength(10)) .setTimeout(TimeValue.timeValueMinutes(1)) - .setAllowNoMatch(false); + .setAllowNoMatch(false) 
+ .setForce(true); Request request = MLRequestConverters.stopDataFrameAnalytics(stopRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals("/_ml/data_frame/analytics/" + stopRequest.getId() + "/_stop", request.getEndpoint()); - assertThat(request.getParameters(), allOf(hasEntry("timeout", "1m"), hasEntry("allow_no_match", "false"))); + assertThat(request.getParameters(), allOf( + hasEntry("timeout", "1m"), + hasEntry("allow_no_match", "false"), + hasEntry("force", "true"))); assertNull(request.getEntity()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 77efe43b2e1..93417cab1b3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -1359,6 +1359,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { DataFrameAnalyticsStats stats = statsResponse.getAnalyticsStats().get(0); assertThat(stats.getId(), equalTo(configId)); assertThat(stats.getState(), equalTo(DataFrameAnalyticsState.STOPPED)); + assertNull(stats.getFailureReason()); assertNull(stats.getProgressPercent()); assertNull(stats.getNode()); assertNull(stats.getAssignmentExplanation()); @@ -1405,6 +1406,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { assertTrue(highLevelClient().indices().exists(new GetIndexRequest(destIndex), RequestOptions.DEFAULT)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/43924") public void testStopDataFrameAnalyticsConfig() throws Exception { String sourceIndex = "stop-test-source-index"; String destIndex = "stop-test-dest-index"; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 650bf2e4403..ae1cd5eb45e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -141,7 +141,7 @@ public class RestHighLevelClientTests extends ESTestCase { // core "ping", "info", // security - "security.get_ssl_certificates", "security.authenticate", "security.get_user_privileges", + "security.get_ssl_certificates", "security.authenticate", "security.get_user_privileges", "security.get_builtin_privileges", // license "license.get_trial_status", "license.get_basic_status" @@ -824,6 +824,7 @@ public class RestHighLevelClientTests extends ESTestCase { apiName.startsWith("ccr.") == false && apiName.startsWith("data_frame") == false && apiName.endsWith("freeze") == false && + apiName.endsWith("reload_analyzers") == false && // IndicesClientIT.getIndexTemplate should be renamed "getTemplate" in version 8.0 when we // can get rid of 7.0's deprecated "getTemplate" apiName.equals("indices.get_index_template") == false) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java index 1176cabcc3d..55bbbbe14ca 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java @@ -316,7 +316,7 @@ public class SecurityRequestConvertersTests extends ESTestCase { assertNull(request.getEntity()); } - public void testGetAllApplicationPrivileges() throws Exception { + public void testGetAllPrivilegesForApplication() throws Exception { final String application = randomAlphaOfLength(6); GetPrivilegesRequest getPrivilegesRequest = GetPrivilegesRequest.getApplicationPrivileges(application); Request request = SecurityRequestConverters.getPrivileges(getPrivilegesRequest); @@ -340,7 +340,7 @@ public class SecurityRequestConvertersTests extends ESTestCase { assertNull(request.getEntity()); } - public void testGetAllPrivileges() throws Exception { + public void testGetAllApplicationPrivileges() throws Exception { GetPrivilegesRequest getPrivilegesRequest = GetPrivilegesRequest.getAllPrivileges(); Request request = SecurityRequestConverters.getPrivileges(getPrivilegesRequest); assertEquals(HttpGet.METHOD_NAME, request.getMethod()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponseTests.java index 31f1a26d6f1..28b7e52aac1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponseTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -53,8 +54,13 @@ public class PreviewDataFrameTransformResponseTests extends ESTestCase { } docs.add(doc); } + int numMappingEntries = randomIntBetween(5, 10); + Map mappings = new HashMap<>(numMappingEntries); + for (int i = 0; i < numMappingEntries; i++) { + mappings.put(randomAlphaOfLength(10), Collections.singletonMap("type", randomAlphaOfLength(10))); + } - return new PreviewDataFrameTransformResponse(docs); + return new PreviewDataFrameTransformResponse(docs, mappings); } private void toXContent(PreviewDataFrameTransformResponse response, XContentBuilder builder) throws IOException { @@ -64,6 +70,7 @@ public class PreviewDataFrameTransformResponseTests extends ESTestCase { builder.map(doc); } builder.endArray(); + builder.field("mappings", response.getMappings()); builder.endObject(); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java index 4ada50c20d2..ebb62890c3c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.client.dataframe.transforms; import org.elasticsearch.client.core.IndexerState; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.ESTestCase; @@ -37,7 +38,8 @@ public class DataFrameTransformStateTests extends ESTestCase { DataFrameTransformStateTests::toXContent, 
DataFrameTransformState::fromXContent) .supportsUnknownFields(true) - .randomFieldsExcludeFilter(field -> field.equals("current_position")) + .randomFieldsExcludeFilter(field -> field.equals("current_position") || + field.equals("node.attributes")) .test(); } @@ -47,7 +49,8 @@ public class DataFrameTransformStateTests extends ESTestCase { randomPositionMap(), randomLongBetween(0,10), randomBoolean() ? null : randomAlphaOfLength(10), - randomBoolean() ? null : DataFrameTransformProgressTests.randomInstance()); + randomBoolean() ? null : DataFrameTransformProgressTests.randomInstance(), + randomBoolean() ? null : NodeAttributesTests.createRandom()); } public static void toXContent(DataFrameTransformState state, XContentBuilder builder) throws IOException { @@ -65,6 +68,10 @@ public class DataFrameTransformStateTests extends ESTestCase { builder.field("progress"); DataFrameTransformProgressTests.toXContent(state.getProgress(), builder); } + if (state.getNode() != null) { + builder.field("node"); + state.getNode().toXContent(builder, ToXContent.EMPTY_PARAMS); + } builder.endObject(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/NodeAttributesTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/NodeAttributesTests.java new file mode 100644 index 00000000000..661aa9f7a30 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/NodeAttributesTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; + +public class NodeAttributesTests extends AbstractXContentTestCase { + + public static NodeAttributes createRandom() { + int numberOfAttributes = randomIntBetween(1, 10); + Map attributes = new HashMap<>(numberOfAttributes); + for(int i = 0; i < numberOfAttributes; i++) { + String val = randomAlphaOfLength(10); + attributes.put("key-"+i, val); + } + return new NodeAttributes(randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + attributes); + } + + @Override + protected NodeAttributes createTestInstance() { + return createRandom(); + } + + @Override + protected NodeAttributes doParseInstance(XContentParser parser) throws IOException { + return NodeAttributes.PARSER.parse(parser, null); + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> !field.isEmpty(); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java index ad08881fb56..dde44898bf9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java @@ -64,7 +64,7 @@ public class DataFrameTransformStateAndStatsTests extends AbstractHlrcXContentTe @Override protected Predicate getRandomFieldsExcludeFilter() { - return field -> field.equals("state.current_position"); + return field -> field.equals("state.current_position") || field.equals("state.node") || field.equals("state.node.attributes"); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java index 4c80365bc53..b97e0a72c1f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgr import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; +import org.elasticsearch.xpack.core.dataframe.transforms.NodeAttributes; import org.elasticsearch.xpack.core.indexing.IndexerState; import java.io.IOException; @@ -40,8 +41,20 @@ public class DataFrameTransformStateTests extends AbstractHlrcXContentTestCase getRandomFieldsExcludeFilter() { - return field -> field.equals("current_position"); + return field -> field.equals("current_position") || field.equals("node.attributes"); } public static 
DataFrameTransformStateAndStats randomDataFrameTransformStateAndStats(String id) { @@ -97,6 +110,20 @@ public class DataFrameTransformStateTests extends AbstractHlrcXContentTestCase attributes = new HashMap<>(numberOfAttributes); + for(int i = 0; i < numberOfAttributes; i++) { + String val = randomAlphaOfLength(10); + attributes.put("key-"+i, val); + } + return new NodeAttributes(randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + attributes); + } + public static DataFrameIndexerTransformStats randomStats(String transformId) { return new DataFrameIndexerTransformStats(transformId, randomLongBetween(10L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), @@ -110,7 +137,8 @@ public class DataFrameTransformStateTests extends AbstractHlrcXContentTestCase randomPosition() { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index 1301a952676..731d42f902c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -45,6 +45,7 @@ import org.elasticsearch.client.dataframe.transforms.DataFrameTransformProgress; import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.client.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.client.dataframe.transforms.DestConfig; +import org.elasticsearch.client.dataframe.transforms.NodeAttributes; import org.elasticsearch.client.dataframe.transforms.QueryConfig; import org.elasticsearch.client.dataframe.transforms.SourceConfig; import org.elasticsearch.client.dataframe.transforms.pivot.AggregationConfig; @@ -447,6 +448,7 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest // end::preview-data-frame-transform-execute assertNotNull(response.getDocs()); + assertNotNull(response.getMappings()); } { // tag::preview-data-frame-transform-execute-listener @@ -532,6 +534,8 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest stateAndStats.getTransformStats(); // <4> DataFrameTransformProgress progress = stateAndStats.getTransformState().getProgress(); // <5> + NodeAttributes node = + stateAndStats.getTransformState().getNode(); // <6> // end::get-data-frame-transform-stats-response assertEquals(IndexerState.STOPPED, indexerState); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 8e0a3d2fd00..f878f0f6f7d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -58,6 +58,7 @@ import org.elasticsearch.client.GetAliasesResponse; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.SyncedFlushResponse; +import org.elasticsearch.client.core.BroadcastResponse.Shards; import 
org.elasticsearch.client.core.ShardsAcknowledgedResponse; import org.elasticsearch.client.indices.AnalyzeRequest; import org.elasticsearch.client.indices.AnalyzeResponse; @@ -77,6 +78,9 @@ import org.elasticsearch.client.indices.IndexTemplateMetaData; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersResponse; +import org.elasticsearch.client.indices.ReloadAnalyzersResponse.ReloadDetails; import org.elasticsearch.client.indices.UnfreezeIndexRequest; import org.elasticsearch.client.indices.rollover.RolloverRequest; import org.elasticsearch.client.indices.rollover.RolloverResponse; @@ -2748,4 +2752,77 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + + public void testReloadSearchAnalyzers() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + // tag::reload-analyzers-request + ReloadAnalyzersRequest request = new ReloadAnalyzersRequest("index"); // <1> + // end::reload-analyzers-request + + // tag::reload-analyzers-request-indicesOptions + request.setIndicesOptions(IndicesOptions.strictExpandOpen()); // <1> + // end::reload-analyzers-request-indicesOptions + + // tag::reload-analyzers-execute + ReloadAnalyzersResponse reloadResponse = client.indices().reloadAnalyzers(request, RequestOptions.DEFAULT); + // end::reload-analyzers-execute + + // tag::reload-analyzers-response + Shards shards = reloadResponse.shards(); // <1> + Map reloadDetails = reloadResponse.getReloadedDetails(); // <2> + ReloadDetails details = reloadDetails.get("index"); // <3> + String indexName = details.getIndexName(); // <4> + Set indicesNodes = details.getReloadedIndicesNodes(); // <5> + Set analyzers = details.getReloadedAnalyzers(); // <6> + // end::reload-analyzers-response + assertNotNull(shards); + assertEquals("index", indexName); + assertEquals(1, indicesNodes.size()); + assertEquals(0, analyzers.size()); + + // tag::reload-analyzers-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(ReloadAnalyzersResponse reloadResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::reload-analyzers-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::reload-analyzers-execute-async + client.indices().reloadAnalyzersAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::reload-analyzers-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + { + // tag::reload-analyzers-notfound + try { + ReloadAnalyzersRequest request = new ReloadAnalyzersRequest("does_not_exist"); + client.indices().reloadAnalyzers(request, RequestOptions.DEFAULT); + } catch (ElasticsearchException exception) { + if (exception.status() == RestStatus.BAD_REQUEST) { + // <1> + } + } + // end::reload-analyzers-notfound + } + } } diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 5c9017b7706..033861563dc 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -3110,6 +3110,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { { // tag::stop-data-frame-analytics-request StopDataFrameAnalyticsRequest request = new StopDataFrameAnalyticsRequest("my-analytics-config"); // <1> + request.setForce(false); // <2> // end::stop-data-frame-analytics-request // tag::stop-data-frame-analytics-execute diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index c1add98084f..2230f925a39 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -50,6 +50,7 @@ import org.elasticsearch.client.security.EnableUserRequest; import org.elasticsearch.client.security.ExpressionRoleMapping; import org.elasticsearch.client.security.GetApiKeyRequest; import org.elasticsearch.client.security.GetApiKeyResponse; +import org.elasticsearch.client.security.GetBuiltinPrivilegesResponse; import org.elasticsearch.client.security.GetPrivilegesRequest; import org.elasticsearch.client.security.GetPrivilegesResponse; import org.elasticsearch.client.security.GetRoleMappingsRequest; @@ -118,6 +119,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.iterableWithSize; @@ -1497,6 +1499,60 @@ public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase { } } + public void testGetBuiltinPrivileges() throws Exception { + final RestHighLevelClient client = highLevelClient(); + { + //tag::get-builtin-privileges-execute + GetBuiltinPrivilegesResponse response = client.security().getBuiltinPrivileges(RequestOptions.DEFAULT); + //end::get-builtin-privileges-execute + + assertNotNull(response); + //tag::get-builtin-privileges-response + final Set cluster = response.getClusterPrivileges(); + final Set index = response.getIndexPrivileges(); + //end::get-builtin-privileges-response + + assertThat(cluster, hasItem("all")); + assertThat(cluster, hasItem("manage")); + assertThat(cluster, hasItem("monitor")); + assertThat(cluster, hasItem("manage_security")); + + assertThat(index, hasItem("all")); + assertThat(index, hasItem("manage")); + assertThat(index, hasItem("monitor")); + assertThat(index, hasItem("read")); + assertThat(index, hasItem("write")); + } + { + // tag::get-builtin-privileges-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(GetBuiltinPrivilegesResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // 
end::get-builtin-privileges-execute-listener + + // Replace the empty listener by a blocking listener in test + final PlainActionFuture future = new PlainActionFuture<>(); + listener = future; + + // tag::get-builtin-privileges-execute-async + client.security().getBuiltinPrivilegesAsync(RequestOptions.DEFAULT, listener); // <1> + // end::get-builtin-privileges-execute-async + + final GetBuiltinPrivilegesResponse response = future.get(30, TimeUnit.SECONDS); + assertNotNull(response); + assertThat(response.getClusterPrivileges(), hasItem("manage_security")); + assertThat(response.getIndexPrivileges(), hasItem("read")); + } + } + public void testGetPrivileges() throws Exception { final RestHighLevelClient client = highLevelClient(); final ApplicationPrivilege readTestappPrivilege = @@ -1556,9 +1612,9 @@ public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase { assertNotNull(response); assertThat(response.getPrivileges().size(), equalTo(3)); - final GetPrivilegesResponse exptectedResponse = + final GetPrivilegesResponse expectedResponse = new GetPrivilegesResponse(Arrays.asList(readTestappPrivilege, writeTestappPrivilege, allTestappPrivilege)); - assertThat(response, equalTo(exptectedResponse)); + assertThat(response, equalTo(expectedResponse)); //tag::get-privileges-response Set privileges = response.getPrivileges(); //end::get-privileges-response diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/ReloadAnalyzersResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/ReloadAnalyzersResponseTests.java new file mode 100644 index 00000000000..6719e10808e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/ReloadAnalyzersResponseTests.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; +import org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse.ReloadDetails; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.isIn; + +public class ReloadAnalyzersResponseTests + extends AbstractResponseTestCase { + + private String index; + private String id; + private Set shardIds; + + @Override + protected org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse createServerTestInstance() { + index = randomAlphaOfLength(8); + id = randomAlphaOfLength(8); + final int total = randomIntBetween(1, 16); + final int successful = total - scaledRandomIntBetween(0, total); + final int failed = scaledRandomIntBetween(0, total - successful); + final List failures = new ArrayList<>(); + shardIds = new HashSet<>(); + for (int i = 0; i < failed; i++) { + final DefaultShardOperationFailedException failure = new DefaultShardOperationFailedException( + index, + randomValueOtherThanMany(shardIds::contains, () -> randomIntBetween(0, total - 1)), + new RetentionLeaseNotFoundException(id)); + failures.add(failure); + shardIds.add(failure.shardId()); + } + Map reloadedDetailsMap = new HashMap<>(); + int randomIndices = randomIntBetween(0, 5); + for (int i = 0; i < randomIndices; i++) { + String indexName = randomAlphaOfLengthBetween(5, 10); + Set randomNodeIds = new HashSet<>(Arrays.asList(generateRandomStringArray(5, 5, false, true))); + Set randomAnalyzers = new HashSet<>(Arrays.asList(generateRandomStringArray(5, 5, false, true))); + + ReloadDetails reloadedDetails = new ReloadDetails(indexName, randomNodeIds, randomAnalyzers); + reloadedDetailsMap.put(indexName, reloadedDetails); + } + return new org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse(total, successful, failed, failures, reloadedDetailsMap); + } + + @Override + protected ReloadAnalyzersResponse doParseToClientInstance(XContentParser parser) throws IOException { + return ReloadAnalyzersResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse serverTestInstance, + ReloadAnalyzersResponse clientInstance) { + assertThat(clientInstance.shards().total(), equalTo(serverTestInstance.getTotalShards())); + assertThat(clientInstance.shards().successful(), equalTo(serverTestInstance.getSuccessfulShards())); + assertThat(clientInstance.shards().skipped(), equalTo(0)); + assertThat(clientInstance.shards().failed(), equalTo(serverTestInstance.getFailedShards())); + assertThat(clientInstance.shards().failures(), hasSize(clientInstance.shards().failed() == 0 ? 
0 : 1)); // failures are grouped + if (clientInstance.shards().failed() > 0) { + final DefaultShardOperationFailedException groupedFailure = clientInstance.shards().failures().iterator().next(); + assertThat(groupedFailure.index(), equalTo(index)); + assertThat(groupedFailure.shardId(), isIn(shardIds)); + assertThat(groupedFailure.reason(), containsString("reason=retention lease with ID [" + id + "] not found")); + } + Map serverDetails = serverTestInstance.getReloadDetails(); + assertThat(clientInstance.getReloadedDetails().size(), equalTo(serverDetails.size())); + for (Entry entry : clientInstance + .getReloadedDetails().entrySet()) { + String indexName = entry.getKey(); + assertTrue(serverDetails.keySet().contains(indexName)); + assertEquals(serverDetails.get(indexName).getIndexName(), entry.getValue().getIndexName()); + assertEquals(serverDetails.get(indexName).getReloadedAnalyzers(), entry.getValue().getReloadedAnalyzers()); + assertEquals(serverDetails.get(indexName).getReloadedIndicesNodes(), entry.getValue().getReloadedIndicesNodes()); + } + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStatsTests.java index ed6e24f754d..fad02eac161 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStatsTests.java @@ -43,6 +43,7 @@ public class DataFrameAnalyticsStatsTests extends ESTestCase { return new DataFrameAnalyticsStats( randomAlphaOfLengthBetween(1, 10), randomFrom(DataFrameAnalyticsState.values()), + randomBoolean() ? null : randomAlphaOfLength(10), randomBoolean() ? null : randomIntBetween(0, 100), randomBoolean() ? null : NodeAttributesTests.createRandom(), randomBoolean() ? 
null : randomAlphaOfLengthBetween(1, 20)); @@ -52,6 +53,9 @@ public class DataFrameAnalyticsStatsTests extends ESTestCase { builder.startObject(); builder.field(DataFrameAnalyticsStats.ID.getPreferredName(), stats.getId()); builder.field(DataFrameAnalyticsStats.STATE.getPreferredName(), stats.getState().value()); + if (stats.getFailureReason() != null) { + builder.field(DataFrameAnalyticsStats.FAILURE_REASON.getPreferredName(), stats.getFailureReason()); + } if (stats.getProgressPercent() != null) { builder.field(DataFrameAnalyticsStats.PROGRESS_PERCENT.getPreferredName(), stats.getProgressPercent()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/CreateTokenRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/CreateTokenRequestTests.java index 53f3e1d0f36..760d5e52cb3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/CreateTokenRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/CreateTokenRequestTests.java @@ -66,31 +66,54 @@ public class CreateTokenRequestTests extends ESTestCase { assertThat(Strings.toString(request), equalTo("{\"grant_type\":\"client_credentials\"}")); } + public void testCreateTokenFromKerberosTicket() { + final CreateTokenRequest request = CreateTokenRequest.kerberosGrant("top secret kerberos ticket".toCharArray()); + assertThat(request.getGrantType(), equalTo("_kerberos")); + assertThat(request.getScope(), nullValue()); + assertThat(request.getUsername(), nullValue()); + assertThat(request.getPassword(), nullValue()); + assertThat(request.getRefreshToken(), nullValue()); + assertThat(new String(request.getKerberosTicket()), equalTo("top secret kerberos ticket")); + assertThat(Strings.toString(request), equalTo("{\"grant_type\":\"_kerberos\"," + + "\"kerberos_ticket\":\"top secret kerberos ticket\"}")); + } + public void testEqualsAndHashCode() { final String grantType = randomAlphaOfLength(8); final String scope = randomBoolean() ? null : randomAlphaOfLength(6); final String username = randomBoolean() ? null : randomAlphaOfLengthBetween(4, 10); final char[] password = randomBoolean() ? null : randomAlphaOfLengthBetween(8, 12).toCharArray(); final String refreshToken = randomBoolean() ? null : randomAlphaOfLengthBetween(12, 24); - final CreateTokenRequest request = new CreateTokenRequest(grantType, scope, username, password, refreshToken); + final char[] kerberosTicket = randomBoolean() ? 
null : randomAlphaOfLengthBetween(8, 12).toCharArray(); + final CreateTokenRequest request = new CreateTokenRequest(grantType, scope, username, password, refreshToken, kerberosTicket); EqualsHashCodeTestUtils.checkEqualsAndHashCode(request, - r -> new CreateTokenRequest(r.getGrantType(), r.getScope(), r.getUsername(), r.getPassword(), r.getRefreshToken()), + r -> new CreateTokenRequest(r.getGrantType(), r.getScope(), r.getUsername(), r.getPassword(), + r.getRefreshToken(), r.getKerberosTicket()), this::mutate); } private CreateTokenRequest mutate(CreateTokenRequest req) { - switch (randomIntBetween(1, 5)) { - case 1: - return new CreateTokenRequest("g", req.getScope(), req.getUsername(), req.getPassword(), req.getRefreshToken()); - case 2: - return new CreateTokenRequest(req.getGrantType(), "s", req.getUsername(), req.getPassword(), req.getRefreshToken()); - case 3: - return new CreateTokenRequest(req.getGrantType(), req.getScope(), "u", req.getPassword(), req.getRefreshToken()); - case 4: - final char[] password = {'p'}; - return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), password, req.getRefreshToken()); - case 5: - return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), req.getPassword(), "r"); + switch (randomIntBetween(1, 6)) { + case 1: + return new CreateTokenRequest("g", req.getScope(), req.getUsername(), req.getPassword(), req.getRefreshToken(), + req.getKerberosTicket()); + case 2: + return new CreateTokenRequest(req.getGrantType(), "s", req.getUsername(), req.getPassword(), req.getRefreshToken(), + req.getKerberosTicket()); + case 3: + return new CreateTokenRequest(req.getGrantType(), req.getScope(), "u", req.getPassword(), req.getRefreshToken(), + req.getKerberosTicket()); + case 4: + final char[] password = { 'p' }; + return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), password, req.getRefreshToken(), + req.getKerberosTicket()); + case 5: + final char[] kerberosTicket = { 'k' }; + return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), req.getPassword(), req.getRefreshToken(), + kerberosTicket); + case 6: + return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), req.getPassword(), "r", + req.getKerberosTicket()); } throw new IllegalStateException("Bad random number"); } diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 14bdc01932a..c97bb7a6138 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,8 +1,8 @@ -:version: 7.3.0 +:version: 7.4.0 //// bare_version never includes -alpha or -beta //// -:bare_version: 7.3.0 +:bare_version: 7.4.0 :major-version: 7.x :prev-major-version: 6.x :lucene_version: 8.1.0 diff --git a/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc b/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc index 4360157b4a4..8a3e1a96acb 100644 --- a/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc +++ b/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc @@ -52,4 +52,5 @@ include-tagged::{doc-tests-file}[{api}-response] <3> The running state of the transform indexer e.g `started`, `indexing`, etc. <4> The overall transform statistics recording the number of documents indexed etc. <5> The progress of the current run in the transform. Supplies the number of docs left until the next checkpoint -and the total number of docs expected. \ No newline at end of file +and the total number of docs expected. 
+<6> The assigned node information if the task is currently assigned to a node and running. \ No newline at end of file diff --git a/docs/java-rest/high-level/indices/reload_analyzers.asciidoc b/docs/java-rest/high-level/indices/reload_analyzers.asciidoc new file mode 100644 index 00000000000..29db206bf14 --- /dev/null +++ b/docs/java-rest/high-level/indices/reload_analyzers.asciidoc @@ -0,0 +1,50 @@ +-- +:api: reload-analyzers +:request: ReloadAnalyzersRequest +:response: ReloadAnalyzersResponse +-- + +[id="{upid}-{api}"] +=== Reload Search Analyzers API + +[id="{upid}-{api}-request"] +==== Reload Search Analyzers Request + +An +{request}+ requires an `index` argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The index to reload + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Reload Search Analyzers Response + +The returned +{response}+ allows to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Shard statistics. Note that reloading does not happen on each shard of an +index, but once on each node the index has shards on. The reported shard count +can therefore differ from the number of index shards +<2> Reloading details of all indices the request was executed on +<3> Details can be retrieved by index name +<4> The reloaded index name +<5> The nodes the index was reloaded on +<6> The analyzer names that were reloaded diff --git a/docs/java-rest/high-level/ml/stop-data-frame-analytics.asciidoc b/docs/java-rest/high-level/ml/stop-data-frame-analytics.asciidoc index 243c075e18b..3a06f268836 100644 --- a/docs/java-rest/high-level/ml/stop-data-frame-analytics.asciidoc +++ b/docs/java-rest/high-level/ml/stop-data-frame-analytics.asciidoc @@ -19,6 +19,7 @@ A +{request}+ object requires a {dataframe-analytics-config} id. 
include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- <1> Constructing a new stop request referencing an existing {dataframe-analytics-config} +<2> Optionally used to stop a failed task include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc b/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc new file mode 100644 index 00000000000..e8eeb7b3c94 --- /dev/null +++ b/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc @@ -0,0 +1,27 @@ +-- +:api: get-builtin-privileges +:request: GetBuiltinPrivilegesRequest +:response: GetBuiltinPrivilegesResponse +-- + +[id="{upid}-{api}"] +=== Get Builtin Privileges API + +include::../execution-no-req.asciidoc[] + +[id="{upid}-{api}-response"] +==== Get Builtin Privileges Response + +The returned +{response}+ contains the following properties + +`clusterPrivileges`:: +A `Set` of all _cluster_ privileges that are understood by this node. + +`indexPrivileges`:: +A `Set` of all _index_ privileges that are understood by this node. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- + diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 21ebdfab651..27f5f38136d 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -155,6 +155,7 @@ include::indices/get_index.asciidoc[] include::indices/freeze_index.asciidoc[] include::indices/unfreeze_index.asciidoc[] include::indices/delete_template.asciidoc[] +include::indices/reload_analyzers.asciidoc[] == Cluster APIs @@ -418,6 +419,7 @@ The Java High Level REST Client supports the following Security APIs: * <> * <> * <<{upid}-invalidate-token>> +* <<{upid}-get-builtin-privileges>> * <<{upid}-get-privileges>> * <<{upid}-put-privileges>> * <<{upid}-delete-privileges>> @@ -435,6 +437,7 @@ include::security/put-role.asciidoc[] include::security/get-roles.asciidoc[] include::security/delete-role.asciidoc[] include::security/delete-privileges.asciidoc[] +include::security/get-builtin-privileges.asciidoc[] include::security/get-privileges.asciidoc[] include::security/clear-roles-cache.asciidoc[] include::security/clear-realm-cache.asciidoc[] @@ -582,4 +585,4 @@ include::dataframe/put_data_frame.asciidoc[] include::dataframe/delete_data_frame.asciidoc[] include::dataframe/preview_data_frame.asciidoc[] include::dataframe/start_data_frame.asciidoc[] -include::dataframe/stop_data_frame.asciidoc[] \ No newline at end of file +include::dataframe/stop_data_frame.asciidoc[] diff --git a/docs/painless/painless-guide/painless-datetime.asciidoc b/docs/painless/painless-guide/painless-datetime.asciidoc index 5e98b39aaa7..45eb34b7572 100644 --- a/docs/painless/painless-guide/painless-datetime.asciidoc +++ b/docs/painless/painless-guide/painless-datetime.asciidoc @@ -29,7 +29,7 @@ complex:: a datetime representation as a complex type (<>) that abstracts away internal details of how the datetime is stored and often provides utilities for modification and comparison; in Painless this is typically a -<> +<> Switching between different representations of datetimes is often necessary to achieve a script's objective(s). 
A typical pattern in a script is to switch a @@ -335,6 +335,43 @@ if (zdt1.isAfter(zdt2)) { } ---- +==== Datetime Zone + +Both string datetimes and complex datetimes have a timezone with a default of +`UTC`. Numeric datetimes do not have enough explicit information to +have a timezone, so `UTC` is always assumed. Use +<> (or fields) in +conjunction with a <> to change +the timezone for a complex datetime. Parse a string datetime into a complex +datetime to change the timezone, and then format the complex datetime back into +a desired string datetime. Note many complex datetimes are immutable so upon +modification a new complex datetime is created that requires +<> or immediate use. + +===== Datetime Zone Examples + +* Modify the timezone for a complex datetime ++ +[source,Painless] +---- +ZonedDateTime utc = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z')); +ZonedDateTime pst = utc.withZoneSameInstant(ZoneId.of('America/Los_Angeles')); +---- ++ +* Modify the timezone for a string datetime ++ +[source,Painless] +---- +String gmtString = 'Thu, 13 Oct 1983 22:15:30 GMT'; +ZonedDateTime gmtZdt = ZonedDateTime.parse(gmtString, + DateTimeFormatter.RFC_1123_DATE_TIME); <1> +ZonedDateTime pstZdt = + gmtZdt.withZoneSameInstant(ZoneId.of('America/Los_Angeles')); +String pstString = pstZdt.format(DateTimeFormatter.RFC_1123_DATE_TIME); +---- +<1> Note the use of a built-in DateTimeFormatter. + ==== Datetime Input There are several common ways datetimes are used as input for a script @@ -372,7 +409,7 @@ through an input called `params`. + [source,Painless] ---- -long inputDatetime = params['input_datetime']; +long inputDateTime = params['input_datetime']; Instant instant = Instant.ofEpochMilli(inputDateTime); ZonedDateTime zdt = ZonedDateTime.ofInstant(instant, ZoneId.of('Z')); ---- @@ -432,7 +469,7 @@ source document is most commonly accessible through an input called + [source,Painless] ---- -long inputDatetime = ctx['_source']['input_datetime']; <1> +long inputDateTime = ctx['_source']['input_datetime']; <1> Instant instant = Instant.ofEpochMilli(inputDateTime); ZonedDateTime zdt = ZonedDateTime.ofInstant(instant, ZoneId.of('Z')); ---- @@ -513,10 +550,9 @@ String output = input.format(DateTimeFormatter.ISO_INSTANT); <1> + ** Assumptions: + -*** The fields `start_datetime` and `end_datetime` may *not* exist in all -indexes as part of the query -*** The fields `start_datetime` and `end_datetime` may *not* have values in all -indexed documents +*** The fields `start` and `end` may *not* exist in all indexes as part of the +query +*** The fields `start` and `end` may *not* have values in all indexed documents + ** Mappings: + @@ -527,10 +563,10 @@ indexed documents ... "properties": { ... - "start_datetime": { + "start": { "type": "date" }, - "end_datetime": { + "end": { "type": "date" } ... 
@@ -544,14 +580,13 @@ indexed documents + [source,Painless] ---- -if (doc.containsKey('start_datetime') && doc.containsKey('end_datetime')) { <1> +if (doc.containsKey('start') && doc.containsKey('end')) { <1> - if (doc['start_datetime'].size() > 0 && doc['end_datetime'].size() > 0) { <2> + if (doc['start'].size() > 0 && doc['end'].size() > 0) { <2> - def startDatetime = doc['start_datetime'].value; - def endDatetime = doc['end_datetime'].value; - long differenceInMillis = - ChronoUnit.MILLIS.between(startDateTime, endDateTime); + def start = doc['start'].value; + def end = doc['end'].value; + long differenceInMillis = ChronoUnit.MILLIS.between(start, end); // handle difference in times } else { @@ -564,6 +599,122 @@ if (doc.containsKey('start_datetime') && doc.containsKey('end_datetime')) { <1> <1> When a query's results span multiple indexes, some indexes may not contain a specific field. Use the `containsKey` method call on the `doc` input to ensure a field exists as part of the index for the current document. -<2> Some field's within a document may have no values. Use the `size` method +<2> Some fields within a document may have no values. Use the `size` method call on a field within the `doc` input to ensure that field has at least one value for the current document. + +==== Datetime Now + +Under most Painless contexts the current datetime, `now`, is not supported. +There are two primary reasons for this. The first is that scripts are often run once +per document, so each time the script is run a different `now` is returned. The +second is that scripts are often run in a distributed fashion without a way to +appropriately synchronize `now`. Instead, pass in a user-defined parameter with +either a string datetime or numeric datetime for `now`. A numeric datetime is +preferred as there is no need to parse it for comparison. + +===== Datetime Now Examples + +* Use a numeric datetime as `now` ++ +** Assumptions: ++ +*** The field `input_datetime` exists in all indexes as part of the query +*** All indexed documents contain the field `input_datetime` ++ +** Mappings: ++ +[source,JSON] +---- +{ + "mappings": { + ... + "properties": { + ... + "input_datetime": { + "type": "date" + } + ... + } + ... + } +} +---- ++ +** Input: ++ +[source,JSON] +---- +... +"script": { + ... + "params": { + "now": + } +} +... +---- ++ +** Script: ++ +[source,Painless] +---- +long now = params['now']; +def inputDateTime = doc['input_datetime'].value; +long millisDateTime = inputDateTime.toInstant().toEpochMilli(); +long elapsedTime = now - millisDateTime; +---- ++ +* Use a string datetime as `now` ++ +** Assumptions: ++ +*** The field `input_datetime` exists in all indexes as part of the query +*** All indexed documents contain the field `input_datetime` ++ +** Mappings: ++ +[source,JSON] +---- +{ + "mappings": { + ... + "properties": { + ... + "input_datetime": { + "type": "date" + } + ... + } + ... + } +} +---- ++ +** Input: ++ +[source,JSON] +---- +... +"script": { + ... + "params": { + "now": "" + } +} +... +---- ++ +** Script: ++ +[source,Painless] +---- +String nowString = params['now']; +ZonedDateTime nowZdt = ZonedDateTime.parse(nowString); <1> +long now = nowZdt.toInstant().toEpochMilli(); +def inputDateTime = doc['input_datetime'].value; +long millisDateTime = inputDateTime.toInstant().toEpochMilli(); +long elapsedTime = now - millisDateTime; +---- +<1> Note this parses the same string datetime every time the script runs. Use a +numeric datetime to avoid a significant performance hit.
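+
+As a further illustration, the following is a minimal, hypothetical request (the
+`1561939200000` timestamp is a placeholder, and the script assumes the
+`input_datetime` mapping shown above) that passes a numeric `now` as a
+parameter and matches documents whose `input_datetime` falls within one day of it:
+
+[source,js]
+----
+GET /_search
+{
+  "query": {
+    "script": {
+      "script": {
+        "source": "long now = params['now']; def zdt = doc['input_datetime'].value; long millis = zdt.toInstant().toEpochMilli(); return now - millis < 86400000L;",
+        "params": {
+          "now": 1561939200000
+        }
+      }
+    }
+  }
+}
+----
+// NOTCONSOLE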
diff --git a/docs/reference/aggregations/bucket.asciidoc b/docs/reference/aggregations/bucket.asciidoc index b9fbddc65c1..9f186ef1ffb 100644 --- a/docs/reference/aggregations/bucket.asciidoc +++ b/docs/reference/aggregations/bucket.asciidoc @@ -55,6 +55,8 @@ include::bucket/parent-aggregation.asciidoc[] include::bucket/range-aggregation.asciidoc[] +include::bucket/rare-terms-aggregation.asciidoc[] + include::bucket/reverse-nested-aggregation.asciidoc[] include::bucket/sampler-aggregation.asciidoc[] @@ -64,3 +66,4 @@ include::bucket/significantterms-aggregation.asciidoc[] include::bucket/significanttext-aggregation.asciidoc[] include::bucket/terms-aggregation.asciidoc[] + diff --git a/docs/reference/aggregations/bucket/rare-terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/rare-terms-aggregation.asciidoc new file mode 100644 index 00000000000..e2537b61aef --- /dev/null +++ b/docs/reference/aggregations/bucket/rare-terms-aggregation.asciidoc @@ -0,0 +1,357 @@ +[[search-aggregations-bucket-rare-terms-aggregation]] +=== Rare Terms Aggregation + +A multi-bucket value source based aggregation which finds "rare" terms -- terms that are at the long-tail +of the distribution and are not frequent. Conceptually, this is like a `terms` aggregation that is +sorted by `_count` ascending. As noted in the <>, +actually ordering a `terms` agg by count ascending has unbounded error. Instead, you should use the `rare_terms` +aggregation + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT /products +{ + "mappings": { + "properties": { + "genre": { + "type": "keyword" + }, + "product": { + "type": "keyword" + } + } + } +} + +POST /products/_doc/_bulk?refresh +{"index":{"_id":0}} +{"genre": "rock", "product": "Product A"} +{"index":{"_id":1}} +{"genre": "rock"} +{"index":{"_id":2}} +{"genre": "rock"} +{"index":{"_id":3}} +{"genre": "jazz", "product": "Product Z"} +{"index":{"_id":4}} +{"genre": "jazz"} +{"index":{"_id":5}} +{"genre": "electronic"} +{"index":{"_id":6}} +{"genre": "electronic"} +{"index":{"_id":7}} +{"genre": "electronic"} +{"index":{"_id":8}} +{"genre": "electronic"} +{"index":{"_id":9}} +{"genre": "electronic"} +{"index":{"_id":10}} +{"genre": "swing"} + +------------------------------------------------- +// NOTCONSOLE +// TESTSETUP + +////////////////////////// + +==== Syntax + +A `rare_terms` aggregation looks like this in isolation: + +[source,js] +-------------------------------------------------- +{ + "rare_terms": { + "field": "the_field", + "max_doc_count": 1 + } +} +-------------------------------------------------- +// NOTCONSOLE + +.`rare_terms` Parameters +|=== +|Parameter Name |Description |Required |Default Value +|`field` |The field we wish to find rare terms in |Required | +|`max_doc_count` |The maximum number of documents a term should appear in. |Optional |`1` +|`precision` |The precision of the internal CuckooFilters. Smaller precision leads to +better approximation, but higher memory usage. 
Cannot be smaller than `0.00001` |Optional |`0.001` +|`include` |Terms that should be included in the aggregation|Optional | +|`exclude` |Terms that should be excluded from the aggregation|Optional | +|`missing` |The value that should be used if a document does not have the field being aggregated|Optional | +|=== + + +Example: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "genres" : { + "rare_terms" : { + "field" : "genre" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[s/_search/_search\?filter_path=aggregations/] + +Response: + +[source,js] +-------------------------------------------------- +{ + ... + "aggregations" : { + "genres" : { + "buckets" : [ + { + "key" : "swing", + "doc_count" : 1 + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\.//] + +In this example, the only bucket that we see is the "swing" bucket, because it is the only term that appears in +one document. If we increase the `max_doc_count` to `2`, we'll see some more buckets: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "genres" : { + "rare_terms" : { + "field" : "genre", + "max_doc_count": 2 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[s/_search/_search\?filter_path=aggregations/] + +This now shows the "jazz" term, which has a `doc_count` of 2: + +[source,js] +-------------------------------------------------- +{ + ... + "aggregations" : { + "genres" : { + "buckets" : [ + { + "key" : "swing", + "doc_count" : 1 + }, + { + "key" : "jazz", + "doc_count" : 2 + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\.//] + +[[search-aggregations-bucket-rare-terms-aggregation-max-doc-count]] +==== Maximum document count + +The `max_doc_count` parameter is used to control the upper bound of document counts that a term can have. There +is not a size limitation on the `rare_terms` agg like the `terms` agg has. This means that all terms +which match the `max_doc_count` criteria will be returned. The aggregation functions in this manner to avoid +the order-by-ascending issues that afflict the `terms` aggregation. + +This does, however, mean that a large number of results can be returned if `max_doc_count` is chosen incorrectly. +To limit the danger of this setting, the maximum `max_doc_count` is 100. + +[[search-aggregations-bucket-rare-terms-aggregation-max-buckets]] +==== Max Bucket Limit + +The Rare Terms aggregation is more liable to trip the `search.max_buckets` soft limit than other aggregations due +to how it works. The `search.max_buckets` soft limit is evaluated on a per-shard basis while the aggregation is collecting +results. It is possible for a term to be "rare" on a shard but become "not rare" once all the shard results are +merged together. This means that individual shards tend to collect more buckets than are truly rare, because +they only have their own local view. This list is ultimately pruned to the correct, smaller list of rare +terms on the coordinating node... but a shard may have already tripped the `max_buckets` soft limit and aborted +the request. + +When aggregating on fields that have potentially many "rare" terms, you may need to increase the `max_buckets` soft +limit. Alternatively, you might need to find a way to filter the results to return fewer rare values (smaller time +span, filter by category, etc), or re-evaluate your definition of "rare" (e.g.
if something +appears 100,000 times, is it truly "rare"?) + +[[search-aggregations-bucket-rare-terms-aggregation-approximate-counts]] +==== Document counts are approximate + +The naive way to determine the "rare" terms in a dataset is to place all the values in a map, incrementing counts +as each document is visited, then return the bottom `n` rows. This does not scale beyond even modestly sized data +sets. A sharded approach where only the "top n" values are retained from each shard (a la the `terms` aggregation) +fails because the long-tail nature of the problem means it is impossible to find the "top n" bottom values without +simply collecting all the values from all shards. + +Instead, the Rare Terms aggregation uses a different approximate algorithm: + +1. Values are placed in a map the first time they are seen. +2. Each additional occurrence of the term increments a counter in the map. +3. If the counter > the `max_doc_count` threshold, the term is removed from the map and placed in a +https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf[CuckooFilter] +4. The CuckooFilter is consulted on each term. If the value is inside the filter, it is known to be above the +threshold already and skipped. + +After execution, the map of values is the map of "rare" terms under the `max_doc_count` threshold. This map and CuckooFilter +are then merged with all other shards. Any terms that are greater than the threshold (or appear in +a different shard's CuckooFilter) are removed from the merged list. The final map of values is returned +to the user as the "rare" terms. + +CuckooFilters have the possibility of returning false positives (they can say a value exists in their collection when +it actually does not). Since the CuckooFilter is being used to see if a term is over threshold, this means a false positive +from the CuckooFilter will mistakenly say a value is common when it is not (and thus exclude it from its final list of buckets). +Practically, this means the aggregation exhibits false-negative behavior since the filter is being used "in reverse" +of how people generally think of approximate set membership sketches. + +CuckooFilters are described in more detail in the paper: + +https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf[Fan, Bin, et al. "Cuckoo filter: Practically better than bloom."] +Proceedings of the 10th ACM International on Conference on emerging Networking Experiments and Technologies. ACM, 2014. + +==== Precision + +Although the internal CuckooFilter is approximate in nature, the false-negative rate can be controlled with a +`precision` parameter. This allows the user to trade more runtime memory for more accurate results. + +The default precision is `0.001`, and the smallest (i.e. the most accurate, with the largest memory overhead) is `0.00001`. +Below are some charts which demonstrate how the accuracy of the aggregation is affected by precision and number +of distinct terms. + +The X-axis shows the number of distinct values the aggregation has seen, and the Y-axis shows the percent error. +Each line series represents one "rarity" condition (ranging from one rare item to 100,000 rare items).
For example, +the orange "10" line means ten of the values were "rare" (`doc_count == 1`), out of 1-20m distinct values (where the +rest of the values had `doc_count > 1`) + +This first chart shows precision `0.01`: + +image:images/rare_terms/accuracy_01.png[] + +And precision `0.001` (the default): + +image:images/rare_terms/accuracy_001.png[] + +And finally `precision 0.0001`: + +image:images/rare_terms/accuracy_0001.png[] + +The default precision of `0.001` maintains an accuracy of < 2.5% for the tested conditions, and accuracy slowly +degrades in a controlled, linear fashion as the number of distinct values increases. + +The default precision of `0.001` has a memory profile of `1.748⁻⁶ * n` bytes, where `n` is the number +of distinct values the aggregation has seen (it can also be roughly eyeballed, e.g. 20 million unique values is about +30mb of memory). The memory usage is linear to the number of distinct values regardless of which precision is chosen, +the precision only affects the slope of the memory profile as seen in this chart: + +image:images/rare_terms/memory.png[] + +For comparison, an equivalent terms aggregation at 20 million buckets would be roughly +`20m * 69b == ~1.38gb` (with 69 bytes being a very optimistic estimate of an empty bucket cost, far lower than what +the circuit breaker accounts for). So although the `rare_terms` agg is relatively heavy, it is still orders of +magnitude smaller than the equivalent terms aggregation + +==== Filtering Values + +It is possible to filter the values for which buckets will be created. This can be done using the `include` and +`exclude` parameters which are based on regular expression strings or arrays of exact values. Additionally, +`include` clauses can filter using `partition` expressions. + +===== Filtering Values with regular expressions + +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "genres" : { + "rare_terms" : { + "field" : "genre", + "include" : "swi*", + "exclude" : "electro*" + } + } + } +} +-------------------------------------------------- +// CONSOLE + +In the above example, buckets will be created for all the tags that starts with `swi`, except those starting +with `electro` (so the tag `swing` will be aggregated but not `electro_swing`). The `include` regular expression will determine what +values are "allowed" to be aggregated, while the `exclude` determines the values that should not be aggregated. When +both are defined, the `exclude` has precedence, meaning, the `include` is evaluated first and only then the `exclude`. + +The syntax is the same as <>. + +===== Filtering Values with exact values + +For matching based on exact values the `include` and `exclude` parameters can simply take an array of +strings that represent the terms as they are found in the index: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "genres" : { + "rare_terms" : { + "field" : "genre", + "include" : ["swing", "rock"], + "exclude" : ["jazz"] + } + } + } +} +-------------------------------------------------- +// CONSOLE + + +==== Missing value + +The `missing` parameter defines how documents that are missing a value should be treated. +By default they will be ignored but it is also possible to treat them as if they +had a value. 
+ +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "genres" : { + "rare_terms" : { + "field" : "genre", + "missing": "N/A" <1> + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> Documents without a value in the `tags` field will fall into the same bucket as documents that have the value `N/A`. + +==== Nested, RareTerms, and scoring sub-aggregations + +The RareTerms aggregation has to operate in `breadth_first` mode, since it needs to prune terms as doc count thresholds +are breached. This requirement means the RareTerms aggregation is incompatible with certain combinations of aggregations +that require `depth_first`. In particular, scoring sub-aggregations that are inside a `nested` force the entire aggregation tree to run +in `depth_first` mode. This will throw an exception since RareTerms is unable to process `depth_first`. + +As a concrete example, if `rare_terms` aggregation is the child of a `nested` aggregation, and one of the child aggregations of `rare_terms` +needs document scores (like a `top_hits` aggregation), this will throw an exception. \ No newline at end of file diff --git a/docs/reference/data-frames/apis/preview-transform.asciidoc b/docs/reference/data-frames/apis/preview-transform.asciidoc index 5dfe1f2f1d7..a4338d0ef60 100644 --- a/docs/reference/data-frames/apis/preview-transform.asciidoc +++ b/docs/reference/data-frames/apis/preview-transform.asciidoc @@ -90,7 +90,17 @@ The data that is returned for this example is as follows: "customer_id" : "12" } ... - ] + ], + "mappings": { + "properties": { + "max_price": { + "type": "double" + }, + "customer_id": { + "type": "keyword" + } + } + } } ---- // NOTCONSOLE diff --git a/docs/reference/docs/data-replication.asciidoc b/docs/reference/docs/data-replication.asciidoc index b83858cecfd..28389fb05ba 100644 --- a/docs/reference/docs/data-replication.asciidoc +++ b/docs/reference/docs/data-replication.asciidoc @@ -5,12 +5,12 @@ [float] === Introduction -Each index in Elasticsearch is <> +Each index in Elasticsearch is <> and each shard can have multiple copies. These copies are known as a _replication group_ and must be kept in sync when documents are added or removed. If we fail to do so, reading from one copy will result in very different results than reading from another. The process of keeping the shard copies in sync and serving reads from them is what we call the _data replication model_. -Elasticsearch’s data replication model is based on the _primary-backup model_ and is described very well in the +Elasticsearch’s data replication model is based on the _primary-backup model_ and is described very well in the https://www.microsoft.com/en-us/research/publication/pacifica-replication-in-log-based-distributed-storage-systems/[PacificA paper] of Microsoft Research. That model is based on having a single copy from the replication group that acts as the primary shard. The other copies are called _replica shards_. The primary serves as the main entry point for all indexing operations. It is in charge of @@ -23,7 +23,7 @@ it has for various interactions between write and read operations. [float] === Basic write model -Every indexing operation in Elasticsearch is first resolved to a replication group using <>, +Every indexing operation in Elasticsearch is first resolved to a replication group using <>, typically based on the document ID. 
Once the replication group has been determined, the operation is forwarded internally to the current _primary shard_ of the group. The primary shard is responsible for validating the operation and forwarding it to the other replicas. Since replicas can be offline, the primary @@ -50,7 +50,7 @@ configuration mistake could cause an operation to fail on a replica despite it b are infrequent but the primary has to respond to them. In the case that the primary itself fails, the node hosting the primary will send a message to the master about it. The indexing -operation will wait (up to 1 minute, by <>) for the master to promote one of the replicas to be a +operation will wait (up to 1 minute, by <>) for the master to promote one of the replicas to be a new primary. The operation will then be forwarded to the new primary for processing. Note that the master also monitors the health of the nodes and may decide to proactively demote a primary. This typically happens when the node holding the primary is isolated from the cluster by a networking issue. See <> for more details. @@ -60,8 +60,8 @@ when executing it on the replica shards. This may be caused by an actual failure issue preventing the operation from reaching the replica (or preventing the replica from responding). All of these share the same end result: a replica which is part of the in-sync replica set misses an operation that is about to be acknowledged. In order to avoid violating the invariant, the primary sends a message to the master requesting -that the problematic shard be removed from the in-sync replica set. Only once removal of the shard has been acknowledged -by the master does the primary acknowledge the operation. Note that the master will also instruct another node to start +that the problematic shard be removed from the in-sync replica set. Only once removal of the shard has been acknowledged +by the master does the primary acknowledge the operation. Note that the master will also instruct another node to start building a new shard copy in order to restore the system to a healthy state. [[demoted-primary]] @@ -72,13 +72,13 @@ will be rejected by the replicas. When the primary receives a response from the it is no longer the primary then it will reach out to the master and will learn that it has been replaced. The operation is then routed to the new primary. -.What happens if there are no replicas? +.What happens if there are no replicas? ************ This is a valid scenario that can happen due to index configuration or simply because all the replicas have failed. In that case the primary is processing operations without any external validation, which may seem problematic. On the other hand, the primary cannot fail other shards on its own but request the master to do -so on its behalf. This means that the master knows that the primary is the only single good copy. We are therefore guaranteed -that the master will not promote any other (out-of-date) shard copy to be a new primary and that any operation indexed +so on its behalf. This means that the master knows that the primary is the only single good copy. We are therefore guaranteed +that the master will not promote any other (out-of-date) shard copy to be a new primary and that any operation indexed into the primary will not be lost. Of course, since at that point we are running with only single copy of the data, physical hardware issues can cause data loss. See <> for some mitigation options. ************ @@ -91,7 +91,7 @@ take non-trivial CPU power. 
One of the beauties of the primary-backup model is t (with the exception of in-flight operations). As such, a single in-sync copy is sufficient to serve read requests. When a read request is received by a node, that node is responsible for forwarding it to the nodes that hold the relevant shards, -collating the responses, and responding to the client. We call that node the _coordinating node_ for that request. The basic flow +collating the responses, and responding to the client. We call that node the _coordinating node_ for that request. The basic flow is as follows: . Resolve the read requests to the relevant shards. Note that since most searches will be sent to one or more indices, @@ -153,8 +153,8 @@ Dirty reads:: An isolated primary can expose writes that will not be acknowledge [float] === The Tip of the Iceberg -This document provides a high level overview of how Elasticsearch deals with data. Of course, there is much much more -going on under the hood. Things like primary terms, cluster state publishing, and master election all play a role in +This document provides a high level overview of how Elasticsearch deals with data. Of course, there is much much more +going on under the hood. Things like primary terms, cluster state publishing, and master election all play a role in keeping this system behaving correctly. This document also doesn't cover known and important bugs (both closed and open). We recognize that https://github.com/elastic/elasticsearch/issues?q=label%3Aresiliency[GitHub is hard to keep up with]. To help people stay on top of those, we maintain a dedicated https://www.elastic.co/guide/en/elasticsearch/resiliency/current/index.html[resiliency page] diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 1f3d8f879dc..f0c4ec46df7 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -1,108 +1,34 @@ [[getting-started]] -= Getting started += Getting started with {es} [partintro] -- -TIP: The fastest way to get started with {es} is to https://www.elastic.co/cloud/elasticsearch-service/signup[start a free 14-day trial of Elasticsearch Service] in the cloud. +Ready to take {es} for a test drive and see for yourself how you can use the +REST APIs to store, search, and analyze data? -Elasticsearch is a highly scalable open-source full-text search and analytics engine. It allows you to store, search, and analyze big volumes of data quickly and in near real time. It is generally used as the underlying engine/technology that powers applications that have complex search features and requirements. +Step through this getting started tutorial to: -Here are a few sample use-cases that Elasticsearch could be used for: +. Get an {es} instance up and running +. Index some sample documents +. Search for documents using the {es} query language +. Analyze the results using bucket and metrics aggregations -* You run an online web store where you allow your customers to search for products that you sell. In this case, you can use Elasticsearch to store your entire product catalog and inventory and provide search and autocomplete suggestions for them. -* You want to collect log or transaction data and you want to analyze and mine this data to look for trends, statistics, summarizations, or anomalies. In this case, you can use Logstash (part of the Elasticsearch/Logstash/Kibana stack) to collect, aggregate, and parse your data, and then have Logstash feed this data into Elasticsearch. 
Once the data is in Elasticsearch, you can run searches and aggregations to mine any information that is of interest to you. -* You run a price alerting platform which allows price-savvy customers to specify a rule like "I am interested in buying a specific electronic gadget and I want to be notified if the price of gadget falls below $X from any vendor within the next month". In this case you can scrape vendor prices, push them into Elasticsearch and use its reverse-search (Percolator) capability to match price movements against customer queries and eventually push the alerts out to the customer once matches are found. -* You have analytics/business-intelligence needs and want to quickly investigate, analyze, visualize, and ask ad-hoc questions on a lot of data (think millions or billions of records). In this case, you can use Elasticsearch to store your data and then use Kibana (part of the Elasticsearch/Logstash/Kibana stack) to build custom dashboards that can visualize aspects of your data that are important to you. Additionally, you can use the Elasticsearch aggregations functionality to perform complex business intelligence queries against your data. -For the rest of this tutorial, you will be guided through the process of getting Elasticsearch up and running, taking a peek inside it, and performing basic operations like indexing, searching, and modifying your data. At the end of this tutorial, you should have a good idea of what Elasticsearch is, how it works, and hopefully be inspired to see how you can use it to either build sophisticated search applications or to mine intelligence from your data. +Need more context? + +Check out the <> to learn the lingo and understand the basics of +how {es} works. If you're already familiar with {es} and want to see how it works +with the rest of the stack, you might want to jump to the +{stack-gs}/get-started-elastic-stack.html[Elastic Stack +Tutorial] to see how to set up a system monitoring solution with {es}, {kib}, +{beats}, and {ls}. + +TIP: The fastest way to get started with {es} is to +https://www.elastic.co/cloud/elasticsearch-service/signup[start a free 14-day +trial of Elasticsearch Service] in the cloud. -- -[[getting-started-concepts]] -== Basic Concepts - -There are a few concepts that are core to Elasticsearch. Understanding these concepts from the outset will tremendously help ease the learning process. - -[float] -=== Near Realtime (NRT) - -Elasticsearch is a near-realtime search platform. What this means is there is a slight latency (normally one second) from the time you index a document until the time it becomes searchable. - -[float] -=== Cluster - -A cluster is a collection of one or more nodes (servers) that together holds your entire data and provides federated indexing and search capabilities across all nodes. A cluster is identified by a unique name which by default is "elasticsearch". This name is important because a node can only be part of a cluster if the node is set up to join the cluster by its name. - -Make sure that you don't reuse the same cluster names in different -environments, otherwise you might end up with nodes joining the wrong cluster. -For instance you could use `logging-dev`, `logging-stage`, and `logging-prod` -for the development, staging, and production clusters. - -Note that it is valid and perfectly fine to have a cluster with only a single node in it. Furthermore, you may also have multiple independent clusters each with its own unique cluster name. 
- -[float] -=== Node - -A node is a single server that is part of your cluster, stores your data, and participates in the cluster's indexing and search -capabilities. Just like a cluster, a node is identified by a name which by default is a random Universally Unique IDentifier (UUID) that is assigned to the node at startup. You can define any node name you want if you do not want the default. This name is important for administration purposes where you want to identify which servers in your network correspond to which nodes in your Elasticsearch cluster. - -A node can be configured to join a specific cluster by the cluster name. By default, each node is set up to join a cluster named `elasticsearch` which means that if you start up a number of nodes on your network and--assuming they can discover each other--they will all automatically form and join a single cluster named `elasticsearch`. - -In a single cluster, you can have as many nodes as you want. Furthermore, if there are no other Elasticsearch nodes currently running on your network, starting a single node will by default form a new single-node cluster named `elasticsearch`. - -[float] -=== Index - -An index is a collection of documents that have somewhat similar characteristics. For example, you can have an index for customer data, another index for a product catalog, and yet another index for order data. An index is identified by a name (that must be all lowercase) and this name is used to refer to the index when performing indexing, search, update, and delete operations against the documents in it. - -In a single cluster, you can define as many indexes as you want. - -[float] -=== Type - -deprecated[6.0.0,See <>] - -A type used to be a logical category/partition of your index to allow you to store different types of documents in the same index, e.g. one type for users, another type for blog posts. It is no longer possible to create multiple types in an index, and the whole concept of types will be removed in a later version. See <> for more. - -[float] -=== Document - -A document is a basic unit of information that can be indexed. For example, you can have a document for a single customer, another document for a single product, and yet another for a single order. This document is expressed in http://json.org/[JSON] (JavaScript Object Notation) which is a ubiquitous internet data interchange format. Within an index, you can store as many documents as you want. - -[[getting-started-shards-and-replicas]] -[float] -=== Shards & Replicas - -An index can potentially store a large amount of data that can exceed the hardware limits of a single node. For example, a single index of a billion documents taking up 1TB of disk space may not fit on the disk of a single node or may be too slow to serve search requests from a single node alone. - -To solve this problem, Elasticsearch provides the ability to subdivide your index into multiple pieces called shards. When you create an index, you can simply define the number of shards that you want. Each shard is in itself a fully-functional and independent "index" that can be hosted on any node in the cluster. 
- -Sharding is important for two primary reasons: - -* It allows you to horizontally split/scale your content volume -* It allows you to distribute and parallelize operations across shards (potentially on multiple nodes) thus increasing performance/throughput - - -The mechanics of how a shard is distributed and also how its documents are aggregated back into search requests are completely managed by Elasticsearch and is transparent to you as the user. - -In a network/cloud environment where failures can be expected anytime, it is very useful and highly recommended to have a failover mechanism in case a shard/node somehow goes offline or disappears for whatever reason. To this end, Elasticsearch allows you to make one or more copies of your index's shards into what are called replica shards, or replicas for short. - -Replication is important for two primary reasons: - -* It provides high availability in case a shard/node fails. For this reason, it is important to note that a replica shard is never allocated on the same node as the original/primary shard that it was copied from. -* It allows you to scale out your search volume/throughput since searches can be executed on all replicas in parallel. - - -To summarize, each index can be split into multiple shards. An index can also be replicated zero (meaning no replicas) or more times. Once replicated, each index will have primary shards (the original shards that were replicated from) and replica shards (the copies of the primary shards). - -The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may also change the number of replicas dynamically anytime. You can change the number of shards for an existing index using the {ref}/indices-shrink-index.html[`_shrink`] and {ref}/indices-split-index.html[`_split`] APIs, however this is not a trivial task and pre-planning for the correct number of shards is the optimal approach. - -By default, each index in Elasticsearch is allocated one primary shard and one replica which means that if you have at least two nodes in your cluster, your index will have one primary shard and another replica shard (one complete replica) for a total of two shards per index. - -NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents. -You can monitor shard sizes using the {ref}/cat-shards.html[`_cat/shards`] API. - -With that out of the way, let's get started with the fun part... - [[getting-started-install]] == Installation diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc index 00578ce8c05..4949c43e6ce 100644 --- a/docs/reference/ilm/policy-definitions.asciidoc +++ b/docs/reference/ilm/policy-definitions.asciidoc @@ -120,9 +120,9 @@ configuring allocation rules is optional. When configuring allocation rules, setting number of replicas is optional. Although this action can be treated as two separate index settings updates, both can be configured at once. -Read more about index replicas <>. -Read more about shard allocation filtering in -the <>. +For more information about how {es} uses replicas for scaling, see +<>. See <> for more information about +controlling where Elasticsearch allocates shards of a particular index. 
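+
+For example, a hypothetical policy (the policy name and the `box_type` node
+attribute below are placeholders) can update the replica count and the
+allocation rules in a single `allocate` action:
+
+[source,js]
+----
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "actions": {
+          "allocate" : {
+            "number_of_replicas" : 1,
+            "require" : {
+              "box_type" : "warm"
+            }
+          }
+        }
+      }
+    }
+  }
+}
+----
+// NOTCONSOLE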
[[ilm-allocate-options]] .Allocate Options diff --git a/docs/reference/images/rare_terms/accuracy_0001.png b/docs/reference/images/rare_terms/accuracy_0001.png new file mode 100644 index 00000000000..0c13a3938cd Binary files /dev/null and b/docs/reference/images/rare_terms/accuracy_0001.png differ diff --git a/docs/reference/images/rare_terms/accuracy_001.png b/docs/reference/images/rare_terms/accuracy_001.png new file mode 100644 index 00000000000..2aa1be316c3 Binary files /dev/null and b/docs/reference/images/rare_terms/accuracy_001.png differ diff --git a/docs/reference/images/rare_terms/accuracy_01.png b/docs/reference/images/rare_terms/accuracy_01.png new file mode 100644 index 00000000000..7182b7d3c53 Binary files /dev/null and b/docs/reference/images/rare_terms/accuracy_01.png differ diff --git a/docs/reference/images/rare_terms/memory.png b/docs/reference/images/rare_terms/memory.png new file mode 100644 index 00000000000..e0de5c21639 Binary files /dev/null and b/docs/reference/images/rare_terms/memory.png differ diff --git a/docs/reference/indices/apis/reload-analyzers.asciidoc b/docs/reference/indices/apis/reload-analyzers.asciidoc index f3365968df3..657f6556df4 100644 --- a/docs/reference/indices/apis/reload-analyzers.asciidoc +++ b/docs/reference/indices/apis/reload-analyzers.asciidoc @@ -69,4 +69,37 @@ reload to ensure the new state of the file is reflected everywhere in the cluste POST /my_index/_reload_search_analyzers -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT my_index\n/] \ No newline at end of file +// TEST[continued] + +The reload request returns information about the nodes it was executed on and the +analyzers that were reloaded: + +[source,js] +-------------------------------------------------- +{ + "_shards" : { + "total" : 2, + "successful" : 2, + "failed" : 0 + }, + "reload_details" : [ + { + "index" : "my_index", + "reloaded_analyzers" : [ + "my_synonyms" + ], + "reloaded_node_ids" : [ + "mfdqTXn_T7SGr2Ho2KT8uw" + ] + } + ] +} +-------------------------------------------------- +// TEST[continued] +// TESTRESPONSE[s/"total" : 2/"total" : $body._shards.total/] +// TESTRESPONSE[s/"successful" : 2/"successful" : $body._shards.successful/] +// TESTRESPONSE[s/mfdqTXn_T7SGr2Ho2KT8uw/$body.reload_details.0.reloaded_node_ids.0/] + +NOTE: Reloading does not happen on each shard of an index, but once on each node +the index has shards on. The total shard count can therefore differ from the number +of index shards. diff --git a/docs/reference/indices/open-close.asciidoc b/docs/reference/indices/open-close.asciidoc index 6d0866d303b..4ba434ecbbb 100644 --- a/docs/reference/indices/open-close.asciidoc +++ b/docs/reference/indices/open-close.asciidoc @@ -2,23 +2,69 @@ == Open / Close Index API The open and close index APIs allow to close an index, and later on -opening it. A closed index has almost no overhead on the cluster (except -for maintaining its metadata), and is blocked for read/write operations. -A closed index can be opened which will then go through the normal -recovery process. +opening it. -The REST endpoint is `/{index}/_close` and `/{index}/_open`. For -example: +A closed index is blocked for read/write operations and does not allow +all operations that opened indices allow. It is not possible to index +documents or to search for documents in a closed index. This allows +closed indices to not have to maintain internal data structures for +indexing or searching documents, resulting in a smaller overhead on +the cluster. 
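+
+For example, once `my_index` has been closed, a search request against it such
+as the following is rejected with an `index_closed_exception` (unless
+`ignore_unavailable=true` is specified), because the index no longer maintains
+the data structures needed to serve it:
+
+[source,js]
+----
+GET /my_index/_search
+----
+// NOTCONSOLE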
+ +When opening or closing an index, the master is responsible for +restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. The +data of opened/closed indices is automatically replicated by the +cluster to ensure that enough shard copies are safely kept around +at all times. + +The REST endpoint is `/{index}/_close` and `/{index}/_open`. + +The following example shows how to close an index: [source,js] -------------------------------------------------- POST /my_index/_close - -POST /my_index/_open -------------------------------------------------- // CONSOLE // TEST[s/^/PUT my_index\n/] +This will return the following response: + +[source,js] +-------------------------------------------------- +{ + "acknowledged" : true, + "shards_acknowledged" : true, + "indices" : { + "my_index" : { + "closed" : true + } + } +} +-------------------------------------------------- +// TESTRESPONSE + +A closed index can be reopened like this: + +[source,js] +-------------------------------------------------- +POST /my_index/_open +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_index\nPOST my_index\/_close\n/] + +which will yield the following response: + +[source,js] +-------------------------------------------------- +{ + "acknowledged" : true, + "shards_acknowledged" : true +} +-------------------------------------------------- +// TESTRESPONSE + It is possible to open and close multiple indices. An error will be thrown if the request explicitly refers to a missing index. This behaviour can be disabled using the `ignore_unavailable=true` parameter. @@ -36,6 +82,6 @@ API by setting `cluster.indices.close.enable` to `false`. The default is `true`. [float] === Wait For Active Shards -Because opening an index allocates its shards, the +Because opening or closing an index allocates its shards, the <> setting on -index creation applies to the index opening action as well. +index creation applies to the `_open` and `_close` index actions as well. diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index 6fa2e8c796d..a945a56c194 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -23,7 +23,30 @@ another processor that renames a field. The <> then the configured pipelines. To use a pipeline, simply specify the `pipeline` parameter on an index or bulk request. This -way, the ingest node knows which pipeline to use. For example: +way, the ingest node knows which pipeline to use. 
+ +For example: +Create a pipeline + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/my_pipeline_id +{ + "description" : "describe pipeline", + "processors" : [ + { + "set" : { + "field": "foo", + "value": "new" + } + } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST + +Index with defined pipeline [source,js] -------------------------------------------------- @@ -33,7 +56,27 @@ PUT my-index/_doc/my-id?pipeline=my_pipeline_id } -------------------------------------------------- // CONSOLE -// TEST[catch:bad_request] +// TEST[continued] + +Response: +[source,js] +-------------------------------------------------- +{ + "_index" : "my-index", + "_type" : "_doc", + "_id" : "my-id", + "_version" : 1, + "result" : "created", + "_shards" : { + "total" : 2, + "successful" : 2, + "failed" : 0 + }, + "_seq_no" : 0, + "_primary_term" : 1 +} +-------------------------------------------------- +// TESTRESPONSE[s/"successful" : 2/"successful" : 1/] An index may also declare a <> that will be used in the absence of the `pipeline` parameter. diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index 47952d61c49..4aa178d443f 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -258,7 +258,7 @@ Elasticsearch 6.x:: * The `_default_` mapping type is deprecated. -* In 6.7, the index creation, index template, and mapping APIs support a query +* In 6.8, the index creation, index template, and mapping APIs support a query string parameter (`include_type_name`) which indicates whether requests and responses should include a type name. It defaults to `true`, and should be set to an explicit value to prepare to upgrade to 7.0. Not setting `include_type_name` @@ -442,12 +442,12 @@ documents to it using typeless `index` calls, and load documents with typeless Index creation, index template, and mapping APIs support a new `include_type_name` URL parameter that specifies whether mapping definitions in requests and responses -should contain the type name. The parameter defaults to `true` in version 6.7 to +should contain the type name. The parameter defaults to `true` in version 6.8 to match the pre-7.0 behavior of using type names in mappings. It defaults to `false` in version 7.0 and will be removed in version 8.0. -It should be set explicitly in 6.7 to prepare to upgrade to 7.0. To avoid deprecation -warnings in 6.7, the parameter can be set to either `true` or `false`. In 7.0, setting +It should be set explicitly in 6.8 to prepare to upgrade to 7.0. To avoid deprecation +warnings in 6.8, the parameter can be set to either `true` or `false`. In 7.0, setting `include_type_name` at all will result in a deprecation warning. See some examples of interactions with Elasticsearch with this option set to `false`: @@ -717,12 +717,12 @@ indices. [float] ==== Mixed-version clusters -In a cluster composed of both 6.7 and 7.0 nodes, the parameter +In a cluster composed of both 6.8 and 7.0 nodes, the parameter `include_type_name` should be specified in indices APIs like index creation. This is because the parameter has a different default between -6.7 and 7.0, so the same mapping definition will not be valid for both +6.8 and 7.0, so the same mapping definition will not be valid for both node versions. Typeless document APIs such as `bulk` and `update` are only available as of -7.0, and will not work with 6.7 nodes. 
This also holds true for the typeless +7.0, and will not work with 6.8 nodes. This also holds true for the typeless versions of queries that perform document lookups, such as `terms`. diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index c0db156dc3a..bdfcf1128a0 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -42,8 +42,6 @@ string:: <> and <> <>:: Defines parent/child relation for documents within the same index -<>:: Defines an alias to an existing field. - <>:: Record numeric feature to boost hits at query time. <>:: Record numeric features to boost hits at query time. @@ -54,6 +52,11 @@ string:: <> and <> <>:: A text-like field optimized for queries to implement as-you-type completion +<>:: Defines an alias to an existing field. + +<>:: Allows an entire JSON object to be indexed as a single field. + + [float] === Multi-fields @@ -82,6 +85,8 @@ include::types/date.asciidoc[] include::types/date_nanos.asciidoc[] +include::types/flattened.asciidoc[] + include::types/geo-point.asciidoc[] include::types/geo-shape.asciidoc[] diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index f79bdde9cc4..7ea04b42330 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -7,9 +7,7 @@ experimental[] A `dense_vector` field stores dense vectors of float values. The maximum number of dimensions that can be in a vector should -not exceed 1024. The number of dimensions can be -different across documents. A `dense_vector` field is -a single-valued field. +not exceed 1024. A `dense_vector` field is a single-valued field. These vectors can be used for <>. For example, a document score can represent a distance between @@ -24,7 +22,8 @@ PUT my_index "mappings": { "properties": { "my_vector": { - "type": "dense_vector" + "type": "dense_vector", + "dims": 3 <1> }, "my_text" : { "type" : "keyword" @@ -42,13 +41,14 @@ PUT my_index/_doc/1 PUT my_index/_doc/2 { "my_text" : "text2", - "my_vector" : [-0.5, 10, 10, 4] + "my_vector" : [-0.5, 10, 10] } -------------------------------------------------- // CONSOLE +<1> dims—the number of dimensions in the vector, required parameter. + Internally, each document's dense vector is encoded as a binary doc value. Its size in bytes is equal to -`4 * NUMBER_OF_DIMENSIONS`, where `NUMBER_OF_DIMENSIONS` - -number of the vector's dimensions. \ No newline at end of file +`4 * dims`, where `dims`—the number of the vector's dimensions. \ No newline at end of file diff --git a/docs/reference/mapping/types/flattened.asciidoc b/docs/reference/mapping/types/flattened.asciidoc new file mode 100644 index 00000000000..80fd72c3dcc --- /dev/null +++ b/docs/reference/mapping/types/flattened.asciidoc @@ -0,0 +1,188 @@ +[role="xpack"] +[testenv="basic"] + +[[flattened]] +=== Flattened datatype + +By default, each subfield in an object is mapped and indexed separately. If +the names or types of the subfields are not known in advance, then they are +<>. + +The `flattened` type provides an alternative approach, where the entire +object is mapped as a single field. Given an object, the `flattened` +mapping will parse out its leaf values and index them into one field as +keywords. The object's contents can then be searched through simple queries +and aggregations. + +This data type can be useful for indexing objects with a large or unknown +number of unique keys. 
Only one field mapping is created for the whole JSON +object, which can help prevent a <> +from having too many distinct field mappings. + +On the other hand, flattened object fields present a trade-off in terms of +search functionality. Only basic queries are allowed, with no support for +numeric range queries or highlighting. Further information on the limitations +can be found in the <> section. + +NOTE: The `flattened` mapping type should **not** be used for indexing all +document content, as it treats all values as keywords and does not provide full +search functionality. The default approach, where each subfield has its own +entry in the mappings, works well in the majority of cases. + +An flattened object field can be created as follows: +[source,js] +-------------------------------- +PUT bug_reports +{ + "mappings": { + "properties": { + "title": { + "type": "text" + }, + "labels": { + "type": "flattened" + } + } + } +} + +POST bug_reports/_doc/1 +{ + "title": "Results are not sorted correctly.", + "labels": { + "priority": "urgent", + "release": ["v1.2.5", "v1.3.0"], + "timestamp": { + "created": 1541458026, + "closed": 1541457010 + } + } +} +-------------------------------- +// CONSOLE +// TESTSETUP + +During indexing, tokens are created for each leaf value in the JSON object. The +values are indexed as string keywords, without analysis or special handling for +numbers or dates. + +Querying the top-level `flattened` field searches all leaf values in the +object: + +[source,js] +-------------------------------- +POST bug_reports/_search +{ + "query": { + "term": {"labels": "urgent"} + } +} +-------------------------------- +// CONSOLE + +To query on a specific key in the flattened object, object dot notation is used: +[source,js] +-------------------------------- +POST bug_reports/_search +{ + "query": { + "term": {"labels.release": "v1.3.0"} + } +} +-------------------------------- +// CONSOLE + +[[supported-operations]] +==== Supported operations + +Because of the similarities in the way values are indexed, `flattened` +fields share much of the same mapping and search functionality as +<> fields. + +Currently, flattened object fields can be used with the following query types: + +- `term`, `terms`, and `terms_set` +- `prefix` +- `range` +- `match` and `multi_match` +- `query_string` and `simple_query_string` +- `exists` + +When querying, it is not possible to refer to field keys using wildcards, as in +`{ "term": {"labels.time*": 1541457010}}`. Note that all queries, including +`range`, treat the values as string keywords. Highlighting is not supported on +`flattened` fields. + +It is possible to sort on an flattened object field, as well as perform simple +keyword-style aggregations such as `terms`. As with queries, there is no +special support for numerics -- all values in the JSON object are treated as +keywords. When sorting, this implies that values are compared +lexicographically. + +Flattened object fields currently cannot be stored. It is not possible to +specify the <> parameter in the mapping. + +[[flattened-params]] +==== Parameters for flattened object fields + +The following mapping parameters are accepted: + +[horizontal] + +<>:: + + Mapping field-level query time boosting. Accepts a floating point number, + defaults to `1.0`. + +`depth_limit`:: + + The maximum allowed depth of the flattened object field, in terms of nested + inner objects. If a flattened object field exceeds this limit, then an + error will be thrown. Defaults to `20`. 
+ +<>:: + + Should the field be stored on disk in a column-stride fashion, so that it + can later be used for sorting, aggregations, or scripting? Accepts `true` + (default) or `false`. + +<>:: + + Should global ordinals be loaded eagerly on refresh? Accepts `true` or + `false` (default). Enabling this is a good idea on fields that are + frequently used for terms aggregations. + +<>:: + + Leaf values longer than this limit will not be indexed. By default, there + is no limit and all values will be indexed. Note that this limit applies + to the leaf values within the flattened object field, and not the length of + the entire field. + +<>:: + + Determines if the field should be searchable. Accepts `true` (default) or + `false`. + +<>:: + + What information should be stored in the index for scoring purposes. + Defaults to `docs` but can also be set to `freqs` to take term frequency + into account when computing scores. + +<>:: + + A string value which is substituted for any explicit `null` values within + the flattened object field. Defaults to `null`, which means null fields are + treated as if they were missing. + +<>:: + + Which scoring algorithm or _similarity_ should be used. Defaults + to `BM25`. + +`split_queries_on_whitespace`:: + + Whether <> should split the input on + whitespace when building a query for this field. Accepts `true` or `false` + (default). diff --git a/docs/reference/migration/migrate_7_3.asciidoc b/docs/reference/migration/migrate_7_3.asciidoc index ee23c1f772e..66e67ba2611 100644 --- a/docs/reference/migration/migrate_7_3.asciidoc +++ b/docs/reference/migration/migrate_7_3.asciidoc @@ -20,6 +20,8 @@ coming[7.3.0] [[breaking_73_mapping_changes]] === Mapping changes +The `dense_vector` field now requires a `dims` parameter, specifying the number of +dimensions for document and query vectors for this field. [float] ==== Defining multi-fields within multi-fields diff --git a/docs/reference/migration/migrate_7_4.asciidoc b/docs/reference/migration/migrate_7_4.asciidoc new file mode 100644 index 00000000000..d12c5c5eba3 --- /dev/null +++ b/docs/reference/migration/migrate_7_4.asciidoc @@ -0,0 +1,30 @@ +[[breaking-changes-7.4]] +== Breaking changes in 7.4 +++++ +7.4 +++++ + +This section discusses the changes that you need to be aware of when migrating +your application to Elasticsearch 7.4. + +See also <> and <>. + +coming[7.4.0] + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + +[[breaking_74_plugin_changes]] +=== Plugins changes + +[float] +==== TokenizerFactory changes + +TokenizerFactory now has a `name()` method that must be implemented. Most +plugin-provided TokenizerFactory implementations will extend `AbstractTokenizerFactory`, +which now takes a `name` parameter in its constructor.
+ diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index ddf8e1c9ba9..467e6076913 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -109,18 +109,20 @@ To create a dedicated master-eligible node in the {default-dist}, set: [source,yaml] ------------------- node.master: true <1> -node.data: false <2> -node.ingest: false <3> -node.ml: false <4> -xpack.ml.enabled: true <5> -cluster.remote.connect: false <6> +node.voting_only: false <2> +node.data: false <3> +node.ingest: false <4> +node.ml: false <5> +xpack.ml.enabled: true <6> +cluster.remote.connect: false <7> ------------------- <1> The `node.master` role is enabled by default. -<2> Disable the `node.data` role (enabled by default). -<3> Disable the `node.ingest` role (enabled by default). -<4> Disable the `node.ml` role (enabled by default). -<5> The `xpack.ml.enabled` setting is enabled by default. -<6> Disable {ccs} (enabled by default). +<2> The `node.voting_only` role is disabled by default. +<3> Disable the `node.data` role (enabled by default). +<4> Disable the `node.ingest` role (enabled by default). +<5> Disable the `node.ml` role (enabled by default). +<6> The `xpack.ml.enabled` setting is enabled by default. +<7> Disable {ccs} (enabled by default). To create a dedicated master-eligible node in the {oss-dist}, set: @@ -177,6 +179,30 @@ reasonably fast persistent storage and a reliable and low-latency network connection to the rest of the cluster, since they are on the critical path for <>. +Voting-only master-eligible nodes may also fill other roles in your cluster. +For instance, a node may be both a data node and a voting-only master-eligible +node. A _dedicated_ voting-only master-eligible nodes is a voting-only +master-eligible node that fills no other roles in the cluster. To create a +dedicated voting-only master-eligible node in the {default-dist}, set: + +[source,yaml] +------------------- +node.master: true <1> +node.voting_only: true <2> +node.data: false <3> +node.ingest: false <4> +node.ml: false <5> +xpack.ml.enabled: true <6> +cluster.remote.connect: false <7> +------------------- +<1> The `node.master` role is enabled by default. +<2> Enable the `node.voting_only` role (disabled by default). +<3> Disable the `node.data` role (enabled by default). +<4> Disable the `node.ingest` role (enabled by default). +<5> Disable the `node.ml` role (enabled by default). +<6> The `xpack.ml.enabled` setting is enabled by default. +<7> Disable {ccs} (enabled by default). + [float] [[data-node]] === Data Node @@ -193,16 +219,18 @@ To create a dedicated data node in the {default-dist}, set: [source,yaml] ------------------- node.master: false <1> -node.data: true <2> -node.ingest: false <3> -node.ml: false <4> -cluster.remote.connect: false <5> +node.voting_only: false <2> +node.data: true <3> +node.ingest: false <4> +node.ml: false <5> +cluster.remote.connect: false <6> ------------------- <1> Disable the `node.master` role (enabled by default). -<2> The `node.data` role is enabled by default. -<3> Disable the `node.ingest` role (enabled by default). -<4> Disable the `node.ml` role (enabled by default). -<5> Disable {ccs} (enabled by default). +<2> The `node.voting_only` role is disabled by default. +<3> The `node.data` role is enabled by default. +<4> Disable the `node.ingest` role (enabled by default). +<5> Disable the `node.ml` role (enabled by default). +<6> Disable {ccs} (enabled by default). 
To create a dedicated data node in the {oss-dist}, set: [source,yaml] @@ -231,16 +259,18 @@ To create a dedicated ingest node in the {default-dist}, set: [source,yaml] ------------------- node.master: false <1> -node.data: false <2> -node.ingest: true <3> -node.ml: false <4> -cluster.remote.connect: false <5> +node.voting_only: false <2> +node.data: false <3> +node.ingest: true <4> +node.ml: false <5> +cluster.remote.connect: false <6> ------------------- <1> Disable the `node.master` role (enabled by default). -<2> Disable the `node.data` role (enabled by default). -<3> The `node.ingest` role is enabled by default. -<4> Disable the `node.ml` role (enabled by default). -<5> Disable {ccs} (enabled by default). +<2> The `node.voting_only` role is disabled by default. +<3> Disable the `node.data` role (enabled by default). +<4> The `node.ingest` role is enabled by default. +<5> Disable the `node.ml` role (enabled by default). +<6> Disable {ccs} (enabled by default). To create a dedicated ingest node in the {oss-dist}, set: @@ -282,16 +312,18 @@ To create a dedicated coordinating node in the {default-dist}, set: [source,yaml] ------------------- node.master: false <1> -node.data: false <2> -node.ingest: false <3> -node.ml: false <4> -cluster.remote.connect: false <5> +node.voting_only: false <2> +node.data: false <3> +node.ingest: false <4> +node.ml: false <5> +cluster.remote.connect: false <6> ------------------- <1> Disable the `node.master` role (enabled by default). -<2> Disable the `node.data` role (enabled by default). -<3> Disable the `node.ingest` role (enabled by default). -<4> Disable the `node.ml` role (enabled by default). -<5> Disable {ccs} (enabled by default). +<2> The `node.voting_only` role is disabled by default. +<3> Disable the `node.data` role (enabled by default). +<4> Disable the `node.ingest` role (enabled by default). +<5> Disable the `node.ml` role (enabled by default). +<6> Disable {ccs} (enabled by default). To create a dedicated coordinating node in the {oss-dist}, set: @@ -326,18 +358,20 @@ To create a dedicated {ml} node in the {default-dist}, set: [source,yaml] ------------------- node.master: false <1> -node.data: false <2> -node.ingest: false <3> -node.ml: true <4> -xpack.ml.enabled: true <5> -cluster.remote.connect: false <6> +node.voting_only: false <2> +node.data: false <3> +node.ingest: false <4> +node.ml: true <5> +xpack.ml.enabled: true <6> +cluster.remote.connect: false <7> ------------------- <1> Disable the `node.master` role (enabled by default). -<2> Disable the `node.data` role (enabled by default). -<3> Disable the `node.ingest` role (enabled by default). -<4> The `node.ml` role is enabled by default. -<5> The `xpack.ml.enabled` setting is enabled by default. -<6> Disable {ccs} (enabled by default). +<2> The `node.voting_only` role is disabled by default. +<3> Disable the `node.data` role (enabled by default). +<4> Disable the `node.ingest` role (enabled by default). +<5> The `node.ml` role is enabled by default. +<6> The `xpack.ml.enabled` setting is enabled by default. +<7> Disable {ccs} (enabled by default). 
[float] [[change-node-role]] diff --git a/docs/reference/query-dsl/dis-max-query.asciidoc b/docs/reference/query-dsl/dis-max-query.asciidoc index f05f97107a0..9a0f1fb7b03 100644 --- a/docs/reference/query-dsl/dis-max-query.asciidoc +++ b/docs/reference/query-dsl/dis-max-query.asciidoc @@ -1,48 +1,63 @@ [[query-dsl-dis-max-query]] -=== Dis Max Query +=== Disjunction Max Query -A query that generates the union of documents produced by its -subqueries, and that scores each document with the maximum score for -that document as produced by any subquery, plus a tie breaking increment -for any additional matching subqueries. +Returns documents matching one or more wrapped queries, called query clauses or +clauses. -This is useful when searching for a word in multiple fields with -different boost factors (so that the fields cannot be combined -equivalently into a single search field). We want the primary score to -be the one associated with the highest boost, not the sum of the field -scores (as Boolean Query would give). If the query is "albino elephant" -this ensures that "albino" matching one field and "elephant" matching -another gets a higher score than "albino" matching both fields. To get -this result, use both Boolean Query and DisjunctionMax Query: for each -term a DisjunctionMaxQuery searches for it in each field, while the set -of these DisjunctionMaxQuery's is combined into a BooleanQuery. +If a returned document matches multiple query clauses, the `dis_max` query +assigns the document the highest relevance score from any matching clause, plus +a tie breaking increment for any additional matching subqueries. -The tie breaker capability allows results that include the same term in -multiple fields to be judged better than results that include this term -in only the best of those multiple fields, without confusing this with -the better case of two different terms in the multiple fields. The -default `tie_breaker` is `0.0`. +You can use the `dis_max` to search for a term in fields mapped with different +<> factors. -This query maps to Lucene `DisjunctionMaxQuery`. +[[query-dsl-dis-max-query-ex-request]] +==== Example request [source,js] --------------------------------------------------- +---- GET /_search { "query": { "dis_max" : { - "tie_breaker" : 0.7, - "boost" : 1.2, "queries" : [ - { - "term" : { "age" : 34 } - }, - { - "term" : { "age" : 35 } - } - ] + { "term" : { "title" : "Quick pets" }}, + { "term" : { "body" : "Quick pets" }} + ], + "tie_breaker" : 0.7 } } } --------------------------------------------------- +---- // CONSOLE + +[[query-dsl-dis-max-query-top-level-params]] +==== Top-level parameters for `dis_max` + +`queries` (Required):: +(array of query objects) Contains one or more query clauses. Returned documents +**must match one or more** of these queries. If a document matches multiple +queries, {es} uses the highest <>. + +`tie_breaker` (Optional):: ++ +-- +(float) Floating point number between `0` and `1.0` used to increase the +<> of documents matching multiple query +clauses. Defaults to `0.0`. + +You can use the `tie_breaker` value to assign higher relevance scores to +documents that contain the same term in multiple fields than documents that +contain this term in only the best of those multiple fields, without confusing +this with the better case of two different terms in the multiple fields. + +If a document matches multiple clauses, the `dis_max` query calculates the +relevance score for the document as follows: + +. 
Take the relevance score from a matching clause with the highest score. +. Multiply the score from any other matching clauses by the `tie_breaker` value. +. Add the highest score to the multiplied scores. + +If the `tie_breaker` value is greater than `0.0`, all matching clauses count, +but the clause with the highest score counts most. +-- \ No newline at end of file diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index dd68d188460..610ecacfe90 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -186,8 +186,7 @@ a vector function is executed, 0 is returned as a result for this document. NOTE: If a document's dense vector field has a number of dimensions -different from the query's vector, 0 is used for missing dimensions -in the calculations of vector functions. +different from the query's vector, an error will be thrown. [[random-score-function]] diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index 9fc44d12e6f..f6a4c3d4454 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -71,6 +71,10 @@ Example response: "available" : true, "enabled" : true }, + "flattened" : { + "available" : true, + "enabled" : true + }, "graph" : { "available" : true, "enabled" : true diff --git a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc index 1d23430e37e..2190f17e458 100644 --- a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc +++ b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc @@ -43,11 +43,13 @@ instances: `.env`: [source,yaml] ---- -CERTS_DIR=/usr/share/elasticsearch/config/certificates <1> -ELASTIC_PASSWORD=PleaseChangeMe <2> +COMPOSE_PROJECT_NAME=es <1> +CERTS_DIR=/usr/share/elasticsearch/config/certificates <2> +ELASTIC_PASSWORD=PleaseChangeMe <3> ---- -<1> The path, inside the Docker image, where certificates are expected to be found. -<2> Initial password for the `elastic` user. +<1> Use an `es_` prefix for all volumes and networks created by docker-compose. +<2> The path, inside the Docker image, where certificates are expected to be found. +<3> Initial password for the `elastic` user. [[getting-starter-tls-create-certs-composefile]] `create-certs.yml`: @@ -69,21 +71,21 @@ services: image: {docker-image} command: > bash -c ' - if [[ ! -d config/certificates/certs ]]; then - mkdir config/certificates/certs; + yum install -y -q -e 0 unzip; + if [[ ! -f /certs/bundle.zip ]]; then + bin/elasticsearch-certutil cert --silent --pem --in config/certificates/instances.yml -out /certs/bundle.zip; + unzip /certs/bundle.zip -d /certs; <1> fi; - if [[ ! -f /local/certs/bundle.zip ]]; then - bin/elasticsearch-certgen --silent --in config/certificates/instances.yml --out config/certificates/certs/bundle.zip; - unzip config/certificates/certs/bundle.zip -d config/certificates/certs; <1> - fi; - chgrp -R 0 config/certificates/certs + chown -R 1000:0 /certs ' - user: $\{UID:-1000\} + user: "0" working_dir: /usr/share/elasticsearch - volumes: ['.:/usr/share/elasticsearch/config/certificates'] + volumes: ['certs:/certs', '.:/usr/share/elasticsearch/config/certificates'] + +volumes: {"certs"} ---- -<1> The new node certificates and CA certificate+key are placed under the local directory `certs`. 
+<1> The new node certificates and CA certificate+key are placed in a docker volume `es_certs`. endif::[] [[getting-starter-tls-create-docker-compose]] @@ -106,7 +108,7 @@ services: image: {docker-image} environment: - node.name=es01 - - discovery.seed_hosts=es02 + - discovery.seed_hosts=es01,es02 - cluster.initial_master_nodes=es01,es02 - ELASTIC_PASSWORD=$ELASTIC_PASSWORD <1> - "ES_JAVA_OPTS=-Xms512m -Xmx512m" @@ -121,7 +123,7 @@ services: - xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt - xpack.security.transport.ssl.certificate=$CERTS_DIR/es01/es01.crt - xpack.security.transport.ssl.key=$CERTS_DIR/es01/es01.key - volumes: ['esdata_01:/usr/share/elasticsearch/data', './certs:$CERTS_DIR'] + volumes: ['data01:/usr/share/elasticsearch/data', 'certs:$CERTS_DIR'] ports: - 9200:9200 healthcheck: @@ -135,7 +137,7 @@ services: image: {docker-image} environment: - node.name=es02 - - discovery.seed_hosts=es01 + - discovery.seed_hosts=es01,es02 - cluster.initial_master_nodes=es01,es02 - ELASTIC_PASSWORD=$ELASTIC_PASSWORD - "ES_JAVA_OPTS=-Xms512m -Xmx512m" @@ -150,14 +152,14 @@ services: - xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt - xpack.security.transport.ssl.certificate=$CERTS_DIR/es02/es02.crt - xpack.security.transport.ssl.key=$CERTS_DIR/es02/es02.key - volumes: ['esdata_02:/usr/share/elasticsearch/data', './certs:$CERTS_DIR'] + volumes: ['data02:/usr/share/elasticsearch/data', 'certs:$CERTS_DIR'] wait_until_ready: image: {docker-image} command: /usr/bin/true depends_on: {"es01": {"condition": "service_healthy"}} -volumes: {"esdata_01": {"driver": "local"}, "esdata_02": {"driver": "local"}} +volumes: {"data01", "data02", "certs"} ---- <1> Bootstrap `elastic` with the password defined in `.env`. See @@ -175,7 +177,7 @@ endif::[] -- ["source","sh"] ---- -docker-compose -f create-certs.yml up +docker-compose -f create-certs.yml run --rm create_certs ---- -- . Start two {es} nodes configured for SSL/TLS: @@ -189,9 +191,9 @@ docker-compose up -d . Access the {es} API over SSL/TLS using the bootstrapped password: + -- -["source","sh"] +["source","sh",subs="attributes"] ---- -curl --cacert certs/ca/ca.crt -u elastic:PleaseChangeMe https://localhost:9200 +docker run --rm -v es_certs:/certs --network=es_default {docker-image} curl --cacert /certs/ca/ca.crt -u elastic:PleaseChangeMe https://es01:9200 ---- // NOTCONSOLE -- @@ -210,3 +212,13 @@ auto --batch \ --url https://localhost:9200" ---- -- + +[float] +==== Tear everything down +To remove all the Docker resources created by the example, issue: +-- +["source","sh"] +---- +docker-compose down -v +---- +-- diff --git a/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc b/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc index 2eab8e0ae5a..9acb0e9b3ef 100644 --- a/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc +++ b/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc @@ -2,6 +2,8 @@ [[separating-node-client-traffic]] === Separating node-to-node and client traffic +deprecated[7.3.0, Transport Client is deprecated and will be removed] + Elasticsearch has the feature of so called {ref}/modules-transport.html[TCP transport profiles] that allows it to bind to several ports and addresses. 
The {es} diff --git a/docs/reference/setup/important-settings/discovery-settings.asciidoc b/docs/reference/setup/important-settings/discovery-settings.asciidoc index 245852b2096..942c076a33a 100644 --- a/docs/reference/setup/important-settings/discovery-settings.asciidoc +++ b/docs/reference/setup/important-settings/discovery-settings.asciidoc @@ -39,9 +39,9 @@ first election. In <>, with no discovery settings configured, this step is automatically performed by the nodes themselves. As this auto-bootstrapping is <>, when you start a brand new cluster in <>, you must explicitly list the names or IP addresses of the -master-eligible nodes whose votes should be counted in the very first election. -This list is set using the `cluster.initial_master_nodes` setting. +mode>>, you must explicitly list the master-eligible nodes whose votes should be +counted in the very first election. This list is set using the +`cluster.initial_master_nodes` setting. [source,yaml] -------------------------------------------------- diff --git a/docs/reference/sql/functions/geo.asciidoc b/docs/reference/sql/functions/geo.asciidoc index 72f69af8552..fc9a85ce97e 100644 --- a/docs/reference/sql/functions/geo.asciidoc +++ b/docs/reference/sql/functions/geo.asciidoc @@ -147,7 +147,7 @@ ST_Y( .Description: -Returns the the latitude of the first point in the geometry. +Returns the latitude of the first point in the geometry. ["source","sql",subs="attributes,macros"] -------------------------------------------------- @@ -206,4 +206,4 @@ Returns the distance between geometries in meters. Both geometries have to be po ["source","sql",subs="attributes,macros"] -------------------------------------------------- include-tagged::{sql-specs}/docs/geo.csv-spec[distance] --------------------------------------------------- \ No newline at end of file +-------------------------------------------------- diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc index 50e70c08ded..2682b46230e 100644 --- a/docs/reference/upgrade/cluster_restart.asciidoc +++ b/docs/reference/upgrade/cluster_restart.asciidoc @@ -5,7 +5,7 @@ To upgrade directly to {es} {version} from versions 6.0-6.7, you must shut down all nodes in the cluster, upgrade each node to {version}, and restart the cluster. NOTE: If you are running a version prior to 6.0, -https://www.elastic.co/guide/en/elastic-stack/6.8/upgrading-elastic-stack.html[upgrade to 6.8] +{stack-ref-68}/upgrading-elastic-stack.html[upgrade to 6.8] and reindex your old indices or bring up a new {version} cluster and <>. diff --git a/docs/reference/upgrade/reindex_upgrade.asciidoc b/docs/reference/upgrade/reindex_upgrade.asciidoc index 7fc6c320263..faa8fbc1639 100644 --- a/docs/reference/upgrade/reindex_upgrade.asciidoc +++ b/docs/reference/upgrade/reindex_upgrade.asciidoc @@ -36,7 +36,7 @@ been deleted. [[reindex-upgrade-inplace]] === Reindex in place -You can use the Upgrade Assistant in {kib} 6.7 to automatically reindex 5.x +You can use the Upgrade Assistant in {kib} 6.8 to automatically reindex 5.x indices you need to carry forward to {version}. To manually reindex your old indices in place: @@ -103,7 +103,7 @@ endif::include-xpack[] You can use <> to migrate indices from your old cluster to a new {version} cluster. This enables you move to {version} -from a pre-6.7 cluster without interrupting service. +from a pre-6.8 cluster without interrupting service. 
[WARNING] ============================================= @@ -196,4 +196,4 @@ monitor progress of the reindex job with the <>: `30s` and `1`). .. Once reindexing is complete and the status of the new index is `green`, - you can delete the old index. \ No newline at end of file + you can delete the old index. diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc index f1a7e2da58b..56f047807e0 100644 --- a/docs/reference/upgrade/rolling_upgrade.asciidoc +++ b/docs/reference/upgrade/rolling_upgrade.asciidoc @@ -10,7 +10,7 @@ running the older version. Rolling upgrades are supported: * Between minor versions -* https://www.elastic.co/guide/en/elastic-stack/6.8/upgrading-elastic-stack.html[From 5.6 to 6.8] +* {stack-ref-68}/upgrading-elastic-stack.html[From 5.6 to 6.8] * From 6.8 to {version} Upgrading directly to {version} from 6.7 or earlier requires a diff --git a/gradle.properties b/gradle.properties index 491770edd7c..63b1dc3cd72 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,3 +1,3 @@ org.gradle.daemon=true -org.gradle.jvmargs=-Xmx2g -XX:+HeapDumpOnOutOfMemoryError -Xss2m +org.gradle.jvmargs=-Xmx3g -XX:+HeapDumpOnOutOfMemoryError -Xss2m options.forkOptions.memoryMaximumSize=2g diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 47216b872e4..9fabbb9bd78 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-5.4.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-5.5-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=14cd15fc8cc8705bd69dcfa3c8fefb27eb7027f5de4b47a8b279218f76895a91 +distributionSha256Sum=302b7df46730ce75c582542c056c9bf5cac2b94fbf2cc656d0e37e41e8a5d371 diff --git a/gradlew b/gradlew index b0d6d0ab5de..8e25e6c19d5 100755 --- a/gradlew +++ b/gradlew @@ -7,7 +7,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/gradlew.bat b/gradlew.bat index 15e1ee37a70..24467a141f7 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -5,7 +5,7 @@ @rem you may not use this file except in compliance with the License. 
@rem You may obtain a copy of the License at @rem -@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem https://www.apache.org/licenses/LICENSE-2.0 @rem @rem Unless required by applicable law or agreed to in writing, software @rem distributed under the License is distributed on an "AS IS" BASIS, diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java index 4920b7daae8..35770be0b01 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java @@ -39,7 +39,7 @@ public class CharGroupTokenizerFactory extends AbstractTokenizerFactory{ private boolean tokenizeOnSymbol = false; public CharGroupTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); for (final String c : settings.getAsList("tokenize_on_chars")) { if (c == null || c.length() == 0) { diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ClassicTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ClassicTokenizerFactory.java index 27316f4cde5..d522f09c39c 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ClassicTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ClassicTokenizerFactory.java @@ -35,7 +35,7 @@ public class ClassicTokenizerFactory extends AbstractTokenizerFactory { private final int maxTokenLength; ClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerFactory.java index 9bb17abf0cd..2a366513f4e 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerFactory.java @@ -36,7 +36,7 @@ public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory { private final CharMatcher matcher; EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE); this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); this.matcher = parseTokenChars(settings.getAsList("token_chars")); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeywordTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeywordTokenizerFactory.java index e4bf2c8c4ad..a63947a0fe3 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeywordTokenizerFactory.java +++ 
b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeywordTokenizerFactory.java @@ -31,7 +31,7 @@ public class KeywordTokenizerFactory extends AbstractTokenizerFactory { private final int bufferSize; KeywordTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); bufferSize = settings.getAsInt("buffer_size", 256); } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LetterTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LetterTokenizerFactory.java index cba30cb63c3..658f840bf8a 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LetterTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LetterTokenizerFactory.java @@ -29,7 +29,7 @@ import org.elasticsearch.index.analysis.AbstractTokenizerFactory; public class LetterTokenizerFactory extends AbstractTokenizerFactory { LetterTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); } @Override diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenizerFactory.java index e811d0fbc4b..ff3b27b021e 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenizerFactory.java @@ -85,7 +85,7 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory { } NGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); int maxAllowedNgramDiff = indexSettings.getMaxNgramDiff(); this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE); this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PathHierarchyTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PathHierarchyTokenizerFactory.java index 5b966c1c3b8..5b010390bf3 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PathHierarchyTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PathHierarchyTokenizerFactory.java @@ -37,7 +37,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory { private final boolean reverse; PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); bufferSize = settings.getAsInt("buffer_size", 1024); String delimiter = settings.get("delimiter"); if (delimiter == null) { diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternTokenizerFactory.java index 11ba7e44db0..2abb4e8ed0e 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternTokenizerFactory.java +++ 
b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternTokenizerFactory.java @@ -35,7 +35,7 @@ public class PatternTokenizerFactory extends AbstractTokenizerFactory { private final int group; PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/); if (sPattern == null) { diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SimplePatternSplitTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SimplePatternSplitTokenizerFactory.java index 0faf4078295..2c68b7ba2f7 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SimplePatternSplitTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SimplePatternSplitTokenizerFactory.java @@ -31,7 +31,7 @@ public class SimplePatternSplitTokenizerFactory extends AbstractTokenizerFactory private final String pattern; public SimplePatternSplitTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); pattern = settings.get("pattern", ""); } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SimplePatternTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SimplePatternTokenizerFactory.java index 67aee333d0f..a2148a61a2b 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SimplePatternTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SimplePatternTokenizerFactory.java @@ -31,7 +31,7 @@ public class SimplePatternTokenizerFactory extends AbstractTokenizerFactory { private final String pattern; public SimplePatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); pattern = settings.get("pattern", ""); } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java index 5d6135549b8..4e299850ff1 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java @@ -116,7 +116,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { Analyzer buildSynonymAnalyzer(TokenizerFactory tokenizer, List charFilters, List tokenFilters, Function allFilters) { - return new CustomAnalyzer("synonyms", tokenizer, charFilters.toArray(new CharFilterFactory[0]), + return new CustomAnalyzer(tokenizer, charFilters.toArray(new CharFilterFactory[0]), tokenFilters.stream() .map(TokenFilterFactory::getSynonymFilter) .toArray(TokenFilterFactory[]::new)); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ThaiTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ThaiTokenizerFactory.java index 861ade079a0..04fbe949bb0 100644 --- 
a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ThaiTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ThaiTokenizerFactory.java @@ -32,7 +32,7 @@ import org.elasticsearch.index.analysis.AbstractTokenizerFactory; public class ThaiTokenizerFactory extends AbstractTokenizerFactory { ThaiTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); } @Override diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UAX29URLEmailTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UAX29URLEmailTokenizerFactory.java index cd02eec24b4..19a0f500cd9 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UAX29URLEmailTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UAX29URLEmailTokenizerFactory.java @@ -32,7 +32,7 @@ public class UAX29URLEmailTokenizerFactory extends AbstractTokenizerFactory { private final int maxTokenLength; UAX29URLEmailTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/WhitespaceTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/WhitespaceTokenizerFactory.java index 7ce6a361cba..9b962675830 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/WhitespaceTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/WhitespaceTokenizerFactory.java @@ -34,7 +34,7 @@ public class WhitespaceTokenizerFactory extends AbstractTokenizerFactory { private Integer maxTokenLength; WhitespaceTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); maxTokenLength = settings.getAsInt(MAX_TOKEN_LENGTH, StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java index 4cd5b07fe48..f14a64487a9 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java @@ -30,7 +30,7 @@ import org.elasticsearch.index.analysis.AbstractTokenizerFactory; public class XLowerCaseTokenizerFactory extends AbstractTokenizerFactory { public XLowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); } @Override diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml index 460bc8ecf83..3bca0e1b950 100644 --- 
a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml @@ -19,7 +19,7 @@ tokenizer: type: keyword - length: { detail.tokenizer.tokens: 1 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__keyword } - match: { detail.tokenizer.tokens.0.token: Foo Bar! } --- @@ -48,7 +48,7 @@ type: simple_pattern pattern: "[abcdef0123456789]{4}" - length: { detail.tokenizer.tokens: 2 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__simple_pattern } - match: { detail.tokenizer.tokens.0.token: a6bf } - match: { detail.tokenizer.tokens.1.token: ff61 } @@ -63,7 +63,7 @@ type: simple_pattern_split pattern: == - length: { detail.tokenizer.tokens: 2 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__simple_pattern_split } - match: { detail.tokenizer.tokens.0.token: foo } - match: { detail.tokenizer.tokens.1.token: bar } @@ -77,7 +77,7 @@ tokenizer: type: thai - length: { detail.tokenizer.tokens: 2 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__thai } - match: { detail.tokenizer.tokens.0.token: ภาษา } - match: { detail.tokenizer.tokens.1.token: ไทย } @@ -104,7 +104,7 @@ min_gram: 3 max_gram: 3 - length: { detail.tokenizer.tokens: 4 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__ngram } - match: { detail.tokenizer.tokens.0.token: foo } - match: { detail.tokenizer.tokens.1.token: oob } - match: { detail.tokenizer.tokens.2.token: oba } @@ -120,7 +120,7 @@ min_gram: 3 max_gram: 3 - length: { detail.tokenizer.tokens: 4 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__ngram } - match: { detail.tokenizer.tokens.0.token: foo } - match: { detail.tokenizer.tokens.1.token: oob } - match: { detail.tokenizer.tokens.2.token: oba } @@ -166,7 +166,7 @@ min_gram: 1 max_gram: 3 - length: { detail.tokenizer.tokens: 3 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__edge_ngram } - match: { detail.tokenizer.tokens.0.token: f } - match: { detail.tokenizer.tokens.1.token: fo } - match: { detail.tokenizer.tokens.2.token: foo } @@ -181,7 +181,7 @@ min_gram: 1 max_gram: 3 - length: { detail.tokenizer.tokens: 3 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__edge_ngram } - match: { detail.tokenizer.tokens.0.token: f } - match: { detail.tokenizer.tokens.1.token: fo } - match: { detail.tokenizer.tokens.2.token: foo } @@ -218,7 +218,7 @@ tokenizer: type: classic - length: { detail.tokenizer.tokens: 4 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__classic } - match: { detail.tokenizer.tokens.0.token: Brown } - match: { detail.tokenizer.tokens.1.token: Foxes } - match: { detail.tokenizer.tokens.2.token: don't } @@ -247,7 +247,7 @@ tokenizer: type: letter - length: { detail.tokenizer.tokens: 5 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__letter } - match: { detail.tokenizer.tokens.0.token: Brown } - match: { detail.tokenizer.tokens.1.token: Foxes } - match: { detail.tokenizer.tokens.2.token: don } @@ 
-278,7 +278,7 @@ tokenizer: type: lowercase - length: { detail.tokenizer.tokens: 5 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__lowercase } - match: { detail.tokenizer.tokens.0.token: brown } - match: { detail.tokenizer.tokens.1.token: foxes } - match: { detail.tokenizer.tokens.2.token: don } @@ -309,7 +309,7 @@ tokenizer: type: path_hierarchy - length: { detail.tokenizer.tokens: 3 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__path_hierarchy } - match: { detail.tokenizer.tokens.0.token: a } - match: { detail.tokenizer.tokens.1.token: a/b } - match: { detail.tokenizer.tokens.2.token: a/b/c } @@ -322,7 +322,7 @@ tokenizer: type: PathHierarchy - length: { detail.tokenizer.tokens: 3 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__PathHierarchy } - match: { detail.tokenizer.tokens.0.token: a } - match: { detail.tokenizer.tokens.1.token: a/b } - match: { detail.tokenizer.tokens.2.token: a/b/c } @@ -361,7 +361,7 @@ tokenizer: type: pattern - length: { detail.tokenizer.tokens: 5 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__pattern } - match: { detail.tokenizer.tokens.0.token: split } - match: { detail.tokenizer.tokens.1.token: by } - match: { detail.tokenizer.tokens.2.token: whitespace } @@ -392,7 +392,7 @@ tokenizer: type: uax_url_email - length: { detail.tokenizer.tokens: 4 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__uax_url_email } - match: { detail.tokenizer.tokens.0.token: Email } - match: { detail.tokenizer.tokens.1.token: me } - match: { detail.tokenizer.tokens.2.token: at } @@ -421,7 +421,7 @@ tokenizer: type: whitespace - length: { detail.tokenizer.tokens: 3 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.name: __anonymous__whitespace } - match: { detail.tokenizer.tokens.0.token: split } - match: { detail.tokenizer.tokens.1.token: by } - match: { detail.tokenizer.tokens.2.token: whitespace } diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index 56bbed7044e..c8bc209a732 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -106,7 +106,7 @@ - length: { detail.tokenizer.tokens: 1 } - length: { detail.tokenfilters.0.tokens: 1 } - - match: { detail.tokenizer.name: keyword_for_normalizer } + - match: { detail.tokenizer.name: keyword } - match: { detail.tokenizer.tokens.0.token: ABc } - match: { detail.tokenfilters.0.name: lowercase } - match: { detail.tokenfilters.0.tokens.0.token: abc } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index ee2f49390b8..f6eadab8014 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -22,7 +22,7 @@ import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.StreamableResponseAction; +import org.elasticsearch.action.StreamableResponseActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.node.NodeClient; @@ -45,7 +45,7 @@ import java.util.Map; import static org.elasticsearch.ingest.common.IngestCommonPlugin.GROK_PATTERNS; import static org.elasticsearch.rest.RestRequest.Method.GET; -public class GrokProcessorGetAction extends StreamableResponseAction { +public class GrokProcessorGetAction extends StreamableResponseActionType { static final GrokProcessorGetAction INSTANCE = new GrokProcessorGetAction(); static final String NAME = "cluster:admin/ingest/processor/grok/get"; diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_default_pipeline.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_default_pipeline.yml index 86f4821ddaa..3d100fad3b0 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_default_pipeline.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_default_pipeline.yml @@ -119,6 +119,12 @@ teardown: {"doc":{"bytes_source_field":"2kb"}, "doc_as_upsert":true} {"update":{"_id":"8","_index":"test"}} {"script": "ctx._source.ran_script = true","upsert":{"bytes_source_field":"3kb"}, "scripted_upsert" : true} + {"update":{"_id":"6_alias","_index":"test_alias"}} + {"script":"ctx._source.ran_script = true","upsert":{"bytes_source_field":"1kb"}} + {"update":{"_id":"7_alias","_index":"test_alias"}} + {"doc":{"bytes_source_field":"2kb"}, "doc_as_upsert":true} + {"update":{"_id":"8_alias","_index":"test_alias"}} + {"script": "ctx._source.ran_script = true","upsert":{"bytes_source_field":"3kb"}, "scripted_upsert" : true} - do: mget: @@ -127,6 +133,9 @@ teardown: - { _index: "test", _id: "6" } - { _index: "test", _id: "7" } - { _index: "test", _id: "8" } + - { _index: "test", _id: "6_alias" } + - { _index: "test", _id: "7_alias" } + - { _index: "test", _id: "8_alias" } - match: { docs.0._index: "test" } - match: { docs.0._id: "6" } - match: { docs.0._source.bytes_source_field: "1kb" } @@ -141,6 +150,20 @@ teardown: - match: { docs.2._source.bytes_source_field: "3kb" } - match: { docs.2._source.bytes_target_field: 3072 } - match: { docs.2._source.ran_script: true } + - match: { docs.3._index: "test" } + - match: { docs.3._id: "6_alias" } + - match: { docs.3._source.bytes_source_field: "1kb" } + - match: { docs.3._source.bytes_target_field: 1024 } + - is_false: docs.3._source.ran_script + - match: { docs.4._index: "test" } + - match: { docs.4._id: "7_alias" } + - match: { docs.4._source.bytes_source_field: "2kb" } + - match: { docs.4._source.bytes_target_field: 2048 } + - match: { docs.5._index: "test" } + - match: { docs.5._id: "8_alias" } + - match: { docs.5._source.bytes_source_field: "3kb" } + - match: { docs.5._source.bytes_target_field: 3072 } + - match: { docs.5._source.ran_script: true } # explicit no default pipeline - do: diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java index 573c5888991..a096a89951e 100644 --- 
a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java @@ -19,10 +19,10 @@ package org.elasticsearch.script.mustache; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.Writeable; -public class MultiSearchTemplateAction extends Action { +public class MultiSearchTemplateAction extends ActionType { public static final MultiSearchTemplateAction INSTANCE = new MultiSearchTemplateAction(); public static final String NAME = "indices:data/read/msearch/template"; diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java index 7bd57154e26..2e8417c9939 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java @@ -19,10 +19,10 @@ package org.elasticsearch.script.mustache; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.Writeable; -public class SearchTemplateAction extends Action { +public class SearchTemplateAction extends ActionType { public static final SearchTemplateAction INSTANCE = new SearchTemplateAction(); public static final String NAME = "indices:data/read/search/template"; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java index 4abad4d78af..5e941a2d0e2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.painless.action; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -64,7 +64,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; * retrieves all available information about the API for this specific context * */ -public class PainlessContextAction extends Action { +public class PainlessContextAction extends ActionType { public static final PainlessContextAction INSTANCE = new PainlessContextAction(); private static final String NAME = "cluster:admin/scripts/painless/context"; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index bc34c90ad52..93e24816ed7 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -31,7 +31,7 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.store.RAMDirectory; import org.elasticsearch.Version; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import 
org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilters; @@ -92,7 +92,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; -public class PainlessExecuteAction extends Action { +public class PainlessExecuteAction extends ActionType { public static final PainlessExecuteAction INSTANCE = new PainlessExecuteAction(); private static final String NAME = "cluster:admin/scripts/painless/execute"; diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTimeTests.java similarity index 89% rename from modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTests.java rename to modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTimeTests.java index 58357cce3ac..38ceae74e05 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTimeTests.java @@ -22,7 +22,7 @@ package org.elasticsearch.painless; import java.time.ZoneId; import java.time.ZonedDateTime; -public class DateTests extends ScriptTestCase { +public class DateTimeTests extends ScriptTestCase { public void testLongToZonedDateTime() { assertEquals(ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of("Z")), exec( @@ -177,4 +177,18 @@ public class DateTests extends ScriptTestCase { "return zdt1.isAfter(zdt2);" )); } + + public void testTimeZone() { + assertEquals(ZonedDateTime.of(1983, 10, 13, 15, 15, 30, 0, ZoneId.of("America/Los_Angeles")), exec( + "ZonedDateTime utc = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z'));" + + "return utc.withZoneSameInstant(ZoneId.of('America/Los_Angeles'));")); + + assertEquals("Thu, 13 Oct 1983 15:15:30 -0700", exec( + "String gmtString = 'Thu, 13 Oct 1983 22:15:30 GMT';" + + "ZonedDateTime gmtZdt = ZonedDateTime.parse(gmtString," + + "DateTimeFormatter.RFC_1123_DATE_TIME);" + + "ZonedDateTime pstZdt =" + + "gmtZdt.withZoneSameInstant(ZoneId.of('America/Los_Angeles'));" + + "return pstZdt.format(DateTimeFormatter.RFC_1123_DATE_TIME);")); + } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java index 664377786f8..07de8c8a22c 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.index.rankeval; -import org.elasticsearch.action.StreamableResponseAction; +import org.elasticsearch.action.StreamableResponseActionType; /** - * Action for explaining evaluating search ranking results. + * ActionType for explaining evaluating search ranking results. 
*/ -public class RankEvalAction extends StreamableResponseAction { +public class RankEvalAction extends StreamableResponseActionType { public static final RankEvalAction INSTANCE = new RankEvalAction(); public static final String NAME = "indices:data/read/rank_eval"; diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequestBuilder.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequestBuilder.java index 4108a817f04..1bfb576c379 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequestBuilder.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequestBuilder.java @@ -19,13 +19,13 @@ package org.elasticsearch.index.rankeval; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; public class RankEvalRequestBuilder extends ActionRequestBuilder { - public RankEvalRequestBuilder(ElasticsearchClient client, Action action, + public RankEvalRequestBuilder(ElasticsearchClient client, ActionType action, RankEvalRequest request) { super(client, action, request); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java index 8111aac3945..63451abb7cc 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; @@ -38,7 +38,7 @@ import java.util.Map; public abstract class AbstractBaseReindexRestHandler< Request extends AbstractBulkByScrollRequest, - A extends Action + A extends ActionType > extends BaseRestHandler { private final A action; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java index 240ccde3505..834703cddc7 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -39,7 +39,7 @@ import java.util.function.Consumer; */ public abstract class AbstractBulkByQueryRestHandler< Request extends AbstractBulkByScrollRequest, - A extends Action> extends AbstractBaseReindexRestHandler { + A extends ActionType> extends AbstractBaseReindexRestHandler { protected AbstractBulkByQueryRestHandler(Settings settings, A action) { super(settings, action); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java 
b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java index dfb8deac58c..6df73414e81 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; @@ -61,7 +61,7 @@ class BulkByScrollParallelizationHelper { static > void startSlicedAction( Request request, BulkByScrollTask task, - Action action, + ActionType action, ActionListener listener, Client client, DiscoveryNode node, @@ -85,7 +85,7 @@ class BulkByScrollParallelizationHelper { private static > void sliceConditionally( Request request, BulkByScrollTask task, - Action action, + ActionType action, ActionListener listener, Client client, DiscoveryNode node, @@ -118,7 +118,7 @@ class BulkByScrollParallelizationHelper { private static > void sendSubRequests( Client client, - Action action, + ActionType action, String localNodeId, BulkByScrollTask task, Request request, diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java index 513b4261bdf..cf04d6d856d 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java @@ -19,11 +19,11 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.common.io.stream.Writeable; -public class RethrottleAction extends Action { +public class RethrottleAction extends ActionType { public static final RethrottleAction INSTANCE = new RethrottleAction(); public static final String NAME = "cluster:admin/reindex/rethrottle"; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleRequestBuilder.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleRequestBuilder.java index 25407e6dc93..648eb6e441b 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleRequestBuilder.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleRequestBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.tasks.TasksRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -29,7 +29,7 @@ import org.elasticsearch.client.ElasticsearchClient; */ public class RethrottleRequestBuilder extends TasksRequestBuilder { public RethrottleRequestBuilder(ElasticsearchClient client, - Action action) { + ActionType action) { super(client, action, new RethrottleRequest()); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java 
b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index bdedc65b7a6..3d28ce3bcbc 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -23,7 +23,7 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -761,7 +761,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { @Override @SuppressWarnings("unchecked") protected - void doExecute(Action action, Request request, ActionListener listener) { + void doExecute(ActionType action, Request request, ActionListener listener) { if (false == expectedHeaders.equals(threadPool().getThreadContext().getHeaders())) { listener.onFailure( new RuntimeException("Expected " + expectedHeaders + " but got " + threadPool().getThreadContext().getHeaders())); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java index dc6d5eac588..9c017cb2ded 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java @@ -167,7 +167,7 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase { } /** - * Action filter that will reject the request if it isn't authenticated. + * ActionType filter that will reject the request if it isn't authenticated. 
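The hunks above are part of a mechanical Action -> ActionType rename (the "ActionType filter" wording in the javadoc just above appears to be a by-product of the same search-and-replace). A minimal sketch of the post-rename shape, modelled on the RethrottleAction hunk above; the generic response parameter is an assumption here, since the flattened diff drops angle brackets, and the class body is elided:

    public class RethrottleAction extends ActionType<ListTasksResponse> {
        public static final RethrottleAction INSTANCE = new RethrottleAction();
        public static final String NAME = "cluster:admin/reindex/rethrottle";
        // constructor and response reader are unchanged by the rename
    }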
*/ public static class TestFilter implements ActionFilter { /** diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java index 91676037043..f9406e45447 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.blobstore.url; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; @@ -74,6 +75,11 @@ public class URLBlobContainer extends AbstractBlobContainer { throw new UnsupportedOperationException("URL repository doesn't support this operation"); } + @Override + public Map children() throws IOException { + throw new UnsupportedOperationException("URL repository doesn't support this operation"); + } + /** * This operation is not supported by URLBlobContainer */ @@ -90,6 +96,11 @@ public class URLBlobContainer extends AbstractBlobContainer { throw new UnsupportedOperationException("URL repository is read only"); } + @Override + public void delete() { + throw new UnsupportedOperationException("URL repository is read only"); + } + /** * This operation is not supported by URLBlobContainer */ diff --git a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java index 29582f1f871..11b660cd2b3 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java @@ -116,7 +116,7 @@ public class URLRepository extends BlobStoreRepository { } @Override - protected BlobPath basePath() { + public BlobPath basePath() { return basePath; } diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java index 3f8b9296aa0..7168af45ee7 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java @@ -47,7 +47,7 @@ public class IcuTokenizerFactory extends AbstractTokenizerFactory { private static final String RULE_FILES = "rule_files"; public IcuTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); config = getIcuConfig(environment, settings); } diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java index e9268f73065..22000cf7979 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java @@ -45,7 +45,7 @@ public class KuromojiTokenizerFactory 
extends AbstractTokenizerFactory { private boolean discartPunctuation; public KuromojiTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); mode = getMode(settings); userDictionary = getUserDictionary(env, settings); discartPunctuation = settings.getAsBoolean("discard_punctuation", true); diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/index/analysis/NoriTokenizerFactory.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/index/analysis/NoriTokenizerFactory.java index aa96da807c8..8830cf7c977 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/index/analysis/NoriTokenizerFactory.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/index/analysis/NoriTokenizerFactory.java @@ -41,7 +41,7 @@ public class NoriTokenizerFactory extends AbstractTokenizerFactory { private final KoreanTokenizer.DecompoundMode decompoundMode; public NoriTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); decompoundMode = getMode(settings); userDictionary = getUserDictionary(env, settings); } diff --git a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseTokenizerTokenizerFactory.java b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseTokenizerTokenizerFactory.java index 560bce9db27..47a4fe84f00 100644 --- a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseTokenizerTokenizerFactory.java +++ b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseTokenizerTokenizerFactory.java @@ -28,7 +28,7 @@ import org.elasticsearch.index.IndexSettings; public class SmartChineseTokenizerTokenizerFactory extends AbstractTokenizerFactory { public SmartChineseTokenizerTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings); + super(indexSettings, settings, name); } @Override diff --git a/plugins/repository-azure/qa/microsoft-azure-storage/build.gradle b/plugins/repository-azure/qa/microsoft-azure-storage/build.gradle index decef58aff2..0c2f68d3483 100644 --- a/plugins/repository-azure/qa/microsoft-azure-storage/build.gradle +++ b/plugins/repository-azure/qa/microsoft-azure-storage/build.gradle @@ -80,6 +80,8 @@ testClusters.integTest { // in a hacky way to change the protocol and endpoint. We must fix that. 
setting 'azure.client.integration_test.endpoint_suffix', { "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${azureStorageFixture.addressAndPort }" } + String firstPartOfSeed = project.rootProject.testSeed.tokenize(':').get(0) + setting 'thread_pool.repository_azure.max', (Math.abs(Long.parseUnsignedLong(firstPartOfSeed, 16) % 10) + 1).toString() } else { println "Using an external service to test the repository-azure plugin" } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index 002907a0a7e..3dfd5903721 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -23,29 +23,38 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; +import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.URISyntaxException; import java.nio.file.NoSuchFileException; +import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutorService; public class AzureBlobContainer extends AbstractBlobContainer { private final Logger logger = LogManager.getLogger(AzureBlobContainer.class); private final AzureBlobStore blobStore; - + private final ThreadPool threadPool; private final String keyPath; - public AzureBlobContainer(BlobPath path, AzureBlobStore blobStore) { + AzureBlobContainer(BlobPath path, AzureBlobStore blobStore, ThreadPool threadPool) { super(path); this.blobStore = blobStore; this.keyPath = path.buildAsString(); + this.threadPool = threadPool; } @Override @@ -117,6 +126,43 @@ public class AzureBlobContainer extends AbstractBlobContainer { } } + @Override + public void delete() throws IOException { + try { + blobStore.deleteBlobDirectory(keyPath, threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME)); + } catch (URISyntaxException | StorageException e) { + throw new IOException(e); + } + } + + @Override + public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOException { + final PlainActionFuture result = PlainActionFuture.newFuture(); + if (blobNames.isEmpty()) { + result.onResponse(null); + } else { + final GroupedActionListener listener = + new GroupedActionListener<>(ActionListener.map(result, v -> null), blobNames.size()); + final ExecutorService executor = threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME); + // Executing deletes in parallel since Azure SDK 8 is using blocking IO while Azure does not provide a bulk delete API endpoint + // TODO: Upgrade to newer non-blocking Azure SDK 11 and execute delete requests in parallel that way. 
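// Note on the fan-out/join above: each blob is deleted in its own task on the repository_azure
// thread pool, and the GroupedActionListener created above completes the outer PlainActionFuture
// only after it has received blobNames.size() responses (ActionListener.map(result, v -> null)
// collapses them into a single null). The calling thread then blocks in result.actionGet() below,
// so the bulk delete returns only once every single-blob delete has succeeded or failed. The
// generic parameters (PlainActionFuture<Void>, GroupedActionListener<Void>) are assumed, since
// the flattened diff drops angle brackets.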
+ for (String blobName : blobNames) { + executor.execute(new ActionRunnable(listener) { + @Override + protected void doRun() throws IOException { + deleteBlobIgnoringIfNotExists(blobName); + listener.onResponse(null); + } + }); + } + } + try { + result.actionGet(); + } catch (Exception e) { + throw new IOException("Exception during bulk delete", e); + } + } + @Override public Map listBlobsByPrefix(@Nullable String prefix) throws IOException { logger.trace("listBlobsByPrefix({})", prefix); @@ -135,6 +181,16 @@ public class AzureBlobContainer extends AbstractBlobContainer { return listBlobsByPrefix(null); } + @Override + public Map children() throws IOException { + final BlobPath path = path(); + try { + return blobStore.children(path); + } catch (URISyntaxException | StorageException e) { + throw new IOException("Failed to list children in path [" + path.buildAsString() + "].", e); + } + } + protected String buildKey(String blobName) { return keyPath + (blobName == null ? "" : blobName); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index bc3993440a3..a7d9bb93a51 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -28,27 +28,34 @@ import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.repositories.azure.AzureRepository.Repository; +import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.io.InputStream; import java.net.URISyntaxException; import java.nio.file.FileAlreadyExistsException; +import java.util.Collections; import java.util.Map; +import java.util.concurrent.Executor; +import java.util.function.Function; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; public class AzureBlobStore implements BlobStore { private final AzureStorageService service; + private final ThreadPool threadPool; private final String clientName; private final String container; private final LocationMode locationMode; - public AzureBlobStore(RepositoryMetaData metadata, AzureStorageService service) { + public AzureBlobStore(RepositoryMetaData metadata, AzureStorageService service, ThreadPool threadPool) { this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.clientName = Repository.CLIENT_NAME.get(metadata.settings()); this.service = service; + this.threadPool = threadPool; // locationMode is set per repository, not per client this.locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); final Map prevSettings = this.service.refreshAndClearCache(emptyMap()); @@ -70,7 +77,7 @@ public class AzureBlobStore implements BlobStore { @Override public BlobContainer blobContainer(BlobPath path) { - return new AzureBlobContainer(path, this); + return new AzureBlobContainer(path, this, threadPool); } @Override @@ -85,6 +92,10 @@ public class AzureBlobStore implements BlobStore { service.deleteBlob(clientName, container, blob); } + public void deleteBlobDirectory(String path, Executor executor) throws URISyntaxException, StorageException, IOException { + service.deleteBlobDirectory(clientName, container, path, executor); + } + public InputStream getInputStream(String blob) 
throws URISyntaxException, StorageException, IOException { return service.getInputStream(clientName, container, blob); } @@ -94,6 +105,11 @@ public class AzureBlobStore implements BlobStore { return service.listBlobsByPrefix(clientName, container, keyPath, prefix); } + public Map children(BlobPath path) throws URISyntaxException, StorageException { + return Collections.unmodifiableMap(service.children(clientName, container, path).stream().collect( + Collectors.toMap(Function.identity(), name -> new AzureBlobContainer(path.add(name), this, threadPool)))); + } + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws URISyntaxException, StorageException, FileAlreadyExistsException { service.writeBlob(this.clientName, container, blobName, inputStream, blobSize, failIfAlreadyExists); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 3a9c6dd2c3d..87e918ed319 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -75,7 +75,6 @@ public class AzureRepository extends BlobStoreRepository { private final BlobPath basePath; private final ByteSizeValue chunkSize; - private final Environment environment; private final AzureStorageService storageService; private final boolean readonly; @@ -83,7 +82,6 @@ public class AzureRepository extends BlobStoreRepository { AzureStorageService storageService, ThreadPool threadPool) { super(metadata, environment.settings(), Repository.COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry, threadPool); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); - this.environment = environment; this.storageService = storageService; final String basePath = Strings.trimLeadingCharacter(Repository.BASE_PATH_SETTING.get(metadata.settings()), '/'); @@ -115,7 +113,7 @@ public class AzureRepository extends BlobStoreRepository { @Override protected AzureBlobStore createBlobStore() { - final AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService); + final AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService, threadPool); logger.debug(() -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", @@ -124,7 +122,7 @@ public class AzureRepository extends BlobStoreRepository { } @Override - protected BlobPath basePath() { + public BlobPath basePath() { return basePath; } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index 21d041a2d7a..809ba9d5158 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -22,12 +22,15 @@ package org.elasticsearch.repositories.azure; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import 
org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; +import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; @@ -40,6 +43,8 @@ import java.util.Map; */ public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { + public static final String REPOSITORY_THREAD_POOL_NAME = "repository_azure"; + // protected for testing final AzureStorageService azureStoreService; @@ -70,6 +75,15 @@ public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin, R ); } + @Override + public List> getExecutorBuilders(Settings settings) { + return Collections.singletonList(executorBuilder()); + } + + public static ExecutorBuilder executorBuilder() { + return new ScalingExecutorBuilder(REPOSITORY_THREAD_POOL_NAME, 0, 32, TimeValue.timeValueSeconds(30L)); + } + @Override public void reload(Settings settings) { // secure settings should be readable diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index ee00551849d..be98edda83d 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -29,8 +29,10 @@ import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.BlobInputStream; import com.microsoft.azure.storage.blob.BlobListingDetails; import com.microsoft.azure.storage.blob.BlobProperties; +import com.microsoft.azure.storage.blob.CloudBlob; import com.microsoft.azure.storage.blob.CloudBlobClient; import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.CloudBlobDirectory; import com.microsoft.azure.storage.blob.CloudBlockBlob; import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; import com.microsoft.azure.storage.blob.ListBlobItem; @@ -38,7 +40,9 @@ import com.microsoft.azure.storage.blob.ListBlobItem; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; @@ -46,6 +50,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import java.io.IOException; import java.io.InputStream; @@ -54,8 +59,15 @@ import java.net.URI; import java.net.URISyntaxException; import java.nio.file.FileAlreadyExistsException; import java.security.InvalidKeyException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.EnumSet; +import java.util.HashSet; import java.util.Map; 
+import java.util.Set; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; import static java.util.Collections.emptyMap; @@ -181,6 +193,50 @@ public class AzureStorageService { }); } + void deleteBlobDirectory(String account, String container, String path, Executor executor) + throws URISyntaxException, StorageException, IOException { + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + final Collection exceptions = Collections.synchronizedList(new ArrayList<>()); + final AtomicLong outstanding = new AtomicLong(1L); + final PlainActionFuture result = PlainActionFuture.newFuture(); + SocketAccess.doPrivilegedVoidException(() -> { + for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true)) { + // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ + // this requires 1 + container.length() + 1, with each 1 corresponding to one of the / + final String blobPath = blobItem.getUri().getPath().substring(1 + container.length() + 1); + outstanding.incrementAndGet(); + executor.execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + deleteBlob(account, container, blobPath); + } + + @Override + public void onFailure(Exception e) { + exceptions.add(e); + } + + @Override + public void onAfter() { + if (outstanding.decrementAndGet() == 0) { + result.onResponse(null); + } + } + }); + } + }); + if (outstanding.decrementAndGet() == 0) { + result.onResponse(null); + } + result.actionGet(); + if (exceptions.isEmpty() == false) { + final IOException ex = new IOException("Deleting directory [" + path + "] failed"); + exceptions.forEach(ex::addSuppressed); + throw ex; + } + } + public InputStream getInputStream(String account, String container, String blob) throws URISyntaxException, StorageException, IOException { final Tuple> client = client(account); @@ -209,15 +265,40 @@ public class AzureStorageService { // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ // this requires 1 + container.length() + 1, with each 1 corresponding to one of the / final String blobPath = uri.getPath().substring(1 + container.length() + 1); - final BlobProperties properties = ((CloudBlockBlob) blobItem).getProperties(); - final String name = blobPath.substring(keyPath.length()); - logger.trace(() -> new ParameterizedMessage("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength())); - blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength())); + if (blobItem instanceof CloudBlob) { + final BlobProperties properties = ((CloudBlob) blobItem).getProperties(); + final String name = blobPath.substring(keyPath.length()); + logger.trace(() -> new ParameterizedMessage("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength())); + blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength())); + } } }); return blobsBuilder.immutableMap(); } + public Set children(String account, String container, BlobPath path) throws URISyntaxException, StorageException { + final Set blobsBuilder = new HashSet<>(); + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + final String keyPath = path.buildAsString(); + final EnumSet enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA); + + SocketAccess.doPrivilegedVoidException(() -> { + for 
(ListBlobItem blobItem : blobContainer.listBlobs(keyPath, false, enumBlobListingDetails, null, client.v2().get())) { + if (blobItem instanceof CloudBlobDirectory) { + final URI uri = blobItem.getUri(); + logger.trace(() -> new ParameterizedMessage("blob url [{}]", uri)); + // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ + // this requires 1 + container.length() + 1, with each 1 corresponding to one of the /. + // Lastly, we add the length of keyPath to the offset to strip this container's path. + final String uriPath = uri.getPath(); + blobsBuilder.add(uriPath.substring(1 + container.length() + 1 + keyPath.length(), uriPath.length() - 1)); + } + } + }); + return Collections.unmodifiableSet(blobsBuilder); + } + public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws URISyntaxException, StorageException, FileAlreadyExistsException { diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java index 13cc487a1c1..07d0a1e18d3 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java @@ -23,13 +23,31 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import java.util.concurrent.TimeUnit; public class AzureBlobStoreContainerTests extends ESBlobStoreContainerTestCase { + + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("AzureBlobStoreTests", AzureRepositoryPlugin.executorBuilder()); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10L, TimeUnit.SECONDS); + } + @Override protected BlobStore newBlobStore() { RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, client); + return new AzureBlobStore(repositoryMetaData, client, threadPool); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java index 67d30fda05b..74bfcb784ae 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java @@ -22,13 +22,31 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.concurrent.TimeUnit; public class AzureBlobStoreTests extends 
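A worked example of the path-stripping arithmetic used in deleteBlobDirectory and children above, with illustrative values that are not from this change: assume container = "container", keyPath = "foo/bar/", and a listed directory item whose URI path is "/container/foo/bar/baz/".

    // uriPath.substring(1 + container.length() + 1)                      -> "foo/bar/baz/"  (drops the leading "/container/")
    // uriPath.substring(1 + container.length() + 1 + keyPath.length(),
    //                   uriPath.length() - 1)                            -> "baz"           (child name, trailing '/' removed)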
ESBlobStoreTestCase { + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("AzureBlobStoreTests", AzureRepositoryPlugin.executorBuilder()); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10L, TimeUnit.SECONDS); + } + @Override protected BlobStore newBlobStore() { RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, client); + return new AzureBlobStore(repositoryMetaData, client, threadPool); } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java index fb81a5c9003..75d4ad92fbf 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java @@ -19,6 +19,7 @@ package org.elasticsearch.repositories.gcs; +import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStoreException; @@ -55,6 +56,11 @@ class GoogleCloudStorageBlobContainer extends AbstractBlobContainer { return blobStore.listBlobs(path); } + @Override + public Map children() throws IOException { + return blobStore.listChildren(path()); + } + @Override public Map listBlobsByPrefix(String prefix) throws IOException { return blobStore.listBlobsByPrefix(path, prefix); @@ -80,6 +86,11 @@ class GoogleCloudStorageBlobContainer extends AbstractBlobContainer { blobStore.deleteBlob(buildKey(blobName)); } + @Override + public void delete() throws IOException { + blobStore.deleteDirectory(path().buildAsString()); + } + @Override public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOException { blobStore.deleteBlobsIgnoringIfNotExists(blobNames.stream().map(this::buildKey).collect(Collectors.toList())); diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index c90d49bd73d..743b6ba30eb 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -19,6 +19,7 @@ package org.elasticsearch.repositories.gcs; +import com.google.api.gax.paging.Page; import com.google.cloud.BatchResult; import com.google.cloud.ReadChannel; import com.google.cloud.WriteChannel; @@ -30,7 +31,6 @@ import com.google.cloud.storage.Storage; import com.google.cloud.storage.Storage.BlobListOption; import com.google.cloud.storage.StorageBatch; import com.google.cloud.storage.StorageException; - import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -50,11 +50,11 @@ import java.nio.channels.ReadableByteChannel; import java.nio.channels.WritableByteChannel; import java.nio.file.FileAlreadyExistsException; import 
java.nio.file.NoSuchFileException; -import java.util.Map; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -132,13 +132,34 @@ class GoogleCloudStorageBlobStore implements BlobStore { Map listBlobsByPrefix(String path, String prefix) throws IOException { final String pathPrefix = buildKey(path, prefix); final MapBuilder mapBuilder = MapBuilder.newMapBuilder(); - SocketAccess.doPrivilegedVoidIOException(() -> { - client().get(bucketName).list(BlobListOption.prefix(pathPrefix)).iterateAll().forEach(blob -> { - assert blob.getName().startsWith(path); - final String suffixName = blob.getName().substring(path.length()); - mapBuilder.put(suffixName, new PlainBlobMetaData(suffixName, blob.getSize())); - }); - }); + SocketAccess.doPrivilegedVoidIOException( + () -> client().get(bucketName).list(BlobListOption.currentDirectory(), BlobListOption.prefix(pathPrefix)).iterateAll().forEach( + blob -> { + assert blob.getName().startsWith(path); + if (blob.isDirectory() == false) { + final String suffixName = blob.getName().substring(path.length()); + mapBuilder.put(suffixName, new PlainBlobMetaData(suffixName, blob.getSize())); + } + })); + return mapBuilder.immutableMap(); + } + + Map listChildren(BlobPath path) throws IOException { + final String pathStr = path.buildAsString(); + final MapBuilder mapBuilder = MapBuilder.newMapBuilder(); + SocketAccess.doPrivilegedVoidIOException + (() -> client().get(bucketName).list(BlobListOption.currentDirectory(), BlobListOption.prefix(pathStr)).iterateAll().forEach( + blob -> { + if (blob.isDirectory()) { + assert blob.getName().startsWith(pathStr); + assert blob.getName().endsWith("/"); + // Strip path prefix and trailing slash + final String suffixName = blob.getName().substring(pathStr.length(), blob.getName().length() - 1); + if (suffixName.isEmpty() == false) { + mapBuilder.put(suffixName, new GoogleCloudStorageBlobContainer(path.add(suffixName), this)); + } + } + })); return mapBuilder.immutableMap(); } @@ -286,6 +307,23 @@ class GoogleCloudStorageBlobStore implements BlobStore { } } + /** + * Deletes the given path and all its children. 
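On the GCS listChildren implementation above: listing with BlobListOption.currentDirectory() plus a prefix returns each immediate "subdirectory" as a placeholder blob whose name ends in "/", which is why the code keeps only blob.isDirectory() entries and strips the path prefix and trailing slash. A small worked example with illustrative names, not taken from this change:

    // pathStr = "foo/", directory blob returned by the listing: blob.getName() = "foo/nested/"
    // blob.getName().substring(pathStr.length(), blob.getName().length() - 1) -> "nested"
    // "nested" is then registered as a child GoogleCloudStorageBlobContainer at path.add("nested")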
+ * + * @param pathStr Name of path to delete + */ + void deleteDirectory(String pathStr) throws IOException { + SocketAccess.doPrivilegedVoidIOException(() -> { + Page page = client().get(bucketName).list(BlobListOption.prefix(pathStr)); + do { + final Collection blobsToDelete = new ArrayList<>(); + page.getValues().forEach(b -> blobsToDelete.add(b.getName())); + deleteBlobsIgnoringIfNotExists(blobsToDelete); + page = page.getNextPage(); + } while (page != null); + }); + } + /** * Deletes multiple blobs from the specific bucket using a batch request * @@ -295,11 +333,6 @@ class GoogleCloudStorageBlobStore implements BlobStore { if (blobNames.isEmpty()) { return; } - // for a single op submit a simple delete instead of a batch of size 1 - if (blobNames.size() == 1) { - deleteBlob(blobNames.iterator().next()); - return; - } final List blobIdsToDelete = blobNames.stream().map(blob -> BlobId.of(bucketName, blob)).collect(Collectors.toList()); final List failedBlobs = Collections.synchronizedList(new ArrayList<>()); final StorageException e = SocketAccess.doPrivilegedIOException(() -> { diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 8e39cb4b5f1..93e85081f6a 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -94,7 +94,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { } @Override - protected BlobPath basePath() { + public BlobPath basePath() { return basePath; } diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java index e9b45a9b52e..b050645f995 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.Path; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.fs.FsBlobContainer; @@ -77,6 +78,11 @@ final class HdfsBlobContainer extends AbstractBlobContainer { } } + @Override + public void delete() throws IOException { + store.execute(fileContext -> fileContext.delete(path, true)); + } + @Override public InputStream readBlob(String blobName) throws IOException { // FSDataInputStream does buffering internally @@ -137,11 +143,13 @@ final class HdfsBlobContainer extends AbstractBlobContainer { @Override public Map listBlobsByPrefix(@Nullable final String prefix) throws IOException { - FileStatus[] files = store.execute(fileContext -> (fileContext.util().listStatus(path, - path -> prefix == null || path.getName().startsWith(prefix)))); - Map map = new LinkedHashMap(); + FileStatus[] files = store.execute(fileContext -> fileContext.util().listStatus(path, + path -> prefix == null || path.getName().startsWith(prefix))); + Map map = new LinkedHashMap<>(); for (FileStatus file : files) { - 
map.put(file.getPath().getName(), new PlainBlobMetaData(file.getPath().getName(), file.getLen())); + if (file.isFile()) { + map.put(file.getPath().getName(), new PlainBlobMetaData(file.getPath().getName(), file.getLen())); + } } return Collections.unmodifiableMap(map); } @@ -151,6 +159,19 @@ final class HdfsBlobContainer extends AbstractBlobContainer { return listBlobsByPrefix(null); } + @Override + public Map children() throws IOException { + FileStatus[] files = store.execute(fileContext -> fileContext.util().listStatus(path)); + Map map = new LinkedHashMap<>(); + for (FileStatus file : files) { + if (file.isDirectory()) { + final String name = file.getPath().getName(); + map.put(name, new HdfsBlobContainer(path().add(name), store, new Path(path, name), bufferSize, securityContext)); + } + } + return Collections.unmodifiableMap(map); + } + /** * Exists to wrap underlying InputStream methods that might make socket connections in * doPrivileged blocks. This is due to the way that hdfs client libraries might open diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index d51a48cac0e..e5da422f2fd 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -234,7 +234,7 @@ public final class HdfsRepository extends BlobStoreRepository { } @Override - protected BlobPath basePath() { + public BlobPath basePath() { return basePath; } diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java new file mode 100644 index 00000000000..e34f290a8e2 --- /dev/null +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.repositories.hdfs; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.bootstrap.JavaVersion; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; + +import java.util.Collection; + +import static org.hamcrest.Matchers.equalTo; + +@ThreadLeakFilters(filters = HdfsClientThreadLeakFilter.class) +public class HdfsRepositoryTests extends AbstractThirdPartyRepositoryTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(HdfsPlugin.class); + } + + @Override + protected SecureSettings credentials() { + return new MockSecureSettings(); + } + + @Override + protected void createRepository(String repoName) { + assumeFalse("https://github.com/elastic/elasticsearch/issues/31498", JavaVersion.current().equals(JavaVersion.parse("11"))); + AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository(repoName) + .setType("hdfs") + .setSettings(Settings.builder() + .put("uri", "hdfs:///") + .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName()) + .put("path", "foo") + .put("chunk_size", randomIntBetween(100, 1000) + "k") + .put("compress", randomBoolean()) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } +} diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index e32eefa5055..1126381560c 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -163,7 +163,7 @@ if (useFixture) { def minioAddress = { int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" assert minioPort > 0 - return 'http://127.0.0.1:' + minioPort + 'http://127.0.0.1:' + minioPort } File minioAddressFile = new File(project.buildDir, 'generated-resources/s3Fixture.address') diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index c057d330da5..47f7ee26e83 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -25,6 +25,7 @@ import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.ListObjectsRequest; import com.amazonaws.services.s3.model.MultiObjectDeleteException; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.ObjectMetadata; @@ -38,6 +39,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStoreException; @@ -50,6 +52,9 @@ import java.io.IOException; import 
java.io.InputStream; import java.nio.file.NoSuchFileException; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -126,12 +131,53 @@ class S3BlobContainer extends AbstractBlobContainer { deleteBlobIgnoringIfNotExists(blobName); } + @Override + public void delete() throws IOException { + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + ObjectListing prevListing = null; + while (true) { + ObjectListing list; + if (prevListing != null) { + final ObjectListing finalPrevListing = prevListing; + list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing)); + } else { + final ListObjectsRequest listObjectsRequest = new ListObjectsRequest(); + listObjectsRequest.setBucketName(blobStore.bucket()); + listObjectsRequest.setPrefix(keyPath); + list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest)); + } + final List blobsToDelete = + list.getObjectSummaries().stream().map(S3ObjectSummary::getKey).collect(Collectors.toList()); + if (list.isTruncated()) { + doDeleteBlobs(blobsToDelete, false); + prevListing = list; + } else { + final List lastBlobsToDelete = new ArrayList<>(blobsToDelete); + lastBlobsToDelete.add(keyPath); + doDeleteBlobs(lastBlobsToDelete, false); + break; + } + } + } catch (final AmazonClientException e) { + throw new IOException("Exception when deleting blob container [" + keyPath + "]", e); + } + } + @Override public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOException { + doDeleteBlobs(blobNames, true); + } + + private void doDeleteBlobs(List blobNames, boolean relative) throws IOException { if (blobNames.isEmpty()) { return; } - final Set outstanding = blobNames.stream().map(this::buildKey).collect(Collectors.toSet()); + final Set outstanding; + if (relative) { + outstanding = blobNames.stream().map(this::buildKey).collect(Collectors.toSet()); + } else { + outstanding = new HashSet<>(blobNames); + } try (AmazonS3Reference clientReference = blobStore.clientReference()) { // S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 
1k deletes final List deleteRequests = new ArrayList<>(); @@ -202,12 +248,15 @@ class S3BlobContainer extends AbstractBlobContainer { final ObjectListing finalPrevListing = prevListing; list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing)); } else { + final ListObjectsRequest listObjectsRequest = new ListObjectsRequest(); + listObjectsRequest.setBucketName(blobStore.bucket()); + listObjectsRequest.setDelimiter("/"); if (blobNamePrefix != null) { - list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(blobStore.bucket(), - buildKey(blobNamePrefix))); + listObjectsRequest.setPrefix(buildKey(blobNamePrefix)); } else { - list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(blobStore.bucket(), keyPath)); + listObjectsRequest.setPrefix(keyPath); } + list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest)); } for (final S3ObjectSummary summary : list.getObjectSummaries()) { final String name = summary.getKey().substring(keyPath.length()); @@ -230,6 +279,52 @@ class S3BlobContainer extends AbstractBlobContainer { return listBlobsByPrefix(null); } + @Override + public Map children() throws IOException { + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + ObjectListing prevListing = null; + final Map entries = new HashMap<>(); + while (true) { + ObjectListing list; + if (prevListing != null) { + final ObjectListing finalPrevListing = prevListing; + list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing)); + } else { + final ListObjectsRequest listObjectsRequest = new ListObjectsRequest(); + listObjectsRequest.setBucketName(blobStore.bucket()); + listObjectsRequest.setPrefix(keyPath); + listObjectsRequest.setDelimiter("/"); + list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest)); + } + for (final String summary : list.getCommonPrefixes()) { + final String name = summary.substring(keyPath.length()); + if (name.isEmpty() == false) { + // Stripping the trailing slash off of the common prefix + final String last = name.substring(0, name.length() - 1); + final BlobPath path = path().add(last); + entries.put(last, blobStore.blobContainer(path)); + } + } + assert list.getObjectSummaries().stream().noneMatch(s -> { + for (String commonPrefix : list.getCommonPrefixes()) { + if (s.getKey().substring(keyPath.length()).startsWith(commonPrefix)) { + return true; + } + } + return false; + }) : "Response contained children for listed common prefixes."; + if (list.isTruncated()) { + prevListing = list; + } else { + break; + } + } + return Collections.unmodifiableMap(entries); + } catch (final AmazonClientException e) { + throw new IOException("Exception when listing children of [" + path().buildAsString() + ']', e); + } + } + private String buildKey(String blobName) { return keyPath + blobName; } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index e8d8c6d27ad..bdd2d7261fb 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -226,12 +226,6 @@ class S3Repository extends BlobStoreRepository { return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, 
storageClass, repositoryMetaData); } - // only use for testing - @Override - protected BlobStore blobStore() { - return super.blobStore(); - } - // only use for testing @Override protected BlobStore getBlobStore() { @@ -239,7 +233,7 @@ class S3Repository extends BlobStoreRepository { } @Override - protected BlobPath basePath() { + public BlobPath basePath() { return basePath; } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index 88e29357548..bdaace00f80 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.repositories.s3; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; @@ -28,6 +30,8 @@ import org.elasticsearch.test.StreamsUtils; import java.io.IOException; import java.util.Collection; +import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -56,7 +60,7 @@ public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTes protected void createRepository(String repoName) { Settings.Builder settings = Settings.builder() .put("bucket", System.getProperty("test.s3.bucket")) - .put("base_path", System.getProperty("test.s3.base", "/")); + .put("base_path", System.getProperty("test.s3.base", "testpath")); final String endpointPath = System.getProperty("test.s3.endpoint"); if (endpointPath != null) { try { @@ -70,4 +74,25 @@ public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTes .setSettings(settings).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); } + + @Override + protected void assertBlobsByPrefix(BlobPath path, String prefix, Map blobs) throws Exception { + // AWS S3 is eventually consistent so we retry for 10 minutes assuming a list operation will never take longer than that + // to become consistent. + assertBusy(() -> super.assertBlobsByPrefix(path, prefix, blobs), 10L, TimeUnit.MINUTES); + } + + @Override + protected void assertChildren(BlobPath path, Collection children) throws Exception { + // AWS S3 is eventually consistent so we retry for 10 minutes assuming a list operation will never take longer than that + // to become consistent. + assertBusy(() -> super.assertChildren(path, children), 10L, TimeUnit.MINUTES); + } + + @Override + protected void assertDeleted(BlobPath path, String name) throws Exception { + // AWS S3 is eventually consistent so we retry for 10 minutes assuming a list operation will never take longer than that + // to become consistent. 
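On the S3 children() implementation above: listing with prefix = keyPath and delimiter = "/" makes S3 report each immediate "subdirectory" as a common prefix rather than as object summaries; the loop then strips keyPath and the trailing slash to obtain the child name. A small worked example with illustrative values, not taken from this change:

    // keyPath = "base/", common prefix returned by S3 = "base/indices/"
    // name = summary.substring(keyPath.length())   -> "indices/"
    // last = name.substring(0, name.length() - 1)  -> "indices"   (child container registered at path().add("indices"))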
+        assertBusy(() -> super.assertDeleted(path, name), 10L, TimeUnit.MINUTES);
+    }
 }
diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java
index 20ceb90ca03..b6f39d42bb0 100644
--- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java
+++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java
@@ -34,7 +34,6 @@
 import java.util.Iterator;
 import java.util.List;
 import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;
@@ -49,7 +48,6 @@ public class DieWithDignityIT extends ESRestTestCase {
         assertBusy(() -> {
             final String jpsPath = PathUtils.get(System.getProperty("runtime.java.home"), "bin/jps").toString();
             final Process process = new ProcessBuilder().command(jpsPath, "-v").start();
-            assertThat(process.waitFor(), equalTo(0));
             try (InputStream is = process.getInputStream();
                 BufferedReader in = new BufferedReader(new InputStreamReader(is, "UTF-8"))) {
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml
index 586a04f065c..92bbe3f5a36 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml
@@ -79,3 +79,47 @@ setup:
       indices.stats:
         metric: [ translog ]
   - gte: { indices.test.primaries.translog.earliest_last_modified_age: 0 }
+
+---
+"Translog stats on closed indices":
+  - skip:
+      version: " - 7.2.99"
+      reason: "closed indices have translog stats starting version 7.3.0"
+
+  - do:
+      index:
+        index: test
+        id: 1
+        body: { "foo": "bar" }
+
+  - do:
+      index:
+        index: test
+        id: 2
+        body: { "foo": "bar" }
+
+  - do:
+      index:
+        index: test
+        id: 3
+        body: { "foo": "bar" }
+
+  - do:
+      indices.stats:
+        metric: [ translog ]
+  - match: { indices.test.primaries.translog.operations: 3 }
+  - match: { indices.test.primaries.translog.uncommitted_operations: 3 }
+
+  - do:
+      indices.close:
+        index: test
+        wait_for_active_shards: 1
+  - is_true: acknowledged
+
+  - do:
+      indices.stats:
+        metric: [ translog ]
+        expand_wildcards: all
+        forbid_closed_indices: false
+  - match: { indices.test.primaries.translog.operations: 3 }
+  - match: { indices.test.primaries.translog.uncommitted_operations: 0 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/280_rare_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/280_rare_terms.yml
new file mode 100644
index 00000000000..a82caddd9cf
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/280_rare_terms.yml
@@ -0,0 +1,316 @@
+setup:
+  - skip:
+      version: " - 7.2.99"
+      reason: RareTerms added in 7.3.0
+  - do:
+      indices.create:
+        index: test_1
+        body:
+          settings:
+            number_of_replicas: 0
+          mappings:
+            properties:
+              str:
+                type: keyword
+              ip:
+                type: ip
+              boolean:
+                type: boolean
+              integer:
+                type: long
+              number:
+                type: long
+              date:
+                type: date
+
+
+  - do:
+      cluster.health:
+        wait_for_status: green
+
+---
+"Basic test":
+  - do:
+      index:
+        index: test_1
+        id: 1
+        body: { "str" : "abc" }
+
+  - do:
+      index:
+        index: test_1
+        id: 2
+        body: { "str": "abc" }
+
+  - do:
+      index:
+        index: test_1
+        id: 3
+        body: { "str": "bcd" }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
+        body: { "size" : 0, "aggs" : { "str_terms" : { "rare_terms" : { "field" : "str", "max_doc_count" : 1 } } } }
+
+  - match: { hits.total.value: 3 }
+  - length: { aggregations.str_terms.buckets: 1 }
+  - match: { aggregations.str_terms.buckets.0.key: "bcd" }
+  - is_false: aggregations.str_terms.buckets.0.key_as_string
+  - match: { aggregations.str_terms.buckets.0.doc_count: 1 }
+
+---
+"IP test":
+  - do:
+      index:
+        index: test_1
+        id: 1
+        body: { "ip": "::1" }
+
+  - do:
+      index:
+        index: test_1
+        id: 2
+        body: { "ip": "127.0.0.1" }
+
+  - do:
+      index:
+        index: test_1
+        id: 3
+        body: { "ip": "::1" }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
+        body: { "size" : 0, "aggs" : { "ip_terms" : { "rare_terms" : { "field" : "ip" } } } }
+
+  - match: { hits.total.value: 3 }
+  - length: { aggregations.ip_terms.buckets: 1 }
+  - match: { aggregations.ip_terms.buckets.0.key: "127.0.0.1" }
+  - is_false: aggregations.ip_terms.buckets.0.key_as_string
+  - match: { aggregations.ip_terms.buckets.0.doc_count: 1 }
+
+  - do:
+      search:
+        body: { "size" : 0, "aggs" : { "ip_terms" : { "rare_terms" : { "field" : "ip", "include" : [ "127.0.0.1" ] } } } }
+
+  - match: { hits.total.value: 3 }
+  - length: { aggregations.ip_terms.buckets: 1 }
+  - match: { aggregations.ip_terms.buckets.0.key: "127.0.0.1" }
+  - is_false: aggregations.ip_terms.buckets.0.key_as_string
+  - match: { aggregations.ip_terms.buckets.0.doc_count: 1 }
+
+  - do:
+      search:
+        body: { "size" : 0, "aggs" : { "ip_terms" : { "rare_terms" : { "field" : "ip", "exclude" : [ "127.0.0.1" ] } } } }
+
+  - match: { hits.total.value: 3 }
+  - length: { aggregations.ip_terms.buckets: 0 }
+
+  - do:
+      catch: request
+      search:
+        index: test_1
+        body: { "size" : 0, "aggs" : { "ip_terms" : { "rare_terms" : { "field" : "ip", "exclude" : "127.*" } } } }
+
+
+
+---
+"Boolean test":
+  - do:
+      index:
+        index: test_1
+        id: 1
+        body: { "boolean": true }
+
+  - do:
+      index:
+        index: test_1
+        id: 2
+        body: { "boolean": false }
+
+  - do:
+      index:
+        index: test_1
+        id: 3
+        body: { "boolean": true }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
+        body: { "size" : 0, "aggs" : { "boolean_terms" : { "rare_terms" : { "field" : "boolean" } } } }
+
+  - match: { hits.total.value: 3 }
+  - length: { aggregations.boolean_terms.buckets: 1 }
+  - match: { aggregations.boolean_terms.buckets.0.key: 0 }
+  - match: { aggregations.boolean_terms.buckets.0.key_as_string: "false" }
+  - match: { aggregations.boolean_terms.buckets.0.doc_count: 1 }
+
+---
+"Integer test":
+  - do:
+      index:
+        index: test_1
+        id: 1
+        body: { "integer": 1234 }
+
+  - do:
+      index:
+        index: test_1
+        id: 2
+        body: { "integer": 5678 }
+
+  - do:
+      index:
+        index: test_1
+        id: 3
+        body: { "integer": 1234 }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
+        body: { "size" : 0, "aggs" : { "integer_terms" : { "rare_terms" : { "field" : "integer" } } } }
+
+  - match: { hits.total.value: 3 }
+
+  - length: { aggregations.integer_terms.buckets: 1 }
+
+  - match: { aggregations.integer_terms.buckets.0.key: 5678 }
+  - is_false: aggregations.integer_terms.buckets.0.key_as_string
+  - match: { aggregations.integer_terms.buckets.0.doc_count: 1 }
+
+---
+"Date test":
+  - do:
+      index:
+        index: test_1
+        id: 1
+        body: { "date": "2016-05-03" }
+
+  - do:
+      index:
+        index: test_1
+        id: 2
+        body: { "date": "2014-09-01" }
+
+  - do:
+      index:
+        index: test_1
+        id: 3
+        body: { "date": "2016-05-03" }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
body: { "size" : 0, "aggs" : { "date_terms" : { "rare_terms" : { "field" : "date" } } } } + + - match: { hits.total.value: 3 } + + - length: { aggregations.date_terms.buckets: 1 } + - match: { aggregations.date_terms.buckets.0.key: 1409529600000 } + - match: { aggregations.date_terms.buckets.0.key_as_string: "2014-09-01T00:00:00.000Z" } + - match: { aggregations.date_terms.buckets.0.doc_count: 1 } + + - do: + search: + body: { "size" : 0, "aggs" : { "date_terms" : { "rare_terms" : { "field" : "date", "include" : [ "2014-09-01" ] } } } } + + - match: { hits.total.value: 3 } + - length: { aggregations.date_terms.buckets: 1 } + - match: { aggregations.date_terms.buckets.0.key_as_string: "2014-09-01T00:00:00.000Z" } + - match: { aggregations.date_terms.buckets.0.doc_count: 1 } + + - do: + search: + body: { "size" : 0, "aggs" : { "date_terms" : { "rare_terms" : { "field" : "date", "exclude" : [ "2014-09-01" ] } } } } + + - match: { hits.total.value: 3 } + - length: { aggregations.date_terms.buckets: 0 } + +--- +"Unmapped strings": + + - do: + index: + index: test_1 + id: 1 + body: {} + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "string_terms" : { "rare_terms" : { "field" : "unmapped_string"} } } } + + - match: { hits.total.value: 1 } + - length: { aggregations.string_terms.buckets: 0 } + +--- +"Unmapped booleans": + + - do: + index: + index: test_1 + id: 1 + body: {} + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "boolean_terms" : { "rare_terms" : { "field" : "unmapped_boolean" } } } } + + - match: { hits.total.value: 1 } + - length: { aggregations.boolean_terms.buckets: 0 } + +--- +"Unmapped dates": + + - do: + index: + index: test_1 + id: 1 + body: {} + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "date_terms" : { "rare_terms" : { "field" : "unmapped_date"} } } } + + - match: { hits.total.value: 1 } + - length: { aggregations.date_terms.buckets: 0 } + +--- +"Unmapped longs": + + - do: + index: + index: test_1 + id: 1 + body: {} + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "long_terms" : { "rare_terms" : { "field" : "unmapped_long", "value_type" : "long" } } } } + + - match: { hits.total.value: 1 } + - length: { aggregations.long_terms.buckets: 0 } + + diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index b806f3c3863..efc9ef77e5f 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -104,7 +104,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_2_0 = new Version(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_2_1 = new Version(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_3_0 = new Version(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final Version CURRENT = V_7_3_0; + public static final Version V_7_4_0 = new Version(7040099, org.apache.lucene.util.Version.LUCENE_8_1_0); + public static final Version CURRENT = V_7_4_0; private static final ImmutableOpenIntMap idToVersion; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 27d441830c5..c787ea6e7e2 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ 
b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -424,7 +424,7 @@ public class ActionModule extends AbstractModule { } public void register( - Action action, Class> transportAction, + ActionType action, Class> transportAction, Class... supportTransportActions) { register(new ActionHandler<>(action, transportAction, supportTransportActions)); } @@ -703,10 +703,10 @@ public class ActionModule extends AbstractModule { bind(AutoCreateIndex.class).toInstance(autoCreateIndex); bind(TransportLivenessAction.class).asEagerSingleton(); - // register Action -> transportAction Map used by NodeClient + // register ActionType -> transportAction Map used by NodeClient @SuppressWarnings("rawtypes") - MapBinder transportActionsBinder - = MapBinder.newMapBinder(binder(), Action.class, TransportAction.class); + MapBinder transportActionsBinder + = MapBinder.newMapBinder(binder(), ActionType.class, TransportAction.class); for (ActionHandler action : actions.values()) { // bind the action as eager singleton, so the map binder one will reuse it bind(action.getTransportAction()).asEagerSingleton(); diff --git a/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java index a3aa8ac2a52..166bec9e065 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java @@ -26,11 +26,11 @@ import java.util.Objects; public abstract class ActionRequestBuilder { - protected final Action action; + protected final ActionType action; protected final Request request; protected final ElasticsearchClient client; - protected ActionRequestBuilder(ElasticsearchClient client, Action action, Request request) { + protected ActionRequestBuilder(ElasticsearchClient client, ActionType action, Request request) { Objects.requireNonNull(action, "action must not be null"); this.action = action; this.request = request; diff --git a/server/src/main/java/org/elasticsearch/action/Action.java b/server/src/main/java/org/elasticsearch/action/ActionType.java similarity index 89% rename from server/src/main/java/org/elasticsearch/action/Action.java rename to server/src/main/java/org/elasticsearch/action/ActionType.java index 0037533797d..02f8f3c6fc2 100644 --- a/server/src/main/java/org/elasticsearch/action/Action.java +++ b/server/src/main/java/org/elasticsearch/action/ActionType.java @@ -26,7 +26,7 @@ import org.elasticsearch.transport.TransportRequestOptions; /** * A generic action. Should strive to make it a singleton. */ -public class Action { +public class ActionType { private final String name; private final Writeable.Reader responseReader; @@ -36,7 +36,7 @@ public class Action { * @deprecated Pass a {@link Writeable.Reader} with {@link } */ @Deprecated - protected Action(String name) { + protected ActionType(String name) { this(name, null); } @@ -44,7 +44,7 @@ public class Action { * @param name The name of the action, must be unique across actions. 
* @param responseReader A reader for the response type */ - public Action(String name, Writeable.Reader responseReader) { + public ActionType(String name, Writeable.Reader responseReader) { this.name = name; this.responseReader = responseReader; } @@ -72,7 +72,7 @@ public class Action { @Override public boolean equals(Object o) { - return o instanceof Action && name.equals(((Action) o).name()); + return o instanceof ActionType && name.equals(((ActionType) o).name()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/StreamableResponseAction.java b/server/src/main/java/org/elasticsearch/action/StreamableResponseActionType.java similarity index 85% rename from server/src/main/java/org/elasticsearch/action/StreamableResponseAction.java rename to server/src/main/java/org/elasticsearch/action/StreamableResponseActionType.java index c7eecfc35d7..b8206bb03f8 100644 --- a/server/src/main/java/org/elasticsearch/action/StreamableResponseAction.java +++ b/server/src/main/java/org/elasticsearch/action/StreamableResponseActionType.java @@ -23,12 +23,12 @@ import org.elasticsearch.common.io.stream.Writeable; /** * An action for with the response type implements {@link org.elasticsearch.common.io.stream.Streamable}. - * @deprecated Use {@link Action} directly and provide a {@link Writeable.Reader} + * @deprecated Use {@link ActionType} directly and provide a {@link Writeable.Reader} */ @Deprecated -public abstract class StreamableResponseAction extends Action { +public abstract class StreamableResponseActionType extends ActionType { - protected StreamableResponseAction(String name) { + protected StreamableResponseActionType(String name) { super(name); } diff --git a/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java b/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java index a4c3e17e802..4319a745ba8 100644 --- a/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java +++ b/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java @@ -30,10 +30,10 @@ import org.elasticsearch.transport.TransportService; public class TransportActionNodeProxy { private final TransportService transportService; - private final Action action; + private final ActionType action; private final TransportRequestOptions transportOptions; - public TransportActionNodeProxy(Settings settings, Action action, TransportService transportService) { + public TransportActionNodeProxy(Settings settings, ActionType action, TransportService transportService) { this.action = action; this.transportService = transportService; this.transportOptions = action.transportOptions(settings); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java index b4b348ae97e..acaaed9eaa9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.elasticsearch.action.StreamableResponseAction; +import org.elasticsearch.action.StreamableResponseActionType; /** - * Action for explaining shard allocation for a shard in the cluster + * ActionType for explaining shard allocation for a shard in the cluster */ -public class 
ClusterAllocationExplainAction extends StreamableResponseAction { +public class ClusterAllocationExplainAction extends StreamableResponseActionType { public static final ClusterAllocationExplainAction INSTANCE = new ClusterAllocationExplainAction(); public static final String NAME = "cluster:monitor/allocation/explain"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java index 20f1e3c5044..a2f0c721b5d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.action.admin.cluster.configuration; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.Writeable.Reader; -public class AddVotingConfigExclusionsAction extends Action { +public class AddVotingConfigExclusionsAction extends ActionType { public static final AddVotingConfigExclusionsAction INSTANCE = new AddVotingConfigExclusionsAction(); public static final String NAME = "cluster:admin/voting_config/add_exclusions"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java index 6cafcb7653f..6091800693f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.action.admin.cluster.configuration; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.Writeable.Reader; -public class ClearVotingConfigExclusionsAction extends Action { +public class ClearVotingConfigExclusionsAction extends ActionType { public static final ClearVotingConfigExclusionsAction INSTANCE = new ClearVotingConfigExclusionsAction(); public static final String NAME = "cluster:admin/voting_config/clear_exclusions"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java index ceb2a145fb6..e8f5ecfaf5b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.cluster.health; -import org.elasticsearch.action.StreamableResponseAction; +import org.elasticsearch.action.StreamableResponseActionType; -public class ClusterHealthAction extends StreamableResponseAction { +public class ClusterHealthAction extends StreamableResponseActionType { public static final ClusterHealthAction INSTANCE = new ClusterHealthAction(); public static final String NAME = "cluster:monitor/health"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java index 317fa984163..4833625d295 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.cluster.node.hotthreads; -import org.elasticsearch.action.StreamableResponseAction; +import org.elasticsearch.action.StreamableResponseActionType; -public class NodesHotThreadsAction extends StreamableResponseAction { +public class NodesHotThreadsAction extends StreamableResponseActionType { public static final NodesHotThreadsAction INSTANCE = new NodesHotThreadsAction(); public static final String NAME = "cluster:monitor/nodes/hot_threads"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java index b860f07c8ff..e94390d8f92 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.cluster.node.info; -import org.elasticsearch.action.StreamableResponseAction; +import org.elasticsearch.action.StreamableResponseActionType; -public class NodesInfoAction extends StreamableResponseAction { +public class NodesInfoAction extends StreamableResponseActionType { public static final NodesInfoAction INSTANCE = new NodesInfoAction(); public static final String NAME = "cluster:monitor/nodes/info"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java index 0a0c8a74fe9..e22595c1870 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java @@ -19,10 +19,10 @@ package org.elasticsearch.action.admin.cluster.node.reload; -import org.elasticsearch.action.StreamableResponseAction; +import org.elasticsearch.action.StreamableResponseActionType; public class NodesReloadSecureSettingsAction - extends StreamableResponseAction { + extends StreamableResponseActionType { public static final NodesReloadSecureSettingsAction INSTANCE = new NodesReloadSecureSettingsAction(); public static final String NAME = "cluster:admin/nodes/reload_secure_settings"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java index 1febe1b4872..dbe7deed74a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.cluster.node.stats; -import org.elasticsearch.action.StreamableResponseAction; +import org.elasticsearch.action.StreamableResponseActionType; -public class NodesStatsAction extends StreamableResponseAction { +public class NodesStatsAction extends StreamableResponseActionType { public static 
final NodesStatsAction INSTANCE = new NodesStatsAction(); public static final String NAME = "cluster:monitor/nodes/stats"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java index 39532d18519..a0fa139dc73 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.Writeable; /** - * Action for cancelling running tasks + * ActionType for cancelling running tasks */ -public class CancelTasksAction extends Action { +public class CancelTasksAction extends ActionType { public static final CancelTasksAction INSTANCE = new CancelTasksAction(); public static final String NAME = "cluster:admin/tasks/cancel"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java index cdb5bbc3906..978e07555b5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.node.tasks.get; -import org.elasticsearch.action.StreamableResponseAction; +import org.elasticsearch.action.StreamableResponseActionType; /** - * Action for retrieving a list of currently running tasks + * ActionType for retrieving a list of currently running tasks */ -public class GetTaskAction extends StreamableResponseAction { +public class GetTaskAction extends StreamableResponseActionType { public static final String TASKS_ORIGIN = "tasks"; public static final GetTaskAction INSTANCE = new GetTaskAction(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index d1d72da5445..2b0ac0233be 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -56,7 +56,7 @@ import static org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskActio import static org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction.waitForCompletionTimeout; /** - * Action to get a single task. If the task isn't running then it'll try to request the status from request index. + * ActionType to get a single task. If the task isn't running then it'll try to request the status from request index. * * The general flow is: *