Merge remote-tracking branch 'es/7.x' into enrich-7.x

Martijn van Groningen 2019-07-04 13:05:10 +02:00
commit 653f1436a0
763 changed files with 17186 additions and 2769 deletions

View File

@ -7,5 +7,4 @@
ES_BUILD_JAVA=openjdk12
ES_RUNTIME_JAVA=java8
GRADLE_TASK=build
GRADLE_EXTRA_ARGS=--no-parallel

View File

@ -229,6 +229,7 @@ public class ElasticsearchCluster implements TestClusterConfiguration {
if (Version.fromString(node.getVersion()).getMajor() >= 7) {
node.defaultConfig.put("cluster.initial_master_nodes", "[" + nodeNames + "]");
node.defaultConfig.put("discovery.seed_providers", "file");
node.defaultConfig.put("discovery.seed_hosts", "[]");
}
}
node.start();
@ -286,14 +287,13 @@ public class ElasticsearchCluster implements TestClusterConfiguration {
}
public void waitForAllConditions() {
long startedAt = System.currentTimeMillis();
LOGGER.info("Waiting for nodes");
nodes.forEach(ElasticsearchNode::waitForAllConditions);
writeUnicastHostsFiles();
LOGGER.info("Starting to wait for cluster to form");
waitForConditions(waitConditions, startedAt, CLUSTER_UP_TIMEOUT, CLUSTER_UP_TIMEOUT_UNIT, this);
waitForConditions(waitConditions, System.currentTimeMillis(), CLUSTER_UP_TIMEOUT, CLUSTER_UP_TIMEOUT_UNIT, this);
}
@Override

View File

@ -37,6 +37,8 @@ import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@ -65,8 +67,10 @@ public class ElasticsearchNode implements TestClusterConfiguration {
private static final Logger LOGGER = Logging.getLogger(ElasticsearchNode.class);
private static final int ES_DESTROY_TIMEOUT = 20;
private static final TimeUnit ES_DESTROY_TIMEOUT_UNIT = TimeUnit.SECONDS;
private static final int NODE_UP_TIMEOUT = 60;
private static final TimeUnit NODE_UP_TIMEOUT_UNIT = TimeUnit.SECONDS;
private static final int NODE_UP_TIMEOUT = 2;
private static final TimeUnit NODE_UP_TIMEOUT_UNIT = TimeUnit.MINUTES;
private static final int ADDITIONAL_CONFIG_TIMEOUT = 15;
private static final TimeUnit ADDITIONAL_CONFIG_TIMEOUT_UNIT = TimeUnit.SECONDS;
private static final List<String> OVERRIDABLE_SETTINGS = Arrays.asList(
"path.repo",
"discovery.seed_providers"
@ -310,6 +314,7 @@ public class ElasticsearchNode implements TestClusterConfiguration {
try {
if (isWorkingDirConfigured == false) {
logToProcessStdout("Configuring working directory: " + workingDir);
// Only configure working dir once so we don't lose data on restarts
isWorkingDirConfigured = true;
createWorkingDir(distroArtifact);
@ -319,12 +324,16 @@ public class ElasticsearchNode implements TestClusterConfiguration {
}
createConfiguration();
plugins.forEach(plugin -> runElaticsearchBinScript(
"elasticsearch-plugin",
"install", "--batch", plugin.toString())
);
if (plugins.isEmpty() == false) {
logToProcessStdout("Installing " + plugins.size() + " plugins");
plugins.forEach(plugin -> runElaticsearchBinScript(
"elasticsearch-plugin",
"install", "--batch", plugin.toString())
);
}
if (keystoreSettings.isEmpty() == false || keystoreFiles.isEmpty() == false) {
logToProcessStdout("Adding " + keystoreSettings.size() + " keystore settings and " + keystoreFiles.size() + " keystore files");
runElaticsearchBinScript("elasticsearch-keystore", "create");
checkSuppliers("Keystore", keystoreSettings.values());
@ -347,6 +356,7 @@ public class ElasticsearchNode implements TestClusterConfiguration {
copyExtraConfigFiles();
if (isSettingMissingOrTrue("xpack.security.enabled")) {
logToProcessStdout("Setting up " + credentials.size() + " users");
if (credentials.isEmpty()) {
user(Collections.emptyMap());
}
@ -358,9 +368,25 @@ public class ElasticsearchNode implements TestClusterConfiguration {
));
}
logToProcessStdout("Starting Elasticsearch process");
startElasticsearchProcess();
}
private void logToProcessStdout(String message) {
try {
if (Files.exists(esStdoutFile.getParent()) == false) {
Files.createDirectories(esStdoutFile.getParent());
}
Files.write(
esStdoutFile,
("[" + Instant.now().toString() + "] [BUILD] " + message + "\n").getBytes(StandardCharsets.UTF_8),
StandardOpenOption.CREATE, StandardOpenOption.APPEND
);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public void restart() {
LOGGER.info("Restarting {}", this);
@ -380,6 +406,9 @@ public class ElasticsearchNode implements TestClusterConfiguration {
}
private void copyExtraConfigFiles() {
if (extraConfigFiles.isEmpty() == false) {
logToProcessStdout("Setting up " + extraConfigFiles.size() + " additional config files");
}
extraConfigFiles.forEach((destination, from) -> {
if (Files.exists(from.toPath()) == false) {
throw new TestClustersException("Can't create extra config file from " + from + " for " + this +
@ -398,6 +427,7 @@ public class ElasticsearchNode implements TestClusterConfiguration {
private void installModules() {
if (distribution == Distribution.INTEG_TEST) {
logToProcessStdout("Installing " + modules.size() + "modules");
for (File module : modules) {
Path destination = workingDir.resolve("modules").resolve(module.getName().replace(".zip", "").replace("-" + version, ""));
@ -843,7 +873,23 @@ public class ElasticsearchNode implements TestClusterConfiguration {
}
void waitForAllConditions() {
waitForConditions(waitConditions, System.currentTimeMillis(), NODE_UP_TIMEOUT, NODE_UP_TIMEOUT_UNIT, this);
waitForConditions(
waitConditions,
System.currentTimeMillis(),
NODE_UP_TIMEOUT_UNIT.toMillis(NODE_UP_TIMEOUT) +
// Installing plugins at config time and loading them when nodes start requires additional time we need to
// account for
ADDITIONAL_CONFIG_TIMEOUT_UNIT.toMillis(ADDITIONAL_CONFIG_TIMEOUT *
(
plugins.size() +
keystoreFiles.size() +
keystoreSettings.size() +
credentials.size()
)
),
TimeUnit.MILLISECONDS,
this
);
}
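A worked example (hypothetical counts, not part of this commit) of the wait budget the call above computes: the base node timeout plus 15 seconds for every plugin, keystore setting, keystore file and credential that has to be installed before the node can come up.

long base = TimeUnit.MINUTES.toMillis(2);        // NODE_UP_TIMEOUT
long perItem = TimeUnit.SECONDS.toMillis(15);    // ADDITIONAL_CONFIG_TIMEOUT
int items = 3 + 1 + 2 + 1;                       // plugins + keystore files + keystore settings + credentials (hypothetical)
long budgetMillis = base + perItem * items;      // 120_000 + 15_000 * 7 = 225_000 ms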
@Override

View File

@ -0,0 +1,59 @@
package org.elasticsearch.gradle.testclusters;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
/**
* Keep an inventory of all running clusters and stop them when interrupted.
*
* This takes advantage of the fact that Gradle interrupts all the threads in the daemon when the build completes.
*/
public class TestClusterCleanupOnShutdown implements Runnable {
private final Logger logger = Logging.getLogger(TestClusterCleanupOnShutdown.class);
private Set<ElasticsearchCluster> clustersToWatch = new HashSet<>();
public void watch(Collection<ElasticsearchCluster> cluster) {
synchronized (clustersToWatch) {
clustersToWatch.addAll(cluster);
}
}
public void unWatch(Collection<ElasticsearchCluster> cluster) {
synchronized (clustersToWatch) {
clustersToWatch.removeAll(cluster);
}
}
@Override
public void run() {
try {
while (true) {
Thread.sleep(Long.MAX_VALUE);
}
} catch (InterruptedException interrupted) {
synchronized (clustersToWatch) {
if (clustersToWatch.isEmpty()) {
return;
}
logger.info("Cleanup thread was interrupted, shutting down all clusters");
Iterator<ElasticsearchCluster> iterator = clustersToWatch.iterator();
while (iterator.hasNext()) {
ElasticsearchCluster cluster = iterator.next();
iterator.remove();
try {
cluster.stop(false);
} catch (Exception e) {
logger.warn("Could not shut down {}", cluster, e);
}
}
}
}
}
}
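A minimal driver sketch (not part of this commit; names chosen for illustration) of how this runnable is meant to be used: it parks on Thread.sleep(Long.MAX_VALUE) until its thread is interrupted, and only then stops every watched cluster.

ExecutorService executorService = Executors.newSingleThreadExecutor();
TestClusterCleanupOnShutdown cleanupThread = new TestClusterCleanupOnShutdown();
executorService.submit(cleanupThread);           // the runnable blocks until interrupted
// ... the build runs; clusters are registered via cleanupThread.watch(...) ...
executorService.shutdownNow();                   // interrupting the thread triggers cluster shutdown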

View File

@ -0,0 +1,74 @@
package org.elasticsearch.gradle.testclusters;
import org.gradle.api.Project;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
/**
* This extension is meant to be used internally by testclusters.
*
* It holds synchronization primitives needed to implement the rate limiting.
* This is tricky because we can't use Gradle workers as there's no way to make sure that tests and their clusters are
* allocated atomically, so we could be in a situation where all workers are tests waiting for clusters to start up.
*
* Also auto configures cleanup of executors to make sure we don't leak threads in the daemon.
*/
public class TestClustersCleanupExtension {
private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1;
private static final TimeUnit EXECUTOR_SHUTDOWN_TIMEOUT_UNIT = TimeUnit.MINUTES;
private static final Logger logger = Logging.getLogger(TestClustersCleanupExtension.class);
private final ExecutorService executorService;
private final TestClusterCleanupOnShutdown cleanupThread;
public TestClustersCleanupExtension() {
executorService = Executors.newSingleThreadExecutor();
cleanupThread = new TestClusterCleanupOnShutdown();
executorService.submit(cleanupThread);
}
public static void createExtension(Project project) {
if (project.getRootProject().getExtensions().findByType(TestClustersCleanupExtension.class) != null) {
return;
}
// Configure the extension on the root project so we have a single instance per run
TestClustersCleanupExtension ext = project.getRootProject().getExtensions().create(
"__testclusters_rate_limit",
TestClustersCleanupExtension.class
);
Thread shutdownHook = new Thread(ext.cleanupThread::run);
Runtime.getRuntime().addShutdownHook(shutdownHook);
project.getGradle().buildFinished(buildResult -> {
ext.executorService.shutdownNow();
try {
if (ext.executorService.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, EXECUTOR_SHUTDOWN_TIMEOUT_UNIT) == false) {
throw new IllegalStateException(
"Failed to shut down executor service after " +
EXECUTOR_SHUTDOWN_TIMEOUT + " " + EXECUTOR_SHUTDOWN_TIMEOUT_UNIT
);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
try {
if (false == Runtime.getRuntime().removeShutdownHook(shutdownHook)) {
logger.warn("Trying to deregister shutdown hook when it was not registered.");
}
} catch (IllegalStateException ese) {
// Thrown when shutdown is in progress
logger.warn("Can't remove shutdown hook", ese);
}
});
}
public TestClusterCleanupOnShutdown getCleanupThread() {
return cleanupThread;
}
}

View File

@ -43,13 +43,9 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
public class TestClustersPlugin implements Plugin<Project> {
@ -58,18 +54,14 @@ public class TestClustersPlugin implements Plugin<Project> {
public static final String EXTENSION_NAME = "testClusters";
private static final String HELPER_CONFIGURATION_PREFIX = "testclusters";
private static final String SYNC_ARTIFACTS_TASK_NAME = "syncTestClustersArtifacts";
private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1;
private static final TimeUnit EXECUTOR_SHUTDOWN_TIMEOUT_UNIT = TimeUnit.MINUTES;
private static final Logger logger = Logging.getLogger(TestClustersPlugin.class);
private static final String TESTCLUSTERS_INSPECT_FAILURE = "testclusters.inspect.failure";
private final Map<Task, List<ElasticsearchCluster>> usedClusters = new HashMap<>();
private final Map<ElasticsearchCluster, Integer> claimsInventory = new HashMap<>();
private final Set<ElasticsearchCluster> runningClusters =new HashSet<>();
private final Thread shutdownHook = new Thread(this::shutDownAllClusters);
private final Set<ElasticsearchCluster> runningClusters = new HashSet<>();
private final Boolean allowClusterToSurvive = Boolean.valueOf(System.getProperty(TESTCLUSTERS_INSPECT_FAILURE, "false"));
private ExecutorService executorService = Executors.newSingleThreadExecutor();
public static String getHelperConfigurationName(String version) {
return HELPER_CONFIGURATION_PREFIX + "-" + version;
@ -82,6 +74,8 @@ public class TestClustersPlugin implements Plugin<Project> {
// enable the DSL to describe clusters
NamedDomainObjectContainer<ElasticsearchCluster> container = createTestClustersContainerExtension(project);
TestClustersCleanupExtension.createExtension(project);
// provide a task to be able to list defined clusters.
createListClustersTask(project, container);
@ -100,9 +94,6 @@ public class TestClustersPlugin implements Plugin<Project> {
// After each task we determine if there are clusters that are no longer needed.
configureStopClustersHook(project);
// configure hooks to make sure no test cluster processes survive the build
configureCleanupHooks(project);
// Since we have everything modeled in the DSL, add all the required dependencies e.x. the distribution to the
// configuration so the user doesn't have to repeat this.
autoConfigureClusterDependencies(project, rootProject, container);
@ -196,8 +187,19 @@ public class TestClustersPlugin implements Plugin<Project> {
@Override
public void beforeActions(Task task) {
// we only start the cluster before the actions, so we'll not start it if the task is up-to-date
usedClusters.getOrDefault(task, Collections.emptyList()).stream()
List<ElasticsearchCluster> neededButNotRunning = usedClusters.getOrDefault(
task,
Collections.emptyList()
)
.stream()
.filter(cluster -> runningClusters.contains(cluster) == false)
.collect(Collectors.toList());
project.getRootProject().getExtensions()
.getByType(TestClustersCleanupExtension.class)
.getCleanupThread()
.watch(neededButNotRunning);
neededButNotRunning
.forEach(elasticsearchCluster -> {
elasticsearchCluster.start();
runningClusters.add(elasticsearchCluster);
@ -220,22 +222,36 @@ public class TestClustersPlugin implements Plugin<Project> {
task,
Collections.emptyList()
);
if (clustersUsedByTask.isEmpty()) {
return;
}
logger.info("Clusters were used, stopping and releasing permits");
final int permitsToRelease;
if (state.getFailure() != null) {
// If the task fails, and other tasks use this cluster, the other task will likely never be
// executed at all, so we will never get to un-claim and terminate it.
// executed at all, so we will never be called again to un-claim and terminate it.
clustersUsedByTask.forEach(cluster -> stopCluster(cluster, true));
permitsToRelease = clustersUsedByTask.stream()
.map(cluster -> cluster.getNumberOfNodes())
.reduce(Integer::sum).get();
} else {
clustersUsedByTask.forEach(
cluster -> claimsInventory.put(cluster, claimsInventory.getOrDefault(cluster, 0) - 1)
);
claimsInventory.entrySet().stream()
List<ElasticsearchCluster> stoppingClusters = claimsInventory.entrySet().stream()
.filter(entry -> entry.getValue() == 0)
.filter(entry -> runningClusters.contains(entry.getKey()))
.map(Map.Entry::getKey)
.forEach(cluster -> {
stopCluster(cluster, false);
runningClusters.remove(cluster);
});
.collect(Collectors.toList());
stoppingClusters.forEach(cluster -> {
stopCluster(cluster, false);
runningClusters.remove(cluster);
});
project.getRootProject().getExtensions()
.getByType(TestClustersCleanupExtension.class)
.getCleanupThread()
.unWatch(stoppingClusters);
}
}
@Override
@ -406,62 +422,4 @@ public class TestClustersPlugin implements Plugin<Project> {
})));
}
private void configureCleanupHooks(Project project) {
// When the Gradle daemon is used, it will interrupt all threads when the build concludes.
// This is our signal to clean up
executorService.submit(() -> {
while (true) {
try {
Thread.sleep(Long.MAX_VALUE);
} catch (InterruptedException interrupted) {
shutDownAllClusters();
Thread.currentThread().interrupt();
return;
}
}
});
// When the Daemon is not used, or runs into issues, rely on a shutdown hook
// When the daemon is used, but does not work correctly and eventually dies off (e.x. due to non interruptible
// thread in the build) process will be stopped eventually when the daemon dies.
Runtime.getRuntime().addShutdownHook(shutdownHook);
// When we don't run into anything out of the ordinary, and the build completes, makes sure to clean up
project.getGradle().buildFinished(buildResult -> {
shutdownExecutorService();
if (false == Runtime.getRuntime().removeShutdownHook(shutdownHook)) {
logger.info("Trying to deregister shutdown hook when it was not registered.");
}
});
}
private void shutdownExecutorService() {
executorService.shutdownNow();
try {
if (executorService.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, EXECUTOR_SHUTDOWN_TIMEOUT_UNIT) == false) {
throw new IllegalStateException(
"Failed to shut down executor service after " +
EXECUTOR_SHUTDOWN_TIMEOUT + " " + EXECUTOR_SHUTDOWN_TIMEOUT_UNIT
);
}
} catch (InterruptedException e) {
logger.info("Wait for testclusters shutdown interrupted", e);
Thread.currentThread().interrupt();
}
}
private void shutDownAllClusters() {
synchronized (runningClusters) {
if (runningClusters.isEmpty()) {
return;
}
Iterator<ElasticsearchCluster> iterator = runningClusters.iterator();
while (iterator.hasNext()) {
ElasticsearchCluster next = iterator.next();
iterator.remove();
next.stop(false);
}
}
}
}

View File

@ -1 +1 @@
5.4.1
5.5

View File

@ -1,4 +1,4 @@
elasticsearch = 7.3.0
elasticsearch = 7.4.0
lucene = 8.1.0
bundled_jdk = 12.0.1+12@69cfe15208a647278a19ef0990eea691

View File

@ -18,10 +18,10 @@
*/
package org.elasticsearch.plugin.noop.action.bulk;
import org.elasticsearch.action.StreamableResponseAction;
import org.elasticsearch.action.StreamableResponseActionType;
import org.elasticsearch.action.bulk.BulkResponse;
public class NoopBulkAction extends StreamableResponseAction<BulkResponse> {
public class NoopBulkAction extends StreamableResponseActionType<BulkResponse> {
public static final String NAME = "mock:data/write/bulk";
public static final NoopBulkAction INSTANCE = new NoopBulkAction();

View File

@ -18,11 +18,11 @@
*/
package org.elasticsearch.plugin.noop.action.search;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.io.stream.Writeable;
public class NoopSearchAction extends Action<SearchResponse> {
public class NoopSearchAction extends ActionType<SearchResponse> {
public static final NoopSearchAction INSTANCE = new NoopSearchAction();
public static final String NAME = "mock:data/read/search";

View File

@ -61,6 +61,8 @@ import org.elasticsearch.client.indices.GetMappingsResponse;
import org.elasticsearch.client.indices.IndexTemplatesExistRequest;
import org.elasticsearch.client.indices.PutIndexTemplateRequest;
import org.elasticsearch.client.indices.PutMappingRequest;
import org.elasticsearch.client.indices.ReloadAnalyzersRequest;
import org.elasticsearch.client.indices.ReloadAnalyzersResponse;
import org.elasticsearch.client.indices.UnfreezeIndexRequest;
import org.elasticsearch.client.indices.rollover.RolloverRequest;
import org.elasticsearch.client.indices.rollover.RolloverResponse;
@ -1328,4 +1330,28 @@ public final class IndicesClient {
restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::deleteTemplate,
options, AcknowledgedResponse::fromXContent, listener, emptySet());
}
/**
* Synchronously calls the _reload_search_analyzers API
*
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
*/
public ReloadAnalyzersResponse reloadAnalyzers(ReloadAnalyzersRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, IndicesRequestConverters::reloadAnalyzers, options,
ReloadAnalyzersResponse::fromXContent, emptySet());
}
/**
* Asynchronously calls the _reload_search_analyzers API
*
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
*/
public void reloadAnalyzersAsync(ReloadAnalyzersRequest request, RequestOptions options,
ActionListener<ReloadAnalyzersResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::reloadAnalyzers, options,
ReloadAnalyzersResponse::fromXContent, listener, emptySet());
}
}
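A usage sketch (not part of this commit) for the new reload API, assuming a locally running cluster on port 9200 and an existing index named "my-index":

import org.apache.http.HttpHost;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.ReloadAnalyzersRequest;
import org.elasticsearch.client.indices.ReloadAnalyzersResponse;

public class ReloadAnalyzersExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            ReloadAnalyzersRequest request = new ReloadAnalyzersRequest("my-index");
            ReloadAnalyzersResponse response = client.indices().reloadAnalyzers(request, RequestOptions.DEFAULT);
            // per-index details of which analyzers were reloaded on which nodes
            response.getReloadedDetails().forEach((index, details) ->
                System.out.println(index + ": " + details.getReloadedAnalyzers()));
        }
    }
}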

View File

@ -50,6 +50,7 @@ import org.elasticsearch.client.indices.GetMappingsRequest;
import org.elasticsearch.client.indices.IndexTemplatesExistRequest;
import org.elasticsearch.client.indices.PutIndexTemplateRequest;
import org.elasticsearch.client.indices.PutMappingRequest;
import org.elasticsearch.client.indices.ReloadAnalyzersRequest;
import org.elasticsearch.client.indices.UnfreezeIndexRequest;
import org.elasticsearch.client.indices.rollover.RolloverRequest;
import org.elasticsearch.common.Strings;
@ -646,4 +647,13 @@ final class IndicesRequestConverters {
request.addParameters(params.asMap());
return request;
}
static Request reloadAnalyzers(ReloadAnalyzersRequest reloadAnalyzersRequest) {
String endpoint = RequestConverters.endpoint(reloadAnalyzersRequest.getIndices(), "_reload_search_analyzers");
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
RequestConverters.Params parameters = new RequestConverters.Params();
parameters.withIndicesOptions(reloadAnalyzersRequest.indicesOptions());
request.addParameters(parameters.asMap());
return request;
}
}

View File

@ -676,6 +676,9 @@ final class MLRequestConverters {
params.putParam(
StopDataFrameAnalyticsRequest.ALLOW_NO_MATCH.getPreferredName(), Boolean.toString(stopRequest.getAllowNoMatch()));
}
if (stopRequest.getForce() != null) {
params.putParam(StopDataFrameAnalyticsRequest.FORCE.getPreferredName(), Boolean.toString(stopRequest.getForce()));
}
request.addParameters(params.asMap());
return request;
}

View File

@ -43,6 +43,8 @@ import org.elasticsearch.client.security.DisableUserRequest;
import org.elasticsearch.client.security.EnableUserRequest;
import org.elasticsearch.client.security.GetApiKeyRequest;
import org.elasticsearch.client.security.GetApiKeyResponse;
import org.elasticsearch.client.security.GetBuiltinPrivilegesRequest;
import org.elasticsearch.client.security.GetBuiltinPrivilegesResponse;
import org.elasticsearch.client.security.GetPrivilegesRequest;
import org.elasticsearch.client.security.GetPrivilegesResponse;
import org.elasticsearch.client.security.GetRoleMappingsRequest;
@ -751,6 +753,34 @@ public final class SecurityClient {
InvalidateTokenResponse::fromXContent, listener, emptySet());
}
/**
* Synchronously get builtin (cluster &amp; index) privilege(s).
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-builtin-privileges.html">
* the docs</a> for more.
*
* @param options the request options (e.g. headers), use
* {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response from the get builtin privileges call
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public GetBuiltinPrivilegesResponse getBuiltinPrivileges(final RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(GetBuiltinPrivilegesRequest.INSTANCE,
GetBuiltinPrivilegesRequest::getRequest, options, GetBuiltinPrivilegesResponse::fromXContent, emptySet());
}
/**
* Asynchronously get builtin (cluster &amp; index) privilege(s).
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-builtin-privileges.html">
* the docs</a> for more.
*
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
*/
public void getBuiltinPrivilegesAsync(final RequestOptions options, final ActionListener<GetBuiltinPrivilegesResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(GetBuiltinPrivilegesRequest.INSTANCE,
GetBuiltinPrivilegesRequest::getRequest, options, GetBuiltinPrivilegesResponse::fromXContent, listener, emptySet());
}
/**
* Synchronously get application privilege(s).
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html">

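A usage sketch (not part of this commit) for the new builtin privileges call; client is assumed to be an already configured RestHighLevelClient, with the usual client and java.io.IOException imports in place:

static void printBuiltinPrivileges(RestHighLevelClient client) throws IOException {
    GetBuiltinPrivilegesResponse response = client.security().getBuiltinPrivileges(RequestOptions.DEFAULT);
    System.out.println("cluster privileges: " + response.getClusterPrivileges());
    System.out.println("index privileges: " + response.getIndexPrivileges());
}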
View File

@ -45,7 +45,7 @@ public class BroadcastResponse {
return shards;
}
BroadcastResponse(final Shards shards) {
protected BroadcastResponse(final Shards shards) {
this.shards = Objects.requireNonNull(shards);
}
@ -56,7 +56,7 @@ public class BroadcastResponse {
a -> new BroadcastResponse((Shards) a[0]));
static {
PARSER.declareObject(ConstructingObjectParser.constructorArg(), Shards.SHARDS_PARSER, SHARDS_FIELD);
declareShardsField(PARSER);
}
/**
@ -70,6 +70,10 @@ public class BroadcastResponse {
return PARSER.parse(parser, null);
}
protected static <T extends BroadcastResponse> void declareShardsField(ConstructingObjectParser<T, Void> PARSER) {
PARSER.declareObject(ConstructingObjectParser.constructorArg(), Shards.SHARDS_PARSER, SHARDS_FIELD);
}
/**
* Represents the results from a collection of shards on which a request was executed.
*/

View File

@ -29,23 +29,32 @@ import java.util.Objects;
public class PreviewDataFrameTransformResponse {
private static final String PREVIEW = "preview";
private static final String MAPPINGS = "mappings";
@SuppressWarnings("unchecked")
public static PreviewDataFrameTransformResponse fromXContent(final XContentParser parser) throws IOException {
Object previewDocs = parser.map().get(PREVIEW);
return new PreviewDataFrameTransformResponse((List<Map<String, Object>>) previewDocs);
Map<String, Object> previewMap = parser.mapOrdered();
Object previewDocs = previewMap.get(PREVIEW);
Object mappings = previewMap.get(MAPPINGS);
return new PreviewDataFrameTransformResponse((List<Map<String, Object>>) previewDocs, (Map<String, Object>) mappings);
}
private List<Map<String, Object>> docs;
private Map<String, Object> mappings;
public PreviewDataFrameTransformResponse(List<Map<String, Object>> docs) {
public PreviewDataFrameTransformResponse(List<Map<String, Object>> docs, Map<String, Object> mappings) {
this.docs = docs;
this.mappings = mappings;
}
public List<Map<String, Object>> getDocs() {
return docs;
}
public Map<String, Object> getMappings() {
return mappings;
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
@ -57,12 +66,12 @@ public class PreviewDataFrameTransformResponse {
}
PreviewDataFrameTransformResponse other = (PreviewDataFrameTransformResponse) obj;
return Objects.equals(other.docs, docs);
return Objects.equals(other.docs, docs) && Objects.equals(other.mappings, mappings);
}
@Override
public int hashCode() {
return Objects.hashCode(docs);
return Objects.hash(docs, mappings);
}
}
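A consumption sketch (not part of this commit) showing the new mappings accessor next to the existing docs accessor; preview is assumed to be a response returned by the data frame transform preview call, with java.util imports in place:

static void inspectPreview(PreviewDataFrameTransformResponse preview) {
    List<Map<String, Object>> docs = preview.getDocs();        // preview documents, as before
    Map<String, Object> mappings = preview.getMappings();      // new: deduced mappings for the destination index
    System.out.println(docs.size() + " docs, mapped properties: " + mappings.get("properties"));
}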

View File

@ -43,6 +43,7 @@ public class DataFrameTransformState {
private static final ParseField CHECKPOINT = new ParseField("checkpoint");
private static final ParseField REASON = new ParseField("reason");
private static final ParseField PROGRESS = new ParseField("progress");
private static final ParseField NODE = new ParseField("node");
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<DataFrameTransformState, Void> PARSER =
@ -52,7 +53,8 @@ public class DataFrameTransformState {
(Map<String, Object>) args[2],
(long) args[3],
(String) args[4],
(DataFrameTransformProgress) args[5]));
(DataFrameTransformProgress) args[5],
(NodeAttributes) args[6]));
static {
PARSER.declareField(constructorArg(), p -> DataFrameTransformTaskState.fromString(p.text()), TASK_STATE, ValueType.STRING);
@ -61,6 +63,7 @@ public class DataFrameTransformState {
PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), CHECKPOINT);
PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), REASON);
PARSER.declareField(optionalConstructorArg(), DataFrameTransformProgress::fromXContent, PROGRESS, ValueType.OBJECT);
PARSER.declareField(optionalConstructorArg(), NodeAttributes.PARSER::apply, NODE, ValueType.OBJECT);
}
public static DataFrameTransformState fromXContent(XContentParser parser) throws IOException {
@ -73,19 +76,22 @@ public class DataFrameTransformState {
private final Map<String, Object> currentPosition;
private final String reason;
private final DataFrameTransformProgress progress;
private final NodeAttributes node;
public DataFrameTransformState(DataFrameTransformTaskState taskState,
IndexerState indexerState,
@Nullable Map<String, Object> position,
long checkpoint,
@Nullable String reason,
@Nullable DataFrameTransformProgress progress) {
@Nullable DataFrameTransformProgress progress,
@Nullable NodeAttributes node) {
this.taskState = taskState;
this.indexerState = indexerState;
this.currentPosition = position == null ? null : Collections.unmodifiableMap(new LinkedHashMap<>(position));
this.checkpoint = checkpoint;
this.reason = reason;
this.progress = progress;
this.node = node;
}
public IndexerState getIndexerState() {
@ -115,6 +121,11 @@ public class DataFrameTransformState {
return progress;
}
@Nullable
public NodeAttributes getNode() {
return node;
}
@Override
public boolean equals(Object other) {
if (this == other) {
@ -132,12 +143,13 @@ public class DataFrameTransformState {
Objects.equals(this.currentPosition, that.currentPosition) &&
Objects.equals(this.progress, that.progress) &&
this.checkpoint == that.checkpoint &&
Objects.equals(this.node, that.node) &&
Objects.equals(this.reason, that.reason);
}
@Override
public int hashCode() {
return Objects.hash(taskState, indexerState, currentPosition, checkpoint, reason, progress);
return Objects.hash(taskState, indexerState, currentPosition, checkpoint, reason, progress, node);
}
}

View File

@ -0,0 +1,156 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.dataframe.transforms;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
/**
* A POJO class containing an Elasticsearch node's attributes
*/
public class NodeAttributes implements ToXContentObject {
public static final ParseField ID = new ParseField("id");
public static final ParseField NAME = new ParseField("name");
public static final ParseField EPHEMERAL_ID = new ParseField("ephemeral_id");
public static final ParseField TRANSPORT_ADDRESS = new ParseField("transport_address");
public static final ParseField ATTRIBUTES = new ParseField("attributes");
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<NodeAttributes, Void> PARSER =
new ConstructingObjectParser<>("node", true,
(a) -> {
int i = 0;
String id = (String) a[i++];
String name = (String) a[i++];
String ephemeralId = (String) a[i++];
String transportAddress = (String) a[i++];
Map<String, String> attributes = (Map<String, String>) a[i];
return new NodeAttributes(id, name, ephemeralId, transportAddress, attributes);
});
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), ID);
PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME);
PARSER.declareString(ConstructingObjectParser.constructorArg(), EPHEMERAL_ID);
PARSER.declareString(ConstructingObjectParser.constructorArg(), TRANSPORT_ADDRESS);
PARSER.declareField(ConstructingObjectParser.constructorArg(),
(p, c) -> p.mapStrings(),
ATTRIBUTES,
ObjectParser.ValueType.OBJECT);
}
private final String id;
private final String name;
private final String ephemeralId;
private final String transportAddress;
private final Map<String, String> attributes;
public NodeAttributes(String id, String name, String ephemeralId, String transportAddress, Map<String, String> attributes) {
this.id = id;
this.name = name;
this.ephemeralId = ephemeralId;
this.transportAddress = transportAddress;
this.attributes = Collections.unmodifiableMap(attributes);
}
/**
* The unique identifier of the node.
*/
public String getId() {
return id;
}
/**
* The node name.
*/
public String getName() {
return name;
}
/**
* The ephemeral id of the node.
*/
public String getEphemeralId() {
return ephemeralId;
}
/**
* The host and port where transport connections are accepted.
*/
public String getTransportAddress() {
return transportAddress;
}
/**
* Additional attributes related to this node
*/
public Map<String, String> getAttributes() {
return attributes;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(ID.getPreferredName(), id);
builder.field(NAME.getPreferredName(), name);
builder.field(EPHEMERAL_ID.getPreferredName(), ephemeralId);
builder.field(TRANSPORT_ADDRESS.getPreferredName(), transportAddress);
builder.field(ATTRIBUTES.getPreferredName(), attributes);
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(id, name, ephemeralId, transportAddress, attributes);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
NodeAttributes that = (NodeAttributes) other;
return Objects.equals(id, that.id) &&
Objects.equals(name, that.name) &&
Objects.equals(ephemeralId, that.ephemeralId) &&
Objects.equals(transportAddress, that.transportAddress) &&
Objects.equals(attributes, that.attributes);
}
@Override
public String toString() {
return Strings.toString(this);
}
}

View File

@ -0,0 +1,68 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.indices;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Validatable;
import java.util.Objects;
/**
* Request for the _reload_search_analyzers API
*/
public final class ReloadAnalyzersRequest implements Validatable {
private final String[] indices;
private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();
/**
* Creates a new reload analyzers request
* @param indices the indices for which to reload analyzers
*/
public ReloadAnalyzersRequest(String... indices) {
this.indices = Objects.requireNonNull(indices);
}
/**
* Returns the indices
*/
public String[] getIndices() {
return indices;
}
/**
* Specifies which types of requested indices to ignore (for example, indices that don't exist)
* and how to deal with wildcard expressions.
*
* @return the current behaviour when it comes to index names and wildcard indices expressions
*/
public IndicesOptions indicesOptions() {
return indicesOptions;
}
/**
* Specifies which types of requested indices to ignore (for example, indices that don't exist)
* and how to deal with wildcard expressions.
*
* @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
*/
public void setIndicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
}
}
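A small sketch (not part of this commit; the index patterns and options are only for illustration) of configuring the request before sending it:

ReloadAnalyzersRequest request = new ReloadAnalyzersRequest("logs-2019-*", "logs-2018-*");
request.setIndicesOptions(IndicesOptions.lenientExpandOpen()); // tolerate missing indices, expand wildcards to open indices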

View File

@ -0,0 +1,108 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.indices;
import org.elasticsearch.client.core.BroadcastResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
/**
* The response object that will be returned when reloading analyzers
*/
public class ReloadAnalyzersResponse extends BroadcastResponse {
private final Map<String, ReloadDetails> reloadDetails;
ReloadAnalyzersResponse(final Shards shards, Map<String, ReloadDetails> reloadDetails) {
super(shards);
this.reloadDetails = reloadDetails;
}
@SuppressWarnings({ "unchecked" })
private static final ConstructingObjectParser<ReloadAnalyzersResponse, Void> PARSER = new ConstructingObjectParser<>("reload_analyzer",
true, arg -> {
Shards shards = (Shards) arg[0];
List<Tuple<String, ReloadDetails>> results = (List<Tuple<String, ReloadDetails>>) arg[1];
Map<String, ReloadDetails> reloadDetails = new HashMap<>();
for (Tuple<String, ReloadDetails> result : results) {
reloadDetails.put(result.v1(), result.v2());
}
return new ReloadAnalyzersResponse(shards, reloadDetails);
});
@SuppressWarnings({ "unchecked" })
private static final ConstructingObjectParser<Tuple<String, ReloadDetails>, Void> ENTRY_PARSER = new ConstructingObjectParser<>(
"reload_analyzer.entry", true, arg -> {
String index = (String) arg[0];
Set<String> nodeIds = new HashSet<>((List<String>) arg[1]);
Set<String> analyzers = new HashSet<>((List<String>) arg[2]);
return new Tuple<>(index, new ReloadDetails(index, nodeIds, analyzers));
});
static {
declareShardsField(PARSER);
PARSER.declareObjectArray(constructorArg(), ENTRY_PARSER, new ParseField("reload_details"));
ENTRY_PARSER.declareString(constructorArg(), new ParseField("index"));
ENTRY_PARSER.declareStringArray(constructorArg(), new ParseField("reloaded_node_ids"));
ENTRY_PARSER.declareStringArray(constructorArg(), new ParseField("reloaded_analyzers"));
}
public static ReloadAnalyzersResponse fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
public Map<String, ReloadDetails> getReloadedDetails() {
return reloadDetails;
}
public static class ReloadDetails {
private final String indexName;
private final Set<String> reloadedIndicesNodes;
private final Set<String> reloadedAnalyzers;
public ReloadDetails(String name, Set<String> reloadedIndicesNodes, Set<String> reloadedAnalyzers) {
this.indexName = name;
this.reloadedIndicesNodes = reloadedIndicesNodes;
this.reloadedAnalyzers = reloadedAnalyzers;
}
public String getIndexName() {
return indexName;
}
public Set<String> getReloadedIndicesNodes() {
return reloadedIndicesNodes;
}
public Set<String> getReloadedAnalyzers() {
return reloadedAnalyzers;
}
}
}

View File

@ -31,10 +31,12 @@ import java.util.Optional;
public class StopDataFrameAnalyticsRequest implements Validatable {
public static final ParseField ALLOW_NO_MATCH = new ParseField("allow_no_match");
public static final ParseField FORCE = new ParseField("force");
private final String id;
private TimeValue timeout;
private Boolean allowNoMatch;
private Boolean force;
private TimeValue timeout;
public StopDataFrameAnalyticsRequest(String id) {
this.id = id;
@ -62,6 +64,15 @@ public class StopDataFrameAnalyticsRequest implements Validatable {
return this;
}
public Boolean getForce() {
return force;
}
public StopDataFrameAnalyticsRequest setForce(boolean force) {
this.force = force;
return this;
}
@Override
public Optional<ValidationException> validate() {
if (id == null) {
@ -78,11 +89,12 @@ public class StopDataFrameAnalyticsRequest implements Validatable {
StopDataFrameAnalyticsRequest other = (StopDataFrameAnalyticsRequest) o;
return Objects.equals(id, other.id)
&& Objects.equals(timeout, other.timeout)
&& Objects.equals(allowNoMatch, other.allowNoMatch);
&& Objects.equals(allowNoMatch, other.allowNoMatch)
&& Objects.equals(force, other.force);
}
@Override
public int hashCode() {
return Objects.hash(id, timeout, allowNoMatch);
return Objects.hash(id, timeout, allowNoMatch, force);
}
}
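A request-building sketch (not part of this commit; the id is hypothetical) combining the existing options with the new force flag, mirroring the converter test later in this diff:

StopDataFrameAnalyticsRequest stopRequest = new StopDataFrameAnalyticsRequest("my-analytics")
    .setTimeout(TimeValue.timeValueMinutes(1))
    .setAllowNoMatch(false)
    .setForce(true);    // serialized as the "force" request parameter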

View File

@ -41,6 +41,7 @@ public class DataFrameAnalyticsStats {
static final ParseField ID = new ParseField("id");
static final ParseField STATE = new ParseField("state");
static final ParseField FAILURE_REASON = new ParseField("failure_reason");
static final ParseField PROGRESS_PERCENT = new ParseField("progress_percent");
static final ParseField NODE = new ParseField("node");
static final ParseField ASSIGNMENT_EXPLANATION = new ParseField("assignment_explanation");
@ -50,9 +51,10 @@ public class DataFrameAnalyticsStats {
args -> new DataFrameAnalyticsStats(
(String) args[0],
(DataFrameAnalyticsState) args[1],
(Integer) args[2],
(NodeAttributes) args[3],
(String) args[4]));
(String) args[2],
(Integer) args[3],
(NodeAttributes) args[4],
(String) args[5]));
static {
PARSER.declareString(constructorArg(), ID);
@ -62,6 +64,7 @@ public class DataFrameAnalyticsStats {
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, STATE, ObjectParser.ValueType.STRING);
PARSER.declareString(optionalConstructorArg(), FAILURE_REASON);
PARSER.declareInt(optionalConstructorArg(), PROGRESS_PERCENT);
PARSER.declareObject(optionalConstructorArg(), NodeAttributes.PARSER, NODE);
PARSER.declareString(optionalConstructorArg(), ASSIGNMENT_EXPLANATION);
@ -69,14 +72,17 @@ public class DataFrameAnalyticsStats {
private final String id;
private final DataFrameAnalyticsState state;
private final String failureReason;
private final Integer progressPercent;
private final NodeAttributes node;
private final String assignmentExplanation;
public DataFrameAnalyticsStats(String id, DataFrameAnalyticsState state, @Nullable Integer progressPercent,
@Nullable NodeAttributes node, @Nullable String assignmentExplanation) {
public DataFrameAnalyticsStats(String id, DataFrameAnalyticsState state, @Nullable String failureReason,
@Nullable Integer progressPercent, @Nullable NodeAttributes node,
@Nullable String assignmentExplanation) {
this.id = id;
this.state = state;
this.failureReason = failureReason;
this.progressPercent = progressPercent;
this.node = node;
this.assignmentExplanation = assignmentExplanation;
@ -90,6 +96,10 @@ public class DataFrameAnalyticsStats {
return state;
}
public String getFailureReason() {
return failureReason;
}
public Integer getProgressPercent() {
return progressPercent;
}
@ -110,6 +120,7 @@ public class DataFrameAnalyticsStats {
DataFrameAnalyticsStats other = (DataFrameAnalyticsStats) o;
return Objects.equals(id, other.id)
&& Objects.equals(state, other.state)
&& Objects.equals(failureReason, other.failureReason)
&& Objects.equals(progressPercent, other.progressPercent)
&& Objects.equals(node, other.node)
&& Objects.equals(assignmentExplanation, other.assignmentExplanation);
@ -117,7 +128,7 @@ public class DataFrameAnalyticsStats {
@Override
public int hashCode() {
return Objects.hash(id, state, progressPercent, node, assignmentExplanation);
return Objects.hash(id, state, failureReason, progressPercent, node, assignmentExplanation);
}
@Override
@ -125,6 +136,7 @@ public class DataFrameAnalyticsStats {
return new ToStringBuilder(getClass())
.add("id", id)
.add("state", state)
.add("failureReason", failureReason)
.add("progressPercent", progressPercent)
.add("node", node)
.add("assignmentExplanation", assignmentExplanation)

View File

@ -40,6 +40,7 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject {
private final String username;
private final char[] password;
private final String refreshToken;
private final char[] kerberosTicket;
/**
* General purpose constructor. This constructor is typically not useful, and one of the following factory methods should be used
@ -48,10 +49,11 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject {
* <li>{@link #passwordGrant(String, char[])}</li>
* <li>{@link #refreshTokenGrant(String)}</li>
* <li>{@link #clientCredentialsGrant()}</li>
* <li>{@link #kerberosGrant(char[])}</li>
* </ul>
*/
public CreateTokenRequest(String grantType, @Nullable String scope, @Nullable String username, @Nullable char[] password,
@Nullable String refreshToken) {
@Nullable String refreshToken, @Nullable char[] kerberosTicket) {
if (Strings.isNullOrEmpty(grantType)) {
throw new IllegalArgumentException("grant_type is required");
}
@ -60,6 +62,7 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject {
this.password = password;
this.scope = scope;
this.refreshToken = refreshToken;
this.kerberosTicket = kerberosTicket;
}
public static CreateTokenRequest passwordGrant(String username, char[] password) {
@ -69,18 +72,25 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject {
if (password == null || password.length == 0) {
throw new IllegalArgumentException("password is required");
}
return new CreateTokenRequest("password", null, username, password, null);
return new CreateTokenRequest("password", null, username, password, null, null);
}
public static CreateTokenRequest refreshTokenGrant(String refreshToken) {
if (Strings.isNullOrEmpty(refreshToken)) {
throw new IllegalArgumentException("refresh_token is required");
}
return new CreateTokenRequest("refresh_token", null, null, null, refreshToken);
return new CreateTokenRequest("refresh_token", null, null, null, refreshToken, null);
}
public static CreateTokenRequest clientCredentialsGrant() {
return new CreateTokenRequest("client_credentials", null, null, null, null);
return new CreateTokenRequest("client_credentials", null, null, null, null, null);
}
public static CreateTokenRequest kerberosGrant(char[] kerberosTicket) {
if (kerberosTicket == null || kerberosTicket.length == 0) {
throw new IllegalArgumentException("kerberos ticket is required");
}
return new CreateTokenRequest("_kerberos", null, null, null, null, kerberosTicket);
}
public String getGrantType() {
@ -103,6 +113,10 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject {
return refreshToken;
}
public char[] getKerberosTicket() {
return kerberosTicket;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject()
@ -124,6 +138,14 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject {
if (refreshToken != null) {
builder.field("refresh_token", refreshToken);
}
if (kerberosTicket != null) {
byte[] kerberosTicketBytes = CharArrays.toUtf8Bytes(kerberosTicket);
try {
builder.field("kerberos_ticket").utf8Value(kerberosTicketBytes, 0, kerberosTicketBytes.length);
} finally {
Arrays.fill(kerberosTicketBytes, (byte) 0);
}
}
return builder.endObject();
}
@ -140,13 +162,15 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject {
Objects.equals(scope, that.scope) &&
Objects.equals(username, that.username) &&
Arrays.equals(password, that.password) &&
Objects.equals(refreshToken, that.refreshToken);
Objects.equals(refreshToken, that.refreshToken) &&
Arrays.equals(kerberosTicket, that.kerberosTicket);
}
@Override
public int hashCode() {
int result = Objects.hash(grantType, scope, username, refreshToken);
result = 31 * result + Arrays.hashCode(password);
result = 31 * result + Arrays.hashCode(kerberosTicket);
return result;
}
}
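A sketch (not part of this commit; the ticket value is a placeholder) of the new kerberos grant factory method:

char[] ticket = "PLACEHOLDER-BASE64-TICKET".toCharArray();     // a real base64-encoded Kerberos ticket would go here
CreateTokenRequest request = CreateTokenRequest.kerberosGrant(ticket);
// grant type is "_kerberos"; the ticket is written as "kerberos_ticket" and its UTF-8 byte copy is zeroed after serialization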

View File

@ -0,0 +1,40 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.security;
import org.apache.http.client.methods.HttpGet;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Validatable;
/**
* Request object to retrieve the privileges that are builtin to the Elasticsearch cluster.
*/
public final class GetBuiltinPrivilegesRequest implements Validatable {
public static final GetBuiltinPrivilegesRequest INSTANCE = new GetBuiltinPrivilegesRequest();
private GetBuiltinPrivilegesRequest() {
}
public Request getRequest() {
return new Request(HttpGet.METHOD_NAME, "/_security/privilege/_builtin");
}
}

View File

@ -0,0 +1,84 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.security;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
/**
* Get builtin privileges response
*/
public final class GetBuiltinPrivilegesResponse {
private final Set<String> clusterPrivileges;
private final Set<String> indexPrivileges;
public GetBuiltinPrivilegesResponse(Collection<String> cluster, Collection<String> index) {
this.clusterPrivileges = Collections.unmodifiableSet(new HashSet<>(cluster));
this.indexPrivileges = Collections.unmodifiableSet(new HashSet<>(index));
}
public Set<String> getClusterPrivileges() {
return clusterPrivileges;
}
public Set<String> getIndexPrivileges() {
return indexPrivileges;
}
public static GetBuiltinPrivilegesResponse fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
GetBuiltinPrivilegesResponse that = (GetBuiltinPrivilegesResponse) o;
return Objects.equals(this.clusterPrivileges, that.clusterPrivileges)
&& Objects.equals(this.indexPrivileges, that.indexPrivileges);
}
@Override
public int hashCode() {
return Objects.hash(clusterPrivileges, indexPrivileges);
}
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<GetBuiltinPrivilegesResponse, Void> PARSER = new ConstructingObjectParser<>(
"get_builtin_privileges", true,
args -> new GetBuiltinPrivilegesResponse((Collection<String>) args[0], (Collection<String>) args[1]));
static {
PARSER.declareStringArray(constructorArg(), new ParseField("cluster"));
PARSER.declareStringArray(constructorArg(), new ParseField("index"));
}
}

View File

@ -299,10 +299,12 @@ public final class Role {
public static final String NONE = "none";
public static final String ALL = "all";
public static final String MONITOR = "monitor";
public static final String MONITOR_DATA_FRAME_TRANSFORMS = "monitor_data_frame_transforms";
public static final String MONITOR_ML = "monitor_ml";
public static final String MONITOR_WATCHER = "monitor_watcher";
public static final String MONITOR_ROLLUP = "monitor_rollup";
public static final String MANAGE = "manage";
public static final String MANAGE_DATA_FRAME_TRANSFORMS = "manage_data_frame_transforms";
public static final String MANAGE_ML = "manage_ml";
public static final String MANAGE_WATCHER = "manage_watcher";
public static final String MANAGE_ROLLUP = "manage_rollup";
@ -319,8 +321,9 @@ public final class Role {
public static final String MANAGE_ILM = "manage_ilm";
public static final String READ_ILM = "read_ilm";
public static final String MANAGE_ENRICH = "manage_enrich";
public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_ML, MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE,
MANAGE_ML, MANAGE_WATCHER, MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES, MANAGE_INGEST_PIPELINES, TRANSPORT_CLIENT,
public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_DATA_FRAME_TRANSFORMS, MONITOR_ML,
MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE, MANAGE_DATA_FRAME_TRANSFORMS,
MANAGE_ML, MANAGE_WATCHER, MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES, MANAGE_INGEST_PIPELINES, TRANSPORT_CLIENT,
MANAGE_SECURITY, MANAGE_SAML, MANAGE_OIDC, MANAGE_TOKEN, MANAGE_PIPELINE, MANAGE_CCR, READ_CCR, MANAGE_ILM, READ_ILM,
MANAGE_ENRICH };
}

View File

@ -60,6 +60,7 @@ import org.junit.After;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -71,6 +72,7 @@ import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.oneOf;
@ -277,6 +279,7 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase {
assertThat(taskState, is(DataFrameTransformTaskState.STOPPED));
}
@SuppressWarnings("unchecked")
public void testPreview() throws IOException {
String sourceIndex = "transform-source";
createIndex(sourceIndex);
@ -298,6 +301,12 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase {
Optional<Map<String, Object>> michel = docs.stream().filter(doc -> "michel".equals(doc.get("reviewer"))).findFirst();
assertTrue(michel.isPresent());
assertEquals(3.6d, (double) michel.get().get("avg_rating"), 0.1d);
Map<String, Object> mappings = preview.getMappings();
assertThat(mappings, hasKey("properties"));
Map<String, Object> fields = (Map<String, Object>) mappings.get("properties");
assertThat(fields.get("reviewer"), equalTo(Collections.singletonMap("type", "keyword")));
assertThat(fields.get("avg_rating"), equalTo(Collections.singletonMap("type", "double")));
}
private DataFrameTransformConfig validDataFrameTransformConfig(String id, String source, String destination) {

View File

@ -73,6 +73,8 @@ import org.elasticsearch.client.indices.IndexTemplateMetaData;
import org.elasticsearch.client.indices.IndexTemplatesExistRequest;
import org.elasticsearch.client.indices.PutIndexTemplateRequest;
import org.elasticsearch.client.indices.PutMappingRequest;
import org.elasticsearch.client.indices.ReloadAnalyzersRequest;
import org.elasticsearch.client.indices.ReloadAnalyzersResponse;
import org.elasticsearch.client.indices.UnfreezeIndexRequest;
import org.elasticsearch.client.indices.rollover.RolloverRequest;
import org.elasticsearch.client.indices.rollover.RolloverResponse;
@ -1877,4 +1879,14 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
assertTrue(unfreeze.isShardsAcknowledged());
assertTrue(unfreeze.isAcknowledged());
}
public void testReloadAnalyzer() throws IOException {
createIndex("test", Settings.EMPTY);
RestHighLevelClient client = highLevelClient();
ReloadAnalyzersResponse reloadResponse = execute(new ReloadAnalyzersRequest("test"), client.indices()::reloadAnalyzers,
client.indices()::reloadAnalyzersAsync);
assertNotNull(reloadResponse.shards());
assertTrue(reloadResponse.getReloadedDetails().containsKey("test"));
}
}

View File

@ -54,6 +54,7 @@ import org.elasticsearch.client.indices.IndexTemplatesExistRequest;
import org.elasticsearch.client.indices.PutIndexTemplateRequest;
import org.elasticsearch.client.indices.PutMappingRequest;
import org.elasticsearch.client.indices.RandomCreateIndexGenerator;
import org.elasticsearch.client.indices.ReloadAnalyzersRequest;
import org.elasticsearch.client.indices.rollover.RolloverRequest;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.Strings;
@ -1215,4 +1216,21 @@ public class IndicesRequestConvertersTests extends ESTestCase {
Assert.assertThat(request.getParameters(), equalTo(expectedParams));
Assert.assertThat(request.getEntity(), nullValue());
}
public void testReloadAnalyzers() {
String[] indices = RequestConvertersTests.randomIndicesNames(1, 5);
StringJoiner endpoint = new StringJoiner("/", "/", "");
if (indices != null && indices.length > 0) {
endpoint.add(String.join(",", indices));
}
ReloadAnalyzersRequest reloadRequest = new ReloadAnalyzersRequest(indices);
Map<String, String> expectedParams = new HashMap<>();
RequestConvertersTests.setRandomIndicesOptions(reloadRequest::setIndicesOptions, reloadRequest::indicesOptions,
expectedParams);
Request request = IndicesRequestConverters.reloadAnalyzers(reloadRequest);
Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
Assert.assertThat(request.getEndpoint(), equalTo(endpoint + "/_reload_search_analyzers"));
Assert.assertThat(request.getParameters(), equalTo(expectedParams));
Assert.assertThat(request.getEntity(), nullValue());
}
}

View File

@ -758,11 +758,15 @@ public class MLRequestConvertersTests extends ESTestCase {
public void testStopDataFrameAnalytics_WithParams() {
StopDataFrameAnalyticsRequest stopRequest = new StopDataFrameAnalyticsRequest(randomAlphaOfLength(10))
.setTimeout(TimeValue.timeValueMinutes(1))
.setAllowNoMatch(false);
.setAllowNoMatch(false)
.setForce(true);
Request request = MLRequestConverters.stopDataFrameAnalytics(stopRequest);
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
assertEquals("/_ml/data_frame/analytics/" + stopRequest.getId() + "/_stop", request.getEndpoint());
assertThat(request.getParameters(), allOf(hasEntry("timeout", "1m"), hasEntry("allow_no_match", "false")));
assertThat(request.getParameters(), allOf(
hasEntry("timeout", "1m"),
hasEntry("allow_no_match", "false"),
hasEntry("force", "true")));
assertNull(request.getEntity());
}

View File

@ -1359,6 +1359,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
DataFrameAnalyticsStats stats = statsResponse.getAnalyticsStats().get(0);
assertThat(stats.getId(), equalTo(configId));
assertThat(stats.getState(), equalTo(DataFrameAnalyticsState.STOPPED));
assertNull(stats.getFailureReason());
assertNull(stats.getProgressPercent());
assertNull(stats.getNode());
assertNull(stats.getAssignmentExplanation());
@ -1405,6 +1406,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
assertTrue(highLevelClient().indices().exists(new GetIndexRequest(destIndex), RequestOptions.DEFAULT));
}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/43924")
public void testStopDataFrameAnalyticsConfig() throws Exception {
String sourceIndex = "stop-test-source-index";
String destIndex = "stop-test-dest-index";

View File

@ -141,7 +141,7 @@ public class RestHighLevelClientTests extends ESTestCase {
// core
"ping", "info",
// security
"security.get_ssl_certificates", "security.authenticate", "security.get_user_privileges",
"security.get_ssl_certificates", "security.authenticate", "security.get_user_privileges", "security.get_builtin_privileges",
// license
"license.get_trial_status", "license.get_basic_status"
@ -824,6 +824,7 @@ public class RestHighLevelClientTests extends ESTestCase {
apiName.startsWith("ccr.") == false &&
apiName.startsWith("data_frame") == false &&
apiName.endsWith("freeze") == false &&
apiName.endsWith("reload_analyzers") == false &&
// IndicesClientIT.getIndexTemplate should be renamed "getTemplate" in version 8.0 when we
// can get rid of 7.0's deprecated "getTemplate"
apiName.equals("indices.get_index_template") == false) {

View File

@ -316,7 +316,7 @@ public class SecurityRequestConvertersTests extends ESTestCase {
assertNull(request.getEntity());
}
public void testGetAllApplicationPrivileges() throws Exception {
public void testGetAllPrivilegesForApplication() throws Exception {
final String application = randomAlphaOfLength(6);
GetPrivilegesRequest getPrivilegesRequest = GetPrivilegesRequest.getApplicationPrivileges(application);
Request request = SecurityRequestConverters.getPrivileges(getPrivilegesRequest);
@ -340,7 +340,7 @@ public class SecurityRequestConvertersTests extends ESTestCase {
assertNull(request.getEntity());
}
public void testGetAllPrivileges() throws Exception {
public void testGetAllApplicationPrivileges() throws Exception {
GetPrivilegesRequest getPrivilegesRequest = GetPrivilegesRequest.getAllPrivileges();
Request request = SecurityRequestConverters.getPrivileges(getPrivilegesRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());

View File

@ -24,6 +24,7 @@ import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -53,8 +54,13 @@ public class PreviewDataFrameTransformResponseTests extends ESTestCase {
}
docs.add(doc);
}
int numMappingEntries = randomIntBetween(5, 10);
Map<String, Object> mappings = new HashMap<>(numMappingEntries);
for (int i = 0; i < numMappingEntries; i++) {
mappings.put(randomAlphaOfLength(10), Collections.singletonMap("type", randomAlphaOfLength(10)));
}
return new PreviewDataFrameTransformResponse(docs);
return new PreviewDataFrameTransformResponse(docs, mappings);
}
private void toXContent(PreviewDataFrameTransformResponse response, XContentBuilder builder) throws IOException {
@ -64,6 +70,7 @@ public class PreviewDataFrameTransformResponseTests extends ESTestCase {
builder.map(doc);
}
builder.endArray();
builder.field("mappings", response.getMappings());
builder.endObject();
}
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.client.dataframe.transforms;
import org.elasticsearch.client.core.IndexerState;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.test.ESTestCase;
@ -37,7 +38,8 @@ public class DataFrameTransformStateTests extends ESTestCase {
DataFrameTransformStateTests::toXContent,
DataFrameTransformState::fromXContent)
.supportsUnknownFields(true)
.randomFieldsExcludeFilter(field -> field.equals("current_position"))
.randomFieldsExcludeFilter(field -> field.equals("current_position") ||
field.equals("node.attributes"))
.test();
}
@ -47,7 +49,8 @@ public class DataFrameTransformStateTests extends ESTestCase {
randomPositionMap(),
randomLongBetween(0,10),
randomBoolean() ? null : randomAlphaOfLength(10),
randomBoolean() ? null : DataFrameTransformProgressTests.randomInstance());
randomBoolean() ? null : DataFrameTransformProgressTests.randomInstance(),
randomBoolean() ? null : NodeAttributesTests.createRandom());
}
public static void toXContent(DataFrameTransformState state, XContentBuilder builder) throws IOException {
@ -65,6 +68,10 @@ public class DataFrameTransformStateTests extends ESTestCase {
builder.field("progress");
DataFrameTransformProgressTests.toXContent(state.getProgress(), builder);
}
if (state.getNode() != null) {
builder.field("node");
state.getNode().toXContent(builder, ToXContent.EMPTY_PARAMS);
}
builder.endObject();
}

View File

@ -0,0 +1,64 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.dataframe.transforms;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;
public class NodeAttributesTests extends AbstractXContentTestCase<NodeAttributes> {
public static NodeAttributes createRandom() {
int numberOfAttributes = randomIntBetween(1, 10);
Map<String, String> attributes = new HashMap<>(numberOfAttributes);
for(int i = 0; i < numberOfAttributes; i++) {
String val = randomAlphaOfLength(10);
attributes.put("key-"+i, val);
}
return new NodeAttributes(randomAlphaOfLength(10),
randomAlphaOfLength(10),
randomAlphaOfLength(10),
randomAlphaOfLength(10),
attributes);
}
@Override
protected NodeAttributes createTestInstance() {
return createRandom();
}
@Override
protected NodeAttributes doParseInstance(XContentParser parser) throws IOException {
return NodeAttributes.PARSER.parse(parser, null);
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
return field -> !field.isEmpty();
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
}

View File

@ -64,7 +64,7 @@ public class DataFrameTransformStateAndStatsTests extends AbstractHlrcXContentTe
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
return field -> field.equals("state.current_position");
return field -> field.equals("state.current_position") || field.equals("state.node") || field.equals("state.node.attributes");
}
}

View File

@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgr
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState;
import org.elasticsearch.xpack.core.dataframe.transforms.NodeAttributes;
import org.elasticsearch.xpack.core.indexing.IndexerState;
import java.io.IOException;
@ -40,8 +41,20 @@ public class DataFrameTransformStateTests extends AbstractHlrcXContentTestCase<D
public static DataFrameTransformState fromHlrc(org.elasticsearch.client.dataframe.transforms.DataFrameTransformState instance) {
return new DataFrameTransformState(DataFrameTransformTaskState.fromString(instance.getTaskState().value()),
IndexerState.fromString(instance.getIndexerState().value()), instance.getPosition(), instance.getCheckpoint(),
instance.getReason(), DataFrameTransformProgressTests.fromHlrc(instance.getProgress()));
IndexerState.fromString(instance.getIndexerState().value()),
instance.getPosition(),
instance.getCheckpoint(),
instance.getReason(),
DataFrameTransformProgressTests.fromHlrc(instance.getProgress()),
fromHlrc(instance.getNode()));
}
public static NodeAttributes fromHlrc(org.elasticsearch.client.dataframe.transforms.NodeAttributes attributes) {
return attributes == null ? null : new NodeAttributes(attributes.getId(),
attributes.getName(),
attributes.getEphemeralId(),
attributes.getTransportAddress(),
attributes.getAttributes());
}
@Override
@ -72,7 +85,7 @@ public class DataFrameTransformStateTests extends AbstractHlrcXContentTestCase<D
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
return field -> field.equals("current_position");
return field -> field.equals("current_position") || field.equals("node.attributes");
}
public static DataFrameTransformStateAndStats randomDataFrameTransformStateAndStats(String id) {
@ -97,6 +110,20 @@ public class DataFrameTransformStateTests extends AbstractHlrcXContentTestCase<D
return new DataFrameTransformProgress(totalDocs, remainingDocs);
}
public static NodeAttributes randomNodeAttributes() {
int numberOfAttributes = randomIntBetween(1, 10);
Map<String, String> attributes = new HashMap<>(numberOfAttributes);
for(int i = 0; i < numberOfAttributes; i++) {
String val = randomAlphaOfLength(10);
attributes.put("key-"+i, val);
}
return new NodeAttributes(randomAlphaOfLength(10),
randomAlphaOfLength(10),
randomAlphaOfLength(10),
randomAlphaOfLength(10),
attributes);
}
public static DataFrameIndexerTransformStats randomStats(String transformId) {
return new DataFrameIndexerTransformStats(transformId, randomLongBetween(10L, 10000L),
randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L),
@ -110,7 +137,8 @@ public class DataFrameTransformStateTests extends AbstractHlrcXContentTestCase<D
randomPosition(),
randomLongBetween(0,10),
randomBoolean() ? null : randomAlphaOfLength(10),
randomBoolean() ? null : randomDataFrameTransformProgress());
randomBoolean() ? null : randomDataFrameTransformProgress(),
randomBoolean() ? null : randomNodeAttributes());
}
private static Map<String, Object> randomPosition() {

View File

@ -45,6 +45,7 @@ import org.elasticsearch.client.dataframe.transforms.DataFrameTransformProgress;
import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats;
import org.elasticsearch.client.dataframe.transforms.DataFrameTransformTaskState;
import org.elasticsearch.client.dataframe.transforms.DestConfig;
import org.elasticsearch.client.dataframe.transforms.NodeAttributes;
import org.elasticsearch.client.dataframe.transforms.QueryConfig;
import org.elasticsearch.client.dataframe.transforms.SourceConfig;
import org.elasticsearch.client.dataframe.transforms.pivot.AggregationConfig;
@ -447,6 +448,7 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest
// end::preview-data-frame-transform-execute
assertNotNull(response.getDocs());
assertNotNull(response.getMappings());
}
{
// tag::preview-data-frame-transform-execute-listener
@ -532,6 +534,8 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest
stateAndStats.getTransformStats(); // <4>
DataFrameTransformProgress progress =
stateAndStats.getTransformState().getProgress(); // <5>
NodeAttributes node =
stateAndStats.getTransformState().getNode(); // <6>
// end::get-data-frame-transform-stats-response
assertEquals(IndexerState.STOPPED, indexerState);

View File

@ -58,6 +58,7 @@ import org.elasticsearch.client.GetAliasesResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.SyncedFlushResponse;
import org.elasticsearch.client.core.BroadcastResponse.Shards;
import org.elasticsearch.client.core.ShardsAcknowledgedResponse;
import org.elasticsearch.client.indices.AnalyzeRequest;
import org.elasticsearch.client.indices.AnalyzeResponse;
@ -77,6 +78,9 @@ import org.elasticsearch.client.indices.IndexTemplateMetaData;
import org.elasticsearch.client.indices.IndexTemplatesExistRequest;
import org.elasticsearch.client.indices.PutIndexTemplateRequest;
import org.elasticsearch.client.indices.PutMappingRequest;
import org.elasticsearch.client.indices.ReloadAnalyzersRequest;
import org.elasticsearch.client.indices.ReloadAnalyzersResponse;
import org.elasticsearch.client.indices.ReloadAnalyzersResponse.ReloadDetails;
import org.elasticsearch.client.indices.UnfreezeIndexRequest;
import org.elasticsearch.client.indices.rollover.RolloverRequest;
import org.elasticsearch.client.indices.rollover.RolloverResponse;
@ -2748,4 +2752,77 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
public void testReloadSearchAnalyzers() throws Exception {
RestHighLevelClient client = highLevelClient();
{
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT);
assertTrue(createIndexResponse.isAcknowledged());
}
{
// tag::reload-analyzers-request
ReloadAnalyzersRequest request = new ReloadAnalyzersRequest("index"); // <1>
// end::reload-analyzers-request
// tag::reload-analyzers-request-indicesOptions
request.setIndicesOptions(IndicesOptions.strictExpandOpen()); // <1>
// end::reload-analyzers-request-indicesOptions
// tag::reload-analyzers-execute
ReloadAnalyzersResponse reloadResponse = client.indices().reloadAnalyzers(request, RequestOptions.DEFAULT);
// end::reload-analyzers-execute
// tag::reload-analyzers-response
Shards shards = reloadResponse.shards(); // <1>
Map<String, ReloadDetails> reloadDetails = reloadResponse.getReloadedDetails(); // <2>
ReloadDetails details = reloadDetails.get("index"); // <3>
String indexName = details.getIndexName(); // <4>
Set<String> indicesNodes = details.getReloadedIndicesNodes(); // <5>
Set<String> analyzers = details.getReloadedAnalyzers(); // <6>
// end::reload-analyzers-response
assertNotNull(shards);
assertEquals("index", indexName);
assertEquals(1, indicesNodes.size());
assertEquals(0, analyzers.size());
// tag::reload-analyzers-execute-listener
ActionListener<ReloadAnalyzersResponse> listener =
new ActionListener<ReloadAnalyzersResponse>() {
@Override
public void onResponse(ReloadAnalyzersResponse reloadResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::reload-analyzers-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::reload-analyzers-execute-async
client.indices().reloadAnalyzersAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::reload-analyzers-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
{
// tag::reload-analyzers-notfound
try {
ReloadAnalyzersRequest request = new ReloadAnalyzersRequest("does_not_exist");
client.indices().reloadAnalyzers(request, RequestOptions.DEFAULT);
} catch (ElasticsearchException exception) {
if (exception.status() == RestStatus.BAD_REQUEST) {
// <1>
}
}
// end::reload-analyzers-notfound
}
}
}

View File

@ -3110,6 +3110,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
{
// tag::stop-data-frame-analytics-request
StopDataFrameAnalyticsRequest request = new StopDataFrameAnalyticsRequest("my-analytics-config"); // <1>
request.setForce(false); // <2>
// end::stop-data-frame-analytics-request
// tag::stop-data-frame-analytics-execute

View File

@ -50,6 +50,7 @@ import org.elasticsearch.client.security.EnableUserRequest;
import org.elasticsearch.client.security.ExpressionRoleMapping;
import org.elasticsearch.client.security.GetApiKeyRequest;
import org.elasticsearch.client.security.GetApiKeyResponse;
import org.elasticsearch.client.security.GetBuiltinPrivilegesResponse;
import org.elasticsearch.client.security.GetPrivilegesRequest;
import org.elasticsearch.client.security.GetPrivilegesResponse;
import org.elasticsearch.client.security.GetRoleMappingsRequest;
@ -118,6 +119,7 @@ import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.emptyIterable;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.isIn;
import static org.hamcrest.Matchers.iterableWithSize;
@ -1497,6 +1499,60 @@ public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase {
}
}
public void testGetBuiltinPrivileges() throws Exception {
final RestHighLevelClient client = highLevelClient();
{
//tag::get-builtin-privileges-execute
GetBuiltinPrivilegesResponse response = client.security().getBuiltinPrivileges(RequestOptions.DEFAULT);
//end::get-builtin-privileges-execute
assertNotNull(response);
//tag::get-builtin-privileges-response
final Set<String> cluster = response.getClusterPrivileges();
final Set<String> index = response.getIndexPrivileges();
//end::get-builtin-privileges-response
assertThat(cluster, hasItem("all"));
assertThat(cluster, hasItem("manage"));
assertThat(cluster, hasItem("monitor"));
assertThat(cluster, hasItem("manage_security"));
assertThat(index, hasItem("all"));
assertThat(index, hasItem("manage"));
assertThat(index, hasItem("monitor"));
assertThat(index, hasItem("read"));
assertThat(index, hasItem("write"));
}
{
// tag::get-builtin-privileges-execute-listener
ActionListener<GetBuiltinPrivilegesResponse> listener = new ActionListener<GetBuiltinPrivilegesResponse>() {
@Override
public void onResponse(GetBuiltinPrivilegesResponse response) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::get-builtin-privileges-execute-listener
// Replace the empty listener by a blocking listener in test
final PlainActionFuture<GetBuiltinPrivilegesResponse> future = new PlainActionFuture<>();
listener = future;
// tag::get-builtin-privileges-execute-async
client.security().getBuiltinPrivilegesAsync(RequestOptions.DEFAULT, listener); // <1>
// end::get-builtin-privileges-execute-async
final GetBuiltinPrivilegesResponse response = future.get(30, TimeUnit.SECONDS);
assertNotNull(response);
assertThat(response.getClusterPrivileges(), hasItem("manage_security"));
assertThat(response.getIndexPrivileges(), hasItem("read"));
}
}
public void testGetPrivileges() throws Exception {
final RestHighLevelClient client = highLevelClient();
final ApplicationPrivilege readTestappPrivilege =
@ -1556,9 +1612,9 @@ public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase {
assertNotNull(response);
assertThat(response.getPrivileges().size(), equalTo(3));
final GetPrivilegesResponse exptectedResponse =
final GetPrivilegesResponse expectedResponse =
new GetPrivilegesResponse(Arrays.asList(readTestappPrivilege, writeTestappPrivilege, allTestappPrivilege));
assertThat(response, equalTo(exptectedResponse));
assertThat(response, equalTo(expectedResponse));
//tag::get-privileges-response
Set<ApplicationPrivilege> privileges = response.getPrivileges();
//end::get-privileges-response

View File

@ -0,0 +1,111 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.indices;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.client.AbstractResponseTestCase;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException;
import org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse.ReloadDetails;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.isIn;
public class ReloadAnalyzersResponseTests
extends AbstractResponseTestCase<org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse, ReloadAnalyzersResponse> {
private String index;
private String id;
private Set<Integer> shardIds;
@Override
protected org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse createServerTestInstance() {
index = randomAlphaOfLength(8);
id = randomAlphaOfLength(8);
final int total = randomIntBetween(1, 16);
final int successful = total - scaledRandomIntBetween(0, total);
final int failed = scaledRandomIntBetween(0, total - successful);
final List<DefaultShardOperationFailedException> failures = new ArrayList<>();
shardIds = new HashSet<>();
for (int i = 0; i < failed; i++) {
final DefaultShardOperationFailedException failure = new DefaultShardOperationFailedException(
index,
randomValueOtherThanMany(shardIds::contains, () -> randomIntBetween(0, total - 1)),
new RetentionLeaseNotFoundException(id));
failures.add(failure);
shardIds.add(failure.shardId());
}
Map<String, ReloadDetails> reloadedDetailsMap = new HashMap<>();
int randomIndices = randomIntBetween(0, 5);
for (int i = 0; i < randomIndices; i++) {
String indexName = randomAlphaOfLengthBetween(5, 10);
Set<String> randomNodeIds = new HashSet<>(Arrays.asList(generateRandomStringArray(5, 5, false, true)));
Set<String> randomAnalyzers = new HashSet<>(Arrays.asList(generateRandomStringArray(5, 5, false, true)));
ReloadDetails reloadedDetails = new ReloadDetails(indexName, randomNodeIds, randomAnalyzers);
reloadedDetailsMap.put(indexName, reloadedDetails);
}
return new org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse(total, successful, failed, failures, reloadedDetailsMap);
}
@Override
protected ReloadAnalyzersResponse doParseToClientInstance(XContentParser parser) throws IOException {
return ReloadAnalyzersResponse.fromXContent(parser);
}
@Override
protected void assertInstances(org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse serverTestInstance,
ReloadAnalyzersResponse clientInstance) {
assertThat(clientInstance.shards().total(), equalTo(serverTestInstance.getTotalShards()));
assertThat(clientInstance.shards().successful(), equalTo(serverTestInstance.getSuccessfulShards()));
assertThat(clientInstance.shards().skipped(), equalTo(0));
assertThat(clientInstance.shards().failed(), equalTo(serverTestInstance.getFailedShards()));
assertThat(clientInstance.shards().failures(), hasSize(clientInstance.shards().failed() == 0 ? 0 : 1)); // failures are grouped
if (clientInstance.shards().failed() > 0) {
final DefaultShardOperationFailedException groupedFailure = clientInstance.shards().failures().iterator().next();
assertThat(groupedFailure.index(), equalTo(index));
assertThat(groupedFailure.shardId(), isIn(shardIds));
assertThat(groupedFailure.reason(), containsString("reason=retention lease with ID [" + id + "] not found"));
}
Map<String, ReloadDetails> serverDetails = serverTestInstance.getReloadDetails();
assertThat(clientInstance.getReloadedDetails().size(), equalTo(serverDetails.size()));
for (Entry<String, org.elasticsearch.client.indices.ReloadAnalyzersResponse.ReloadDetails> entry : clientInstance
.getReloadedDetails().entrySet()) {
String indexName = entry.getKey();
assertTrue(serverDetails.keySet().contains(indexName));
assertEquals(serverDetails.get(indexName).getIndexName(), entry.getValue().getIndexName());
assertEquals(serverDetails.get(indexName).getReloadedAnalyzers(), entry.getValue().getReloadedAnalyzers());
assertEquals(serverDetails.get(indexName).getReloadedIndicesNodes(), entry.getValue().getReloadedIndicesNodes());
}
}
}

View File

@ -43,6 +43,7 @@ public class DataFrameAnalyticsStatsTests extends ESTestCase {
return new DataFrameAnalyticsStats(
randomAlphaOfLengthBetween(1, 10),
randomFrom(DataFrameAnalyticsState.values()),
randomBoolean() ? null : randomAlphaOfLength(10),
randomBoolean() ? null : randomIntBetween(0, 100),
randomBoolean() ? null : NodeAttributesTests.createRandom(),
randomBoolean() ? null : randomAlphaOfLengthBetween(1, 20));
@ -52,6 +53,9 @@ public class DataFrameAnalyticsStatsTests extends ESTestCase {
builder.startObject();
builder.field(DataFrameAnalyticsStats.ID.getPreferredName(), stats.getId());
builder.field(DataFrameAnalyticsStats.STATE.getPreferredName(), stats.getState().value());
if (stats.getFailureReason() != null) {
builder.field(DataFrameAnalyticsStats.FAILURE_REASON.getPreferredName(), stats.getFailureReason());
}
if (stats.getProgressPercent() != null) {
builder.field(DataFrameAnalyticsStats.PROGRESS_PERCENT.getPreferredName(), stats.getProgressPercent());
}

View File

@ -66,31 +66,54 @@ public class CreateTokenRequestTests extends ESTestCase {
assertThat(Strings.toString(request), equalTo("{\"grant_type\":\"client_credentials\"}"));
}
public void testCreateTokenFromKerberosTicket() {
final CreateTokenRequest request = CreateTokenRequest.kerberosGrant("top secret kerberos ticket".toCharArray());
assertThat(request.getGrantType(), equalTo("_kerberos"));
assertThat(request.getScope(), nullValue());
assertThat(request.getUsername(), nullValue());
assertThat(request.getPassword(), nullValue());
assertThat(request.getRefreshToken(), nullValue());
assertThat(new String(request.getKerberosTicket()), equalTo("top secret kerberos ticket"));
assertThat(Strings.toString(request), equalTo("{\"grant_type\":\"_kerberos\"," +
"\"kerberos_ticket\":\"top secret kerberos ticket\"}"));
}
public void testEqualsAndHashCode() {
final String grantType = randomAlphaOfLength(8);
final String scope = randomBoolean() ? null : randomAlphaOfLength(6);
final String username = randomBoolean() ? null : randomAlphaOfLengthBetween(4, 10);
final char[] password = randomBoolean() ? null : randomAlphaOfLengthBetween(8, 12).toCharArray();
final String refreshToken = randomBoolean() ? null : randomAlphaOfLengthBetween(12, 24);
final CreateTokenRequest request = new CreateTokenRequest(grantType, scope, username, password, refreshToken);
final char[] kerberosTicket = randomBoolean() ? null : randomAlphaOfLengthBetween(8, 12).toCharArray();
final CreateTokenRequest request = new CreateTokenRequest(grantType, scope, username, password, refreshToken, kerberosTicket);
EqualsHashCodeTestUtils.checkEqualsAndHashCode(request,
r -> new CreateTokenRequest(r.getGrantType(), r.getScope(), r.getUsername(), r.getPassword(), r.getRefreshToken()),
r -> new CreateTokenRequest(r.getGrantType(), r.getScope(), r.getUsername(), r.getPassword(),
r.getRefreshToken(), r.getKerberosTicket()),
this::mutate);
}
private CreateTokenRequest mutate(CreateTokenRequest req) {
switch (randomIntBetween(1, 5)) {
case 1:
return new CreateTokenRequest("g", req.getScope(), req.getUsername(), req.getPassword(), req.getRefreshToken());
case 2:
return new CreateTokenRequest(req.getGrantType(), "s", req.getUsername(), req.getPassword(), req.getRefreshToken());
case 3:
return new CreateTokenRequest(req.getGrantType(), req.getScope(), "u", req.getPassword(), req.getRefreshToken());
case 4:
final char[] password = {'p'};
return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), password, req.getRefreshToken());
case 5:
return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), req.getPassword(), "r");
switch (randomIntBetween(1, 6)) {
case 1:
return new CreateTokenRequest("g", req.getScope(), req.getUsername(), req.getPassword(), req.getRefreshToken(),
req.getKerberosTicket());
case 2:
return new CreateTokenRequest(req.getGrantType(), "s", req.getUsername(), req.getPassword(), req.getRefreshToken(),
req.getKerberosTicket());
case 3:
return new CreateTokenRequest(req.getGrantType(), req.getScope(), "u", req.getPassword(), req.getRefreshToken(),
req.getKerberosTicket());
case 4:
final char[] password = { 'p' };
return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), password, req.getRefreshToken(),
req.getKerberosTicket());
case 5:
final char[] kerberosTicket = { 'k' };
return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), req.getPassword(), req.getRefreshToken(),
kerberosTicket);
case 6:
return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), req.getPassword(), "r",
req.getKerberosTicket());
}
throw new IllegalStateException("Bad random number");
}

View File

@ -1,8 +1,8 @@
:version: 7.3.0
:version: 7.4.0
////
bare_version never includes -alpha or -beta
////
:bare_version: 7.3.0
:bare_version: 7.4.0
:major-version: 7.x
:prev-major-version: 6.x
:lucene_version: 8.1.0

View File

@ -52,4 +52,5 @@ include-tagged::{doc-tests-file}[{api}-response]
<3> The running state of the transform indexer, e.g. `started`, `indexing`, etc.
<4> The overall transform statistics recording the number of documents indexed etc.
<5> The progress of the current run in the transform. Supplies the number of docs left until the next checkpoint
and the total number of docs expected.
and the total number of docs expected.
<6> The assigned node information if the task is currently assigned to a node and running.

View File

@ -0,0 +1,50 @@
--
:api: reload-analyzers
:request: ReloadAnalyzersRequest
:response: ReloadAnalyzersResponse
--
[id="{upid}-{api}"]
=== Reload Search Analyzers API
[id="{upid}-{api}-request"]
==== Reload Search Analyzers Request
An +{request}+ requires an `index` argument:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The index to reload
==== Optional arguments
The following arguments can optionally be provided:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-indicesOptions]
--------------------------------------------------
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
how wildcard expressions are expanded
include::../execution.asciidoc[]
[id="{upid}-{api}-response"]
==== Reload Search Analyzers Response
The returned +{response}+ allows retrieving information about the
executed operation as follows:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Shard statistics. Note that reloading does not happen on each shard of an
index, but once on each node the index has shards on. The reported shard count
can therefore differ from the number of index shards
<2> Reloading details of all indices the request was executed on
<3> Details can be retrieved by index name
<4> The reloaded index name
<5> The nodes the index was reloaded on
<6> The analyzer names that were reloaded

View File

@ -19,6 +19,7 @@ A +{request}+ object requires a {dataframe-analytics-config} id.
include-tagged::{doc-tests-file}[{api}-request]
---------------------------------------------------
<1> Constructing a new stop request referencing an existing {dataframe-analytics-config}
<2> Optionally used to stop a failed task
include::../execution.asciidoc[]

View File

@ -0,0 +1,27 @@
--
:api: get-builtin-privileges
:request: GetBuiltinPrivilegesRequest
:response: GetBuiltinPrivilegesResponse
--
[id="{upid}-{api}"]
=== Get Builtin Privileges API
include::../execution-no-req.asciidoc[]
[id="{upid}-{api}-response"]
==== Get Builtin Privileges Response
The returned +{response}+ contains the following properties
`clusterPrivileges`::
A `Set` of all _cluster_ privileges that are understood by this node.
`indexPrivileges`::
A `Set` of all _index_ privileges that are understood by this node.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------

View File

@ -155,6 +155,7 @@ include::indices/get_index.asciidoc[]
include::indices/freeze_index.asciidoc[]
include::indices/unfreeze_index.asciidoc[]
include::indices/delete_template.asciidoc[]
include::indices/reload_analyzers.asciidoc[]
== Cluster APIs
@ -418,6 +419,7 @@ The Java High Level REST Client supports the following Security APIs:
* <<java-rest-high-security-delete-role-mapping>>
* <<java-rest-high-security-create-token>>
* <<{upid}-invalidate-token>>
* <<{upid}-get-builtin-privileges>>
* <<{upid}-get-privileges>>
* <<{upid}-put-privileges>>
* <<{upid}-delete-privileges>>
@ -435,6 +437,7 @@ include::security/put-role.asciidoc[]
include::security/get-roles.asciidoc[]
include::security/delete-role.asciidoc[]
include::security/delete-privileges.asciidoc[]
include::security/get-builtin-privileges.asciidoc[]
include::security/get-privileges.asciidoc[]
include::security/clear-roles-cache.asciidoc[]
include::security/clear-realm-cache.asciidoc[]
@ -582,4 +585,4 @@ include::dataframe/put_data_frame.asciidoc[]
include::dataframe/delete_data_frame.asciidoc[]
include::dataframe/preview_data_frame.asciidoc[]
include::dataframe/start_data_frame.asciidoc[]
include::dataframe/stop_data_frame.asciidoc[]
include::dataframe/stop_data_frame.asciidoc[]

View File

@ -29,7 +29,7 @@ complex:: a datetime representation as a complex type
(<<reference-types, object>>) that abstracts away internal details of how the
datetime is stored and often provides utilities for modification and
comparison; in Painless this is typically a
<<painless-api-reference-shared-ZonedDateTime>>
<<painless-api-reference-shared-ZonedDateTime, ZonedDateTime>>
Switching between different representations of datetimes is often necessary to
achieve a script's objective(s). A typical pattern in a script is to switch a
@ -335,6 +335,43 @@ if (zdt1.isAfter(zdt2)) {
}
----
==== Datetime Zone
Both string datetimes and complex datetimes have a timezone with a default of
`UTC`. Numeric datetimes do not have enough explicit information to
have a timezone, so `UTC` is always assumed. Use
<<painless-api-reference-shared-ZonedDateTime, methods>> (or fields) in
conjunction with a <<painless-api-reference-shared-ZoneId, ZoneId>> to change
the timezone for a complex datetime. Parse a string datetime into a complex
datetime to change the timezone, and then format the complex datetime back into
a desired string datetime. Note many complex datetimes are immutable so upon
modification a new complex datetime is created that requires
<<variable-assignment, assignment>> or immediate use.
===== Datetime Zone Examples
* Modify the timezone for a complex datetime
+
[source,Painless]
----
ZonedDateTime utc =
ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z'));
ZonedDateTime pst = utc.withZoneSameInstant(ZoneId.of('America/Los_Angeles'));
----
+
* Modify the timezone for a string datetime
+
[source,Painless]
----
String gmtString = 'Thu, 13 Oct 1983 22:15:30 GMT';
ZonedDateTime gmtZdt = ZonedDateTime.parse(gmtString,
DateTimeFormatter.RFC_1123_DATE_TIME); <1>
ZonedDateTime pstZdt =
gmtZdt.withZoneSameInstant(ZoneId.of('America/Los_Angeles'));
String pstString = pstZdt.format(DateTimeFormatter.RFC_1123_DATE_TIME);
----
<1> Note the use of a built-in DateTimeFormatter.
==== Datetime Input
There are several common ways datetimes are used as input for a script
@ -372,7 +409,7 @@ through an input called `params`.
+
[source,Painless]
----
long inputDatetime = params['input_datetime'];
long inputDateTime = params['input_datetime'];
Instant instant = Instant.ofEpochMilli(inputDateTime);
ZonedDateTime zdt = ZonedDateTime.ofInstant(instant, ZoneId.of('Z'));
----
@ -432,7 +469,7 @@ source document is most commonly accessible through an input called
+
[source,Painless]
----
long inputDatetime = ctx['_source']['input_datetime']; <1>
long inputDateTime = ctx['_source']['input_datetime']; <1>
Instant instant = Instant.ofEpochMilli(inputDateTime);
ZonedDateTime zdt = ZonedDateTime.ofInstant(instant, ZoneId.of('Z'));
----
@ -513,10 +550,9 @@ String output = input.format(DateTimeFormatter.ISO_INSTANT); <1>
+
** Assumptions:
+
*** The fields `start_datetime` and `end_datetime` may *not* exist in all
indexes as part of the query
*** The fields `start_datetime` and `end_datetime` may *not* have values in all
indexed documents
*** The fields `start` and `end` may *not* exist in all indexes as part of the
query
*** The fields `start` and `end` may *not* have values in all indexed documents
+
** Mappings:
+
@ -527,10 +563,10 @@ indexed documents
...
"properties": {
...
"start_datetime": {
"start": {
"type": "date"
},
"end_datetime": {
"end": {
"type": "date"
}
...
@ -544,14 +580,13 @@ indexed documents
+
[source,Painless]
----
if (doc.containsKey('start_datetime') && doc.containsKey('end_datetime')) { <1>
if (doc.containsKey('start') && doc.containsKey('end')) { <1>
if (doc['start_datetime'].size() > 0 && doc['end_datetime'].size() > 0) { <2>
if (doc['start'].size() > 0 && doc['end'].size() > 0) { <2>
def startDatetime = doc['start_datetime'].value;
def endDatetime = doc['end_datetime'].value;
long differenceInMillis =
ChronoUnit.MILLIS.between(startDateTime, endDateTime);
def start = doc['start'].value;
def end = doc['end'].value;
long differenceInMillis = ChronoUnit.MILLIS.between(start, end);
// handle difference in times
} else {
@ -564,6 +599,122 @@ if (doc.containsKey('start_datetime') && doc.containsKey('end_datetime')) { <1>
<1> When a query's results span multiple indexes, some indexes may not
contain a specific field. Use the `containsKey` method call on the `doc` input
to ensure a field exists as part of the index for the current document.
<2> Some field's within a document may have no values. Use the `size` method
<2> Some fields within a document may have no values. Use the `size` method
call on a field within the `doc` input to ensure that field has at least one
value for the current document.
==== Datetime Now
Under most Painless contexts the current datetime, `now`, is not supported.
There are two primary reasons for this. The first is that scripts are often run once
per document, so each time the script is run a different `now` is returned. The
second is that scripts are often run in a distributed fashion without a way to
appropriately synchronize `now`. Instead, pass in a user-defined parameter with
either a string datetime or numeric datetime for `now`. A numeric datetime is
preferred as there is no need to parse it for comparison.
===== Datetime Now Examples
* Use a numeric datetime as `now`
+
** Assumptions:
+
*** The field `input_datetime` exists in all indexes as part of the query
*** All indexed documents contain the field `input_datetime`
+
** Mappings:
+
[source,JSON]
----
{
"mappings": {
...
"properties": {
...
"input_datetime": {
"type": "date"
}
...
}
...
}
}
----
+
** Input:
+
[source,JSON]
----
...
"script": {
...
"params": {
"now": <generated numeric datetime in milliseconds since epoch>
}
}
...
----
+
** Script:
+
[source,Painless]
----
long now = params['now'];
def inputDateTime = doc['input_datetime'];
long millisDateTime = inputDateTime.value.toInstant().toEpochMilli();
long elapsedTime = now - millisDateTime;
----
+
* Use a string datetime as `now`
+
** Assumptions:
+
*** The field `input_datetime` exists in all indexes as part of the query
*** All indexed documents contain the field `input_datetime`
+
** Mappings:
+
[source,JSON]
----
{
"mappings": {
...
"properties": {
...
"input_datetime": {
"type": "date"
}
...
}
...
}
}
----
+
** Input:
+
[source,JSON]
----
...
"script": {
...
"params": {
"now": "<generated string datetime in ISO-8601>"
}
}
...
----
+
** Script:
+
[source,Painless]
----
String nowString = params['now'];
ZonedDateTime nowZdt = ZonedDateTime.parse(nowString,
        DateTimeFormatter.ISO_ZONED_DATE_TIME); <1>
long now = nowZdt.toInstant().toEpochMilli();
def inputDateTime = doc['input_datetime'];
long millisDateTime = inputDateTime.value.toInstant().toEpochMilli();
long elapsedTime = now - millisDateTime;
----
<1> Note this parses the same string datetime every time the script runs. Use a
numeric datetime to avoid a significant performance hit.

View File

@ -55,6 +55,8 @@ include::bucket/parent-aggregation.asciidoc[]
include::bucket/range-aggregation.asciidoc[]
include::bucket/rare-terms-aggregation.asciidoc[]
include::bucket/reverse-nested-aggregation.asciidoc[]
include::bucket/sampler-aggregation.asciidoc[]
@ -64,3 +66,4 @@ include::bucket/significantterms-aggregation.asciidoc[]
include::bucket/significanttext-aggregation.asciidoc[]
include::bucket/terms-aggregation.asciidoc[]

View File

@ -0,0 +1,357 @@
[[search-aggregations-bucket-rare-terms-aggregation]]
=== Rare Terms Aggregation
A multi-bucket value source based aggregation which finds "rare" terms -- terms that are at the long-tail
of the distribution and are not frequent. Conceptually, this is like a `terms` aggregation that is
sorted by `_count` ascending. As noted in the <<search-aggregations-bucket-terms-aggregation-order,terms aggregation docs>>,
actually ordering a `terms` agg by count ascending has unbounded error. Instead, you should use the `rare_terms`
aggregation.
//////////////////////////
[source,js]
--------------------------------------------------
PUT /products
{
"mappings": {
"properties": {
"genre": {
"type": "keyword"
},
"product": {
"type": "keyword"
}
}
}
}
POST /products/_doc/_bulk?refresh
{"index":{"_id":0}}
{"genre": "rock", "product": "Product A"}
{"index":{"_id":1}}
{"genre": "rock"}
{"index":{"_id":2}}
{"genre": "rock"}
{"index":{"_id":3}}
{"genre": "jazz", "product": "Product Z"}
{"index":{"_id":4}}
{"genre": "jazz"}
{"index":{"_id":5}}
{"genre": "electronic"}
{"index":{"_id":6}}
{"genre": "electronic"}
{"index":{"_id":7}}
{"genre": "electronic"}
{"index":{"_id":8}}
{"genre": "electronic"}
{"index":{"_id":9}}
{"genre": "electronic"}
{"index":{"_id":10}}
{"genre": "swing"}
-------------------------------------------------
// NOTCONSOLE
// TESTSETUP
//////////////////////////
==== Syntax
A `rare_terms` aggregation looks like this in isolation:
[source,js]
--------------------------------------------------
{
"rare_terms": {
"field": "the_field",
"max_doc_count": 1
}
}
--------------------------------------------------
// NOTCONSOLE
.`rare_terms` Parameters
|===
|Parameter Name |Description |Required |Default Value
|`field` |The field we wish to find rare terms in |Required |
|`max_doc_count` |The maximum number of documents a term should appear in. |Optional |`1`
|`precision` |The precision of the internal CuckooFilters. Smaller precision leads to
better approximation, but higher memory usage. Cannot be smaller than `0.00001` |Optional |`0.001`
|`include` |Terms that should be included in the aggregation|Optional |
|`exclude` |Terms that should be excluded from the aggregation|Optional |
|`missing` |The value that should be used if a document does not have the field being aggregated|Optional |
|===
Example:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"genres" : {
"rare_terms" : {
"field" : "genre"
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[s/_search/_search\?filter_path=aggregations/]
Response:
[source,js]
--------------------------------------------------
{
...
"aggregations" : {
"genres" : {
"buckets" : [
{
"key" : "swing",
"doc_count" : 1
}
]
}
}
}
--------------------------------------------------
// TESTRESPONSE[s/\.\.\.//]
In this example, the only bucket that we see is the "swing" bucket, because it is the only term that appears in
one document. If we increase the `max_doc_count` to `2`, we'll see some more buckets:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"genres" : {
"rare_terms" : {
"field" : "genre",
"max_doc_count": 2
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[s/_search/_search\?filter_path=aggregations/]
This now shows the "jazz" term, which has a `doc_count` of 2:
[source,js]
--------------------------------------------------
{
...
"aggregations" : {
"genres" : {
"buckets" : [
{
"key" : "swing",
"doc_count" : 1
},
{
"key" : "jazz",
"doc_count" : 2
}
]
}
}
}
--------------------------------------------------
// TESTRESPONSE[s/\.\.\.//]
[[search-aggregations-bucket-rare-terms-aggregation-max-doc-count]]
==== Maximum document count
The `max_doc_count` parameter is used to control the upper bound of document counts that a term can have. There
is not a size limitation on the `rare_terms` agg like the `terms` agg has. This means that all terms
which match the `max_doc_count` criteria will be returned. The aggregation functions in this manner to avoid
the order-by-ascending issues that afflict the `terms` aggregation.
This does, however, mean that a large number of results can be returned if chosen incorrectly.
To limit the danger of this setting, the maximum `max_doc_count` is 100.
[[search-aggregations-bucket-rare-terms-aggregation-max-buckets]]
==== Max Bucket Limit
The Rare Terms aggregation is more liable to trip the `search.max_buckets` soft limit than other aggregations due
to how it works. The `max_buckets` soft limit is evaluated on a per-shard basis while the aggregation is collecting
results. It is possible for a term to be "rare" on a shard but become "not rare" once all the shard results are
merged together. This means that individual shards tend to collect more buckets than are truly rare, because
they only have their own local view. This list is ultimately pruned to the correct, smaller list of rare
terms on the coordinating node... but a shard may have already tripped the `max_buckets` soft limit and aborted
the request.
When aggregating on fields that have potentially many "rare" terms, you may need to increase the `max_buckets` soft
limit. Alternatively, you might need to find a way to filter the results to return fewer rare values (smaller time
span, filter by category, etc), or re-evaluate your definition of "rare" (e.g. if something
appears 100,000 times, is it truly "rare"?)
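If you do decide to raise the limit, one way to do it from the high level REST client is via a cluster
settings update. The sketch below is illustrative only and is not part of this change; it assumes an already
constructed `RestHighLevelClient` named `client` and an example limit chosen by the caller:
[source,java]
----
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;

public class RaiseMaxBuckets {
    // Raises the search.max_buckets soft limit as a transient cluster setting;
    // consider reverting it once the offending aggregation has been tuned.
    static void raiseMaxBuckets(RestHighLevelClient client, int maxBuckets) throws Exception {
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
        request.transientSettings(Settings.builder()
            .put("search.max_buckets", maxBuckets)
            .build());
        ClusterUpdateSettingsResponse response =
            client.cluster().putSettings(request, RequestOptions.DEFAULT);
        assert response.isAcknowledged();
    }
}
----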
[[search-aggregations-bucket-rare-terms-aggregation-approximate-counts]]
==== Document counts are approximate
The naive way to determine the "rare" terms in a dataset is to place all the values in a map, incrementing counts
as each document is visited, then return the bottom `n` rows. This does not scale beyond even modestly sized data
sets. A sharded approach where only the "top n" values are retained from each shard (as in the `terms` aggregation)
fails because the long-tail nature of the problem means it is impossible to find the "top n" bottom values without
simply collecting all the values from all shards.
Instead, the Rare Terms aggregation uses a different approximate algorithm:
1. Values are placed in a map the first time they are seen.
2. Each additional occurrence of the term increments a counter in the map
3. If the counter > the `max_doc_count` threshold, the term is removed from the map and placed in a
https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf[CuckooFilter]
4. The CuckooFilter is consulted on each term. If the value is inside the filter, it is known to be above the
threshold already and skipped.
After execution, the map of values is the map of "rare" terms under the `max_doc_count` threshold. This map and CuckooFilter
are then merged with all other shards. If a merged term's count is greater than the threshold (or the term appears in
a different shard's CuckooFilter), it is removed from the merged list. The final map of values is returned
to the user as the "rare" terms.
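To make the per-shard collection step concrete, here is a minimal, self-contained Java sketch of the same
pruning idea. It is an illustration only: it uses an exact `HashSet` where Elasticsearch uses an approximate
CuckooFilter (so false positives are not modelled), and none of the class or method names below exist in the
codebase:
[source,java]
----
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class RareTermsSketch {

    /** Collects the terms seen at most maxDocCount times, pruning frequent terms as it goes. */
    static Map<String, Integer> rareTerms(List<String> terms, int maxDocCount) {
        Map<String, Integer> candidates = new HashMap<>();  // terms still considered "rare"
        Set<String> overThreshold = new HashSet<>();        // stand-in for the approximate CuckooFilter

        for (String term : terms) {
            if (overThreshold.contains(term)) {
                continue; // already known to be above the threshold, skip it
            }
            int count = candidates.merge(term, 1, Integer::sum);
            if (count > maxDocCount) {
                candidates.remove(term);  // no longer rare: drop it from the map ...
                overThreshold.add(term);  // ... and remember it in the "filter"
            }
        }
        return candidates; // per-shard map of rare terms, later merged across shards
    }

    public static void main(String[] args) {
        List<String> genres = Arrays.asList("rock", "rock", "rock", "jazz", "jazz",
            "electronic", "electronic", "electronic", "electronic", "electronic", "swing");
        System.out.println(rareTerms(genres, 1)); // {swing=1}
        System.out.println(rareTerms(genres, 2)); // contains swing=1 and jazz=2
    }
}
----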
CuckooFilters have the possibility of returning false positives (they can say a value exists in their collection when
it actually does not). Since the CuckooFilter is being used to see if a term is over threshold, this means a false positive
from the CuckooFilter will mistakenly say a value is common when it is not (and thus exclude it from the final list of buckets).
Practically, this means the aggregation exhibits false-negative behavior since the filter is being used "in reverse"
of how people generally think of approximate set membership sketches.
CuckooFilters are described in more detail in the paper:
https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf[Fan, Bin, et al. "Cuckoo filter: Practically better than bloom."]
Proceedings of the 10th ACM International on Conference on emerging Networking Experiments and Technologies. ACM, 2014.
==== Precision
Although the internal CuckooFilter is approximate in nature, the false-negative rate can be controlled with a
`precision` parameter. This allows the user to trade more runtime memory for more accurate results.
The default precision is `0.001`, and the smallest (e.g. most accurate and largest memory overhead) is `0.00001`.
Below are some charts which demonstrate how the accuracy of the aggregation is affected by precision and number
of distinct terms.
The X-axis shows the number of distinct values the aggregation has seen, and the Y-axis shows the percent error.
Each line series represents one "rarity" condition (ranging from one rare item to 100,000 rare items). For example,
the orange "10" line means ten of the values were "rare" (`doc_count == 1`), out of 1-20m distinct values (where the
rest of the values had `doc_count > 1`)
This first chart shows precision `0.01`:
image:images/rare_terms/accuracy_01.png[]
And precision `0.001` (the default):
image:images/rare_terms/accuracy_001.png[]
And finally `precision 0.0001`:
image:images/rare_terms/accuracy_0001.png[]
The default precision of `0.001` maintains an accuracy of < 2.5% for the tested conditions, and accuracy slowly
degrades in a controlled, linear fashion as the number of distinct values increases.
The default precision of `0.001` has a memory profile of roughly `1.748e-6 * n` MB (about 1.75 bytes per distinct value),
where `n` is the number of distinct values the aggregation has seen (it can also be roughly eyeballed, e.g. 20 million
unique values is about 30mb of memory). The memory usage is linear in the number of distinct values regardless of which
precision is chosen,
the precision only affects the slope of the memory profile as seen in this chart:
image:images/rare_terms/memory.png[]
For comparison, an equivalent terms aggregation at 20 million buckets would be roughly
`20m * 69b == ~1.38gb` (with 69 bytes being a very optimistic estimate of an empty bucket cost, far lower than what
the circuit breaker accounts for). So although the `rare_terms` agg is relatively heavy, it is still orders of
magnitude smaller than the equivalent terms aggregation.
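For a quick back-of-the-envelope comparison of the two figures quoted above, the following standalone snippet
(illustration only, not Elasticsearch code; it simply plugs in the ~1.748e-6 MB-per-value and ~69 bytes-per-bucket
estimates from this section) prints both totals for 20 million distinct values:
[source,java]
----
public class RareTermsMemoryEstimate {
    public static void main(String[] args) {
        long distinctValues = 20_000_000L;
        // rare_terms at the default precision: ~1.748e-6 MB per distinct value
        double rareTermsMb = 1.748e-6 * distinctValues;           // ~35 MB
        // equivalent terms aggregation: ~69 bytes per (empty) bucket
        double termsGb = distinctValues * 69.0 / 1_000_000_000.0; // ~1.38 GB
        System.out.printf("rare_terms: ~%.0f MB, terms: ~%.2f GB%n", rareTermsMb, termsGb);
    }
}
----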
==== Filtering Values
It is possible to filter the values for which buckets will be created. This can be done using the `include` and
`exclude` parameters which are based on regular expression strings or arrays of exact values. Additionally,
`include` clauses can filter using `partition` expressions.
===== Filtering Values with regular expressions
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"genres" : {
"rare_terms" : {
"field" : "genre",
"include" : "swi*",
"exclude" : "electro*"
}
}
}
}
--------------------------------------------------
// CONSOLE
In the above example, buckets will be created for all the genres that start with `swi`, except those starting
with `electro` (so the genre `swing` will be aggregated but not `electro_swing`). The `include` regular expression will determine what
values are "allowed" to be aggregated, while the `exclude` determines the values that should not be aggregated. When
both are defined, the `exclude` has precedence, meaning the `include` is evaluated first and only then the `exclude`.
The syntax is the same as <<regexp-syntax,regexp queries>>.
===== Filtering Values with exact values
For matching based on exact values the `include` and `exclude` parameters can simply take an array of
strings that represent the terms as they are found in the index:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"genres" : {
"rare_terms" : {
"field" : "genre",
"include" : ["swing", "rock"],
"exclude" : ["jazz"]
}
}
}
}
--------------------------------------------------
// CONSOLE
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"genres" : {
"rare_terms" : {
"field" : "genre",
"missing": "N/A" <1>
}
}
}
}
--------------------------------------------------
// CONSOLE
<1> Documents without a value in the `genre` field will fall into the same bucket as documents that have the value `N/A`.
==== Nested, RareTerms, and scoring sub-aggregations
The RareTerms aggregation has to operate in `breadth_first` mode, since it needs to prune terms as doc count thresholds
are breached. This requirement means the RareTerms aggregation is incompatible with certain combinations of aggregations
that require `depth_first`. In particular, scoring sub-aggregations that are inside a `nested` force the entire aggregation tree to run
in `depth_first` mode. This will throw an exception since RareTerms is unable to process `depth_first`.
As a concrete example, if `rare_terms` aggregation is the child of a `nested` aggregation, and one of the child aggregations of `rare_terms`
needs document scores (like a `top_hits` aggregation), this will throw an exception.
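As an illustrative sketch of that problematic shape (the index and field names are hypothetical), a request like the following would be rejected, because `top_hits` needs document scores and forces `depth_first` execution under the `nested` aggregation:
[source,js]
--------------------------------------------------
GET /_search
{
    "aggs" : {
        "comments" : {
            "nested" : { "path" : "comments" },
            "aggs" : {
                "rare_authors" : {
                    "rare_terms" : { "field" : "comments.author" },
                    "aggs" : {
                        "example_hit" : {
                            "top_hits" : { "size" : 1 } <1>
                        }
                    }
                }
            }
        }
    }
}
--------------------------------------------------
// NOTCONSOLE
<1> `top_hits` requires document scores, which cannot be computed in the `breadth_first` mode that `rare_terms` must use.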

View File

@ -90,7 +90,17 @@ The data that is returned for this example is as follows:
"customer_id" : "12"
}
...
]
],
"mappings": {
"properties": {
"max_price": {
"type": "double"
},
"customer_id": {
"type": "keyword"
}
}
}
}
----
// NOTCONSOLE

View File

@ -5,12 +5,12 @@
[float]
=== Introduction
Each index in Elasticsearch is <<getting-started-shards-and-replicas,divided into shards>>
Each index in Elasticsearch is <<scalability,divided into shards>>
and each shard can have multiple copies. These copies are known as a _replication group_ and must be kept in sync when documents
are added or removed. If we fail to do so, reading from one copy will result in very different results than reading from another.
The process of keeping the shard copies in sync and serving reads from them is what we call the _data replication model_.
Elasticsearch's data replication model is based on the _primary-backup model_ and is described very well in the
https://www.microsoft.com/en-us/research/publication/pacifica-replication-in-log-based-distributed-storage-systems/[PacificA paper] of
Microsoft Research. That model is based on having a single copy from the replication group that acts as the primary shard.
The other copies are called _replica shards_. The primary serves as the main entry point for all indexing operations. It is in charge of
@ -23,7 +23,7 @@ it has for various interactions between write and read operations.
[float]
=== Basic write model
Every indexing operation in Elasticsearch is first resolved to a replication group using <<index-routing,routing>>,
typically based on the document ID. Once the replication group has been determined,
the operation is forwarded internally to the current _primary shard_ of the group. The primary shard is responsible
for validating the operation and forwarding it to the other replicas. Since replicas can be offline, the primary
@ -50,7 +50,7 @@ configuration mistake could cause an operation to fail on a replica despite it b
are infrequent but the primary has to respond to them.
In the case that the primary itself fails, the node hosting the primary will send a message to the master about it. The indexing
operation will wait (up to 1 minute, by <<dynamic-index-settings,default>>) for the master to promote one of the replicas to be a
new primary. The operation will then be forwarded to the new primary for processing. Note that the master also monitors the
health of the nodes and may decide to proactively demote a primary. This typically happens when the node holding the primary
is isolated from the cluster by a networking issue. See <<demoted-primary,here>> for more details.
@ -60,8 +60,8 @@ when executing it on the replica shards. This may be caused by an actual failure
issue preventing the operation from reaching the replica (or preventing the replica from responding). All of these
share the same end result: a replica which is part of the in-sync replica set misses an operation that is about to
be acknowledged. In order to avoid violating the invariant, the primary sends a message to the master requesting
that the problematic shard be removed from the in-sync replica set. Only once removal of the shard has been acknowledged
by the master does the primary acknowledge the operation. Note that the master will also instruct another node to start
building a new shard copy in order to restore the system to a healthy state.
[[demoted-primary]]
@ -72,13 +72,13 @@ will be rejected by the replicas. When the primary receives a response from the
it is no longer the primary then it will reach out to the master and will learn that it has been replaced. The
operation is then routed to the new primary.
.What happens if there are no replicas?
************
This is a valid scenario that can happen due to index configuration or simply
because all the replicas have failed. In that case the primary is processing operations without any external validation,
which may seem problematic. On the other hand, the primary cannot fail other shards on its own but request the master to do
so on its behalf. This means that the master knows that the primary is the only single good copy. We are therefore guaranteed
that the master will not promote any other (out-of-date) shard copy to be a new primary and that any operation indexed
into the primary will not be lost. Of course, since at that point we are running with only single copy of the data, physical hardware
issues can cause data loss. See <<index-wait-for-active-shards>> for some mitigation options.
************
@ -91,7 +91,7 @@ take non-trivial CPU power. One of the beauties of the primary-backup model is t
(with the exception of in-flight operations). As such, a single in-sync copy is sufficient to serve read requests.
When a read request is received by a node, that node is responsible for forwarding it to the nodes that hold the relevant shards,
collating the responses, and responding to the client. We call that node the _coordinating node_ for that request. The basic flow
is as follows:
. Resolve the read requests to the relevant shards. Note that since most searches will be sent to one or more indices,
@ -153,8 +153,8 @@ Dirty reads:: An isolated primary can expose writes that will not be acknowledge
[float]
=== The Tip of the Iceberg
This document provides a high level overview of how Elasticsearch deals with data. Of course, there is much much more
going on under the hood. Things like primary terms, cluster state publishing, and master election all play a role in
keeping this system behaving correctly. This document also doesn't cover known and important
bugs (both closed and open). We recognize that https://github.com/elastic/elasticsearch/issues?q=label%3Aresiliency[GitHub is hard to keep up with].
To help people stay on top of those, we maintain a dedicated https://www.elastic.co/guide/en/elasticsearch/resiliency/current/index.html[resiliency page]

View File

@ -1,108 +1,34 @@
[[getting-started]]
= Getting started
= Getting started with {es}
[partintro]
--
TIP: The fastest way to get started with {es} is to https://www.elastic.co/cloud/elasticsearch-service/signup[start a free 14-day trial of Elasticsearch Service] in the cloud.
Ready to take {es} for a test drive and see for yourself how you can use the
REST APIs to store, search, and analyze data?
Elasticsearch is a highly scalable open-source full-text search and analytics engine. It allows you to store, search, and analyze big volumes of data quickly and in near real time. It is generally used as the underlying engine/technology that powers applications that have complex search features and requirements.
Step through this getting started tutorial to:
Here are a few sample use-cases that Elasticsearch could be used for:
. Get an {es} instance up and running
. Index some sample documents
. Search for documents using the {es} query language
. Analyze the results using bucket and metrics aggregations
* You run an online web store where you allow your customers to search for products that you sell. In this case, you can use Elasticsearch to store your entire product catalog and inventory and provide search and autocomplete suggestions for them.
* You want to collect log or transaction data and you want to analyze and mine this data to look for trends, statistics, summarizations, or anomalies. In this case, you can use Logstash (part of the Elasticsearch/Logstash/Kibana stack) to collect, aggregate, and parse your data, and then have Logstash feed this data into Elasticsearch. Once the data is in Elasticsearch, you can run searches and aggregations to mine any information that is of interest to you.
* You run a price alerting platform which allows price-savvy customers to specify a rule like "I am interested in buying a specific electronic gadget and I want to be notified if the price of gadget falls below $X from any vendor within the next month". In this case you can scrape vendor prices, push them into Elasticsearch and use its reverse-search (Percolator) capability to match price movements against customer queries and eventually push the alerts out to the customer once matches are found.
* You have analytics/business-intelligence needs and want to quickly investigate, analyze, visualize, and ask ad-hoc questions on a lot of data (think millions or billions of records). In this case, you can use Elasticsearch to store your data and then use Kibana (part of the Elasticsearch/Logstash/Kibana stack) to build custom dashboards that can visualize aspects of your data that are important to you. Additionally, you can use the Elasticsearch aggregations functionality to perform complex business intelligence queries against your data.
For the rest of this tutorial, you will be guided through the process of getting Elasticsearch up and running, taking a peek inside it, and performing basic operations like indexing, searching, and modifying your data. At the end of this tutorial, you should have a good idea of what Elasticsearch is, how it works, and hopefully be inspired to see how you can use it to either build sophisticated search applications or to mine intelligence from your data.
Need more context?
Check out the <<elasticsearch-intro,
Elasticsearch Introduction>> to learn the lingo and understand the basics of
how {es} works. If you're already familiar with {es} and want to see how it works
with the rest of the stack, you might want to jump to the
{stack-gs}/get-started-elastic-stack.html[Elastic Stack
Tutorial] to see how to set up a system monitoring solution with {es}, {kib},
{beats}, and {ls}.
TIP: The fastest way to get started with {es} is to
https://www.elastic.co/cloud/elasticsearch-service/signup[start a free 14-day
trial of Elasticsearch Service] in the cloud.
--
[[getting-started-concepts]]
== Basic Concepts
There are a few concepts that are core to Elasticsearch. Understanding these concepts from the outset will tremendously help ease the learning process.
[float]
=== Near Realtime (NRT)
Elasticsearch is a near-realtime search platform. What this means is there is a slight latency (normally one second) from the time you index a document until the time it becomes searchable.
[float]
=== Cluster
A cluster is a collection of one or more nodes (servers) that together holds your entire data and provides federated indexing and search capabilities across all nodes. A cluster is identified by a unique name which by default is "elasticsearch". This name is important because a node can only be part of a cluster if the node is set up to join the cluster by its name.
Make sure that you don't reuse the same cluster names in different
environments, otherwise you might end up with nodes joining the wrong cluster.
For instance you could use `logging-dev`, `logging-stage`, and `logging-prod`
for the development, staging, and production clusters.
Note that it is valid and perfectly fine to have a cluster with only a single node in it. Furthermore, you may also have multiple independent clusters each with its own unique cluster name.
[float]
=== Node
A node is a single server that is part of your cluster, stores your data, and participates in the cluster's indexing and search
capabilities. Just like a cluster, a node is identified by a name which by default is a random Universally Unique IDentifier (UUID) that is assigned to the node at startup. You can define any node name you want if you do not want the default. This name is important for administration purposes where you want to identify which servers in your network correspond to which nodes in your Elasticsearch cluster.
A node can be configured to join a specific cluster by the cluster name. By default, each node is set up to join a cluster named `elasticsearch` which means that if you start up a number of nodes on your network and--assuming they can discover each other--they will all automatically form and join a single cluster named `elasticsearch`.
In a single cluster, you can have as many nodes as you want. Furthermore, if there are no other Elasticsearch nodes currently running on your network, starting a single node will by default form a new single-node cluster named `elasticsearch`.
[float]
=== Index
An index is a collection of documents that have somewhat similar characteristics. For example, you can have an index for customer data, another index for a product catalog, and yet another index for order data. An index is identified by a name (that must be all lowercase) and this name is used to refer to the index when performing indexing, search, update, and delete operations against the documents in it.
In a single cluster, you can define as many indexes as you want.
[float]
=== Type
deprecated[6.0.0,See <<removal-of-types>>]
A type used to be a logical category/partition of your index to allow you to store different types of documents in the same index, e.g. one type for users, another type for blog posts. It is no longer possible to create multiple types in an index, and the whole concept of types will be removed in a later version. See <<removal-of-types>> for more.
[float]
=== Document
A document is a basic unit of information that can be indexed. For example, you can have a document for a single customer, another document for a single product, and yet another for a single order. This document is expressed in http://json.org/[JSON] (JavaScript Object Notation) which is a ubiquitous internet data interchange format. Within an index, you can store as many documents as you want.
[[getting-started-shards-and-replicas]]
[float]
=== Shards & Replicas
An index can potentially store a large amount of data that can exceed the hardware limits of a single node. For example, a single index of a billion documents taking up 1TB of disk space may not fit on the disk of a single node or may be too slow to serve search requests from a single node alone.
To solve this problem, Elasticsearch provides the ability to subdivide your index into multiple pieces called shards. When you create an index, you can simply define the number of shards that you want. Each shard is in itself a fully-functional and independent "index" that can be hosted on any node in the cluster.
Sharding is important for two primary reasons:
* It allows you to horizontally split/scale your content volume
* It allows you to distribute and parallelize operations across shards (potentially on multiple nodes) thus increasing performance/throughput
The mechanics of how a shard is distributed and also how its documents are aggregated back into search requests are completely managed by Elasticsearch and is transparent to you as the user.
In a network/cloud environment where failures can be expected anytime, it is very useful and highly recommended to have a failover mechanism in case a shard/node somehow goes offline or disappears for whatever reason. To this end, Elasticsearch allows you to make one or more copies of your index's shards into what are called replica shards, or replicas for short.
Replication is important for two primary reasons:
* It provides high availability in case a shard/node fails. For this reason, it is important to note that a replica shard is never allocated on the same node as the original/primary shard that it was copied from.
* It allows you to scale out your search volume/throughput since searches can be executed on all replicas in parallel.
To summarize, each index can be split into multiple shards. An index can also be replicated zero (meaning no replicas) or more times. Once replicated, each index will have primary shards (the original shards that were replicated from) and replica shards (the copies of the primary shards).
The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may also change the number of replicas dynamically anytime. You can change the number of shards for an existing index using the {ref}/indices-shrink-index.html[`_shrink`] and {ref}/indices-split-index.html[`_split`] APIs, however this is not a trivial task and pre-planning for the correct number of shards is the optimal approach.
By default, each index in Elasticsearch is allocated one primary shard and one replica which means that if you have at least two nodes in your cluster, your index will have one primary shard and another replica shard (one complete replica) for a total of two shards per index.
NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents.
You can monitor shard sizes using the {ref}/cat-shards.html[`_cat/shards`] API.
With that out of the way, let's get started with the fun part...
[[getting-started-install]]
== Installation

View File

@ -120,9 +120,9 @@ configuring allocation rules is optional. When configuring allocation rules,
setting number of replicas is optional. Although this action can be treated as
two separate index settings updates, both can be configured at once.
Read more about index replicas <<getting-started-shards-and-replicas,here>>.
Read more about shard allocation filtering in
the <<shard-allocation-filtering,Shard allocation filtering documentation>>.
For more information about how {es} uses replicas for scaling, see
<<scalability>>. See <<shard-allocation-filtering>> for more information about
controlling where Elasticsearch allocates shards of a particular index.
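For example, a minimal sketch of an `allocate` action that changes both settings at once (the policy name, phase, custom attribute, and values are illustrative, not required choices):
[source,js]
--------------------------------------------------
PUT _ilm/policy/my_policy
{
  "policy": {
    "phases": {
      "warm": {
        "actions": {
          "allocate" : {
            "number_of_replicas" : 1,           <1>
            "include" : { "box_type" : "warm" } <2>
          }
        }
      }
    }
  }
}
--------------------------------------------------
// CONSOLE
<1> Reduce the index to one replica when it enters the warm phase.
<2> Only allocate shards to nodes that carry the custom attribute `box_type: warm`.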
[[ilm-allocate-options]]
.Allocate Options

Binary files not shown (four new PNG images: the `rare_terms` accuracy and memory charts referenced above).
View File

@ -69,4 +69,37 @@ reload to ensure the new state of the file is reflected everywhere in the cluste
POST /my_index/_reload_search_analyzers
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT my_index\n/]
// TEST[continued]
The reload request returns information about the nodes it was executed on and the
analyzers that were reloaded:
[source,js]
--------------------------------------------------
{
"_shards" : {
"total" : 2,
"successful" : 2,
"failed" : 0
},
"reload_details" : [
{
"index" : "my_index",
"reloaded_analyzers" : [
"my_synonyms"
],
"reloaded_node_ids" : [
"mfdqTXn_T7SGr2Ho2KT8uw"
]
}
]
}
--------------------------------------------------
// TEST[continued]
// TESTRESPONSE[s/"total" : 2/"total" : $body._shards.total/]
// TESTRESPONSE[s/"successful" : 2/"successful" : $body._shards.successful/]
// TESTRESPONSE[s/mfdqTXn_T7SGr2Ho2KT8uw/$body.reload_details.0.reloaded_node_ids.0/]
NOTE: Reloading does not happen on each shard of an index, but once on each node
the index has shards on. The total shard count can therefore differ from the number
of index shards.

View File

@ -2,23 +2,69 @@
== Open / Close Index API
The open and close index APIs allow you to close an index, and later on
opening it. A closed index has almost no overhead on the cluster (except
for maintaining its metadata), and is blocked for read/write operations.
A closed index can be opened which will then go through the normal
recovery process.
reopen it.
The REST endpoint is `/{index}/_close` and `/{index}/_open`. For
example:
A closed index is blocked for read/write operations and does not allow
all operations that opened indices allow. It is not possible to index
documents or to search for documents in a closed index. This allows
closed indices to not have to maintain internal data structures for
indexing or searching documents, resulting in a smaller overhead on
the cluster.
When opening or closing an index, the master is responsible for
restarting the index shards to reflect the new state of the index.
The shards will then go through the normal recovery process. The
data of opened/closed indices is automatically replicated by the
cluster to ensure that enough shard copies are safely kept around
at all times.
The REST endpoint is `/{index}/_close` and `/{index}/_open`.
The following example shows how to close an index:
[source,js]
--------------------------------------------------
POST /my_index/_close
POST /my_index/_open
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT my_index\n/]
This will return the following response:
[source,js]
--------------------------------------------------
{
"acknowledged" : true,
"shards_acknowledged" : true,
"indices" : {
"my_index" : {
"closed" : true
}
}
}
--------------------------------------------------
// TESTRESPONSE
A closed index can be reopened like this:
[source,js]
--------------------------------------------------
POST /my_index/_open
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT my_index\nPOST my_index\/_close\n/]
which will yield the following response:
[source,js]
--------------------------------------------------
{
"acknowledged" : true,
"shards_acknowledged" : true
}
--------------------------------------------------
// TESTRESPONSE
It is possible to open and close multiple indices. An error will be thrown
if the request explicitly refers to a missing index. This behaviour can be
disabled using the `ignore_unavailable=true` parameter.
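For example, a sketch of closing two indices in one request while ignoring any that do not exist (the index names are illustrative):
[source,js]
--------------------------------------------------
POST /my_index,my_other_index/_close?ignore_unavailable=true
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT my_index\nPUT my_other_index\n/]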
@ -36,6 +82,6 @@ API by setting `cluster.indices.close.enable` to `false`. The default is `true`.
[float]
=== Wait For Active Shards
Because opening an index allocates its shards, the
Because opening or closing an index allocates its shards, the
<<create-index-wait-for-active-shards,`wait_for_active_shards`>> setting on
index creation applies to the index opening action as well.
index creation applies to the `_open` and `_close` index actions as well.

View File

@ -23,7 +23,30 @@ another processor that renames a field. The <<cluster-state,cluster state>> then
the configured pipelines.
To use a pipeline, simply specify the `pipeline` parameter on an index or bulk request. This
way, the ingest node knows which pipeline to use. For example:
way, the ingest node knows which pipeline to use.
For example:
Create a pipeline
[source,js]
--------------------------------------------------
PUT _ingest/pipeline/my_pipeline_id
{
"description" : "describe pipeline",
"processors" : [
{
"set" : {
"field": "foo",
"value": "new"
}
}
]
}
--------------------------------------------------
// CONSOLE
// TEST
Index with defined pipeline
[source,js]
--------------------------------------------------
@ -33,7 +56,27 @@ PUT my-index/_doc/my-id?pipeline=my_pipeline_id
}
--------------------------------------------------
// CONSOLE
// TEST[catch:bad_request]
// TEST[continued]
Response
[source,js]
--------------------------------------------------
{
"_index" : "my-index",
"_type" : "_doc",
"_id" : "my-id",
"_version" : 1,
"result" : "created",
"_shards" : {
"total" : 2,
"successful" : 2,
"failed" : 0
},
"_seq_no" : 0,
"_primary_term" : 1
}
--------------------------------------------------
// TESTRESPONSE[s/"successful" : 2/"successful" : 1/]
An index may also declare a <<dynamic-index-settings,default pipeline>> that will be used in the
absence of the `pipeline` parameter.
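As a minimal sketch (reusing the index and pipeline names from the example above), the default pipeline is a dynamic index setting:
[source,js]
--------------------------------------------------
PUT my-index/_settings
{
  "index.default_pipeline": "my_pipeline_id"
}
--------------------------------------------------
// CONSOLE
// TEST[continued]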

View File

@ -258,7 +258,7 @@ Elasticsearch 6.x::
* The `_default_` mapping type is deprecated.
* In 6.7, the index creation, index template, and mapping APIs support a query
* In 6.8, the index creation, index template, and mapping APIs support a query
string parameter (`include_type_name`) which indicates whether requests and
responses should include a type name. It defaults to `true`, and should be set
to an explicit value to prepare to upgrade to 7.0. Not setting `include_type_name`
@ -442,12 +442,12 @@ documents to it using typeless `index` calls, and load documents with typeless
Index creation, index template, and mapping APIs support a new `include_type_name`
URL parameter that specifies whether mapping definitions in requests and responses
should contain the type name. The parameter defaults to `true` in version 6.7 to
should contain the type name. The parameter defaults to `true` in version 6.8 to
match the pre-7.0 behavior of using type names in mappings. It defaults to `false`
in version 7.0 and will be removed in version 8.0.
It should be set explicitly in 6.7 to prepare to upgrade to 7.0. To avoid deprecation
warnings in 6.7, the parameter can be set to either `true` or `false`. In 7.0, setting
It should be set explicitly in 6.8 to prepare to upgrade to 7.0. To avoid deprecation
warnings in 6.8, the parameter can be set to either `true` or `false`. In 7.0, setting
`include_type_name` at all will result in a deprecation warning.
See some examples of interactions with Elasticsearch with this option set to `false`:
@ -717,12 +717,12 @@ indices.
[float]
==== Mixed-version clusters
In a cluster composed of both 6.7 and 7.0 nodes, the parameter
In a cluster composed of both 6.8 and 7.0 nodes, the parameter
`include_type_name` should be specified in indices APIs like index
creation. This is because the parameter has a different default between
6.7 and 7.0, so the same mapping definition will not be valid for both
6.8 and 7.0, so the same mapping definition will not be valid for both
node versions.
Typeless document APIs such as `bulk` and `update` are only available as of
7.0, and will not work with 6.7 nodes. This also holds true for the typeless
7.0, and will not work with 6.8 nodes. This also holds true for the typeless
versions of queries that perform document lookups, such as `terms`.

View File

@ -42,8 +42,6 @@ string:: <<text,`text`>> and <<keyword,`keyword`>>
<<parent-join>>:: Defines parent/child relation for documents within the same index
<<alias>>:: Defines an alias to an existing field.
<<rank-feature>>:: Record numeric feature to boost hits at query time.
<<rank-features>>:: Record numeric features to boost hits at query time.
@ -54,6 +52,11 @@ string:: <<text,`text`>> and <<keyword,`keyword`>>
<<search-as-you-type>>:: A text-like field optimized for queries to implement as-you-type completion
<<alias>>:: Defines an alias to an existing field.
<<flattened>>:: Allows an entire JSON object to be indexed as a single field.
[float]
=== Multi-fields
@ -82,6 +85,8 @@ include::types/date.asciidoc[]
include::types/date_nanos.asciidoc[]
include::types/flattened.asciidoc[]
include::types/geo-point.asciidoc[]
include::types/geo-shape.asciidoc[]

View File

@ -7,9 +7,7 @@ experimental[]
A `dense_vector` field stores dense vectors of float values.
The maximum number of dimensions that can be in a vector should
not exceed 1024. The number of dimensions can be
different across documents. A `dense_vector` field is
a single-valued field.
not exceed 1024. A `dense_vector` field is a single-valued field.
These vectors can be used for <<vector-functions,document scoring>>.
For example, a document score can represent a distance between
@ -24,7 +22,8 @@ PUT my_index
"mappings": {
"properties": {
"my_vector": {
"type": "dense_vector"
"type": "dense_vector",
"dims": 3 <1>
},
"my_text" : {
"type" : "keyword"
@ -42,13 +41,14 @@ PUT my_index/_doc/1
PUT my_index/_doc/2
{
"my_text" : "text2",
"my_vector" : [-0.5, 10, 10, 4]
"my_vector" : [-0.5, 10, 10]
}
--------------------------------------------------
// CONSOLE
<1> `dims`: the number of dimensions in the vector; a required parameter.
Internally, each document's dense vector is encoded as a binary
doc value. Its size in bytes is equal to
`4 * NUMBER_OF_DIMENSIONS`, where `NUMBER_OF_DIMENSIONS` -
number of the vector's dimensions.
`4 * dims`, where `dims` is the number of the vector's dimensions.
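For instance, each vector in the three-dimensional `my_vector` field above takes `4 * 3 = 12` bytes of doc-value storage.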

View File

@ -0,0 +1,188 @@
[role="xpack"]
[testenv="basic"]
[[flattened]]
=== Flattened datatype
By default, each subfield in an object is mapped and indexed separately. If
the names or types of the subfields are not known in advance, then they are
<<dynamic-mapping, mapped dynamically>>.
The `flattened` type provides an alternative approach, where the entire
object is mapped as a single field. Given an object, the `flattened`
mapping will parse out its leaf values and index them into one field as
keywords. The object's contents can then be searched through simple queries
and aggregations.
This data type can be useful for indexing objects with a large or unknown
number of unique keys. Only one field mapping is created for the whole JSON
object, which can help prevent a <<mapping-limit-settings, mappings explosion>>
from having too many distinct field mappings.
On the other hand, flattened object fields present a trade-off in terms of
search functionality. Only basic queries are allowed, with no support for
numeric range queries or highlighting. Further information on the limitations
can be found in the <<supported-operations, Supported operations>> section.
NOTE: The `flattened` mapping type should **not** be used for indexing all
document content, as it treats all values as keywords and does not provide full
search functionality. The default approach, where each subfield has its own
entry in the mappings, works well in the majority of cases.
A flattened object field can be created as follows:
[source,js]
--------------------------------
PUT bug_reports
{
"mappings": {
"properties": {
"title": {
"type": "text"
},
"labels": {
"type": "flattened"
}
}
}
}
POST bug_reports/_doc/1
{
"title": "Results are not sorted correctly.",
"labels": {
"priority": "urgent",
"release": ["v1.2.5", "v1.3.0"],
"timestamp": {
"created": 1541458026,
"closed": 1541457010
}
}
}
--------------------------------
// CONSOLE
// TESTSETUP
During indexing, tokens are created for each leaf value in the JSON object. The
values are indexed as string keywords, without analysis or special handling for
numbers or dates.
Querying the top-level `flattened` field searches all leaf values in the
object:
[source,js]
--------------------------------
POST bug_reports/_search
{
"query": {
"term": {"labels": "urgent"}
}
}
--------------------------------
// CONSOLE
To query on a specific key in the flattened object, object dot notation is used:
[source,js]
--------------------------------
POST bug_reports/_search
{
"query": {
"term": {"labels.release": "v1.3.0"}
}
}
--------------------------------
// CONSOLE
[[supported-operations]]
==== Supported operations
Because of the similarities in the way values are indexed, `flattened`
fields share much of the same mapping and search functionality as
<<keyword, `keyword`>> fields.
Currently, flattened object fields can be used with the following query types:
- `term`, `terms`, and `terms_set`
- `prefix`
- `range`
- `match` and `multi_match`
- `query_string` and `simple_query_string`
- `exists`
When querying, it is not possible to refer to field keys using wildcards, as in
`{ "term": {"labels.time*": 1541457010}}`. Note that all queries, including
`range`, treat the values as string keywords. Highlighting is not supported on
`flattened` fields.
It is possible to sort on a flattened object field, as well as perform simple
keyword-style aggregations such as `terms`. As with queries, there is no
special support for numerics -- all values in the JSON object are treated as
keywords. When sorting, this implies that values are compared
lexicographically.
Flattened object fields currently cannot be stored. It is not possible to
specify the <<mapping-store, `store`>> parameter in the mapping.
[[flattened-params]]
==== Parameters for flattened object fields
The following mapping parameters are accepted:
[horizontal]
<<mapping-boost,`boost`>>::
Mapping field-level query time boosting. Accepts a floating point number,
defaults to `1.0`.
`depth_limit`::
The maximum allowed depth of the flattened object field, in terms of nested
inner objects. If a flattened object field exceeds this limit, then an
error will be thrown. Defaults to `20`.
<<doc-values,`doc_values`>>::
Should the field be stored on disk in a column-stride fashion, so that it
can later be used for sorting, aggregations, or scripting? Accepts `true`
(default) or `false`.
<<eager-global-ordinals,`eager_global_ordinals`>>::
Should global ordinals be loaded eagerly on refresh? Accepts `true` or
`false` (default). Enabling this is a good idea on fields that are
frequently used for terms aggregations.
<<ignore-above,`ignore_above`>>::
Leaf values longer than this limit will not be indexed. By default, there
is no limit and all values will be indexed. Note that this limit applies
to the leaf values within the flattened object field, and not the length of
the entire field.
<<mapping-index,`index`>>::
Determines if the field should be searchable. Accepts `true` (default) or
`false`.
<<index-options,`index_options`>>::
What information should be stored in the index for scoring purposes.
Defaults to `docs` but can also be set to `freqs` to take term frequency
into account when computing scores.
<<null-value,`null_value`>>::
A string value which is substituted for any explicit `null` values within
the flattened object field. Defaults to `null`, which means null fields are
treated as if they were missing.
<<similarity,`similarity`>>::
Which scoring algorithm or _similarity_ should be used. Defaults
to `BM25`.
`split_queries_on_whitespace`::
Whether <<full-text-queries,full text queries>> should split the input on
whitespace when building a query for this field. Accepts `true` or `false`
(default).
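As an illustrative sketch combining a few of these parameters (the index name and the chosen limits are arbitrary):
[source,js]
--------------------------------
PUT bug_reports_archive
{
  "mappings": {
    "properties": {
      "labels": {
        "type": "flattened",
        "depth_limit": 5,   <1>
        "ignore_above": 256 <2>
      }
    }
  }
}
--------------------------------
// CONSOLE
<1> Indexing fails if the `labels` object is nested more than five levels deep.
<2> Leaf values longer than 256 characters are not indexed.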

View File

@ -20,6 +20,8 @@ coming[7.3.0]
[[breaking_73_mapping_changes]]
=== Mapping changes
The `dense_vector` field type now requires a `dims` parameter, specifying the number of
dimensions for document and query vectors for this field.
[float]
==== Defining multi-fields within multi-fields

View File

@ -0,0 +1,30 @@
[[breaking-changes-7.4]]
== Breaking changes in 7.4
++++
<titleabbrev>7.4</titleabbrev>
++++
This section discusses the changes that you need to be aware of when migrating
your application to Elasticsearch 7.4.
See also <<release-highlights>> and <<es-release-notes>>.
coming[7.4.0]
//NOTE: The notable-breaking-changes tagged regions are re-used in the
//Installation and Upgrade Guide
//tag::notable-breaking-changes[]
// end::notable-breaking-changes[]
[[breaking_74_plugin_changes]]
=== Plugins changes
[float]
==== TokenizerFactory changes
TokenizerFactory now has a `name()` method that must be implemented. Most
plugin-provided TokenizerFactory implementations will extend `AbstractTokenizerFactory`,
which now takes a `name` parameter in its constructor.

View File

@ -109,18 +109,20 @@ To create a dedicated master-eligible node in the {default-dist}, set:
[source,yaml]
-------------------
node.master: true <1>
node.data: false <2>
node.ingest: false <3>
node.ml: false <4>
xpack.ml.enabled: true <5>
cluster.remote.connect: false <6>
node.voting_only: false <2>
node.data: false <3>
node.ingest: false <4>
node.ml: false <5>
xpack.ml.enabled: true <6>
cluster.remote.connect: false <7>
-------------------
<1> The `node.master` role is enabled by default.
<2> Disable the `node.data` role (enabled by default).
<3> Disable the `node.ingest` role (enabled by default).
<4> Disable the `node.ml` role (enabled by default).
<5> The `xpack.ml.enabled` setting is enabled by default.
<6> Disable {ccs} (enabled by default).
<2> The `node.voting_only` role is disabled by default.
<3> Disable the `node.data` role (enabled by default).
<4> Disable the `node.ingest` role (enabled by default).
<5> Disable the `node.ml` role (enabled by default).
<6> The `xpack.ml.enabled` setting is enabled by default.
<7> Disable {ccs} (enabled by default).
To create a dedicated master-eligible node in the {oss-dist}, set:
@ -177,6 +179,30 @@ reasonably fast persistent storage and a reliable and low-latency network
connection to the rest of the cluster, since they are on the critical path for
<<cluster-state-publishing,publishing cluster state updates>>.
Voting-only master-eligible nodes may also fill other roles in your cluster.
For instance, a node may be both a data node and a voting-only master-eligible
node. A _dedicated_ voting-only master-eligible node is a voting-only
master-eligible node that fills no other roles in the cluster. To create a
dedicated voting-only master-eligible node in the {default-dist}, set:
[source,yaml]
-------------------
node.master: true <1>
node.voting_only: true <2>
node.data: false <3>
node.ingest: false <4>
node.ml: false <5>
xpack.ml.enabled: true <6>
cluster.remote.connect: false <7>
-------------------
<1> The `node.master` role is enabled by default.
<2> Enable the `node.voting_only` role (disabled by default).
<3> Disable the `node.data` role (enabled by default).
<4> Disable the `node.ingest` role (enabled by default).
<5> Disable the `node.ml` role (enabled by default).
<6> The `xpack.ml.enabled` setting is enabled by default.
<7> Disable {ccs} (enabled by default).
[float]
[[data-node]]
=== Data Node
@ -193,16 +219,18 @@ To create a dedicated data node in the {default-dist}, set:
[source,yaml]
-------------------
node.master: false <1>
node.data: true <2>
node.ingest: false <3>
node.ml: false <4>
cluster.remote.connect: false <5>
node.voting_only: false <2>
node.data: true <3>
node.ingest: false <4>
node.ml: false <5>
cluster.remote.connect: false <6>
-------------------
<1> Disable the `node.master` role (enabled by default).
<2> The `node.data` role is enabled by default.
<3> Disable the `node.ingest` role (enabled by default).
<4> Disable the `node.ml` role (enabled by default).
<5> Disable {ccs} (enabled by default).
<2> The `node.voting_only` role is disabled by default.
<3> The `node.data` role is enabled by default.
<4> Disable the `node.ingest` role (enabled by default).
<5> Disable the `node.ml` role (enabled by default).
<6> Disable {ccs} (enabled by default).
To create a dedicated data node in the {oss-dist}, set:
[source,yaml]
@ -231,16 +259,18 @@ To create a dedicated ingest node in the {default-dist}, set:
[source,yaml]
-------------------
node.master: false <1>
node.data: false <2>
node.ingest: true <3>
node.ml: false <4>
cluster.remote.connect: false <5>
node.voting_only: false <2>
node.data: false <3>
node.ingest: true <4>
node.ml: false <5>
cluster.remote.connect: false <6>
-------------------
<1> Disable the `node.master` role (enabled by default).
<2> Disable the `node.data` role (enabled by default).
<3> The `node.ingest` role is enabled by default.
<4> Disable the `node.ml` role (enabled by default).
<5> Disable {ccs} (enabled by default).
<2> The `node.voting_only` role is disabled by default.
<3> Disable the `node.data` role (enabled by default).
<4> The `node.ingest` role is enabled by default.
<5> Disable the `node.ml` role (enabled by default).
<6> Disable {ccs} (enabled by default).
To create a dedicated ingest node in the {oss-dist}, set:
@ -282,16 +312,18 @@ To create a dedicated coordinating node in the {default-dist}, set:
[source,yaml]
-------------------
node.master: false <1>
node.data: false <2>
node.ingest: false <3>
node.ml: false <4>
cluster.remote.connect: false <5>
node.voting_only: false <2>
node.data: false <3>
node.ingest: false <4>
node.ml: false <5>
cluster.remote.connect: false <6>
-------------------
<1> Disable the `node.master` role (enabled by default).
<2> Disable the `node.data` role (enabled by default).
<3> Disable the `node.ingest` role (enabled by default).
<4> Disable the `node.ml` role (enabled by default).
<5> Disable {ccs} (enabled by default).
<2> The `node.voting_only` role is disabled by default.
<3> Disable the `node.data` role (enabled by default).
<4> Disable the `node.ingest` role (enabled by default).
<5> Disable the `node.ml` role (enabled by default).
<6> Disable {ccs} (enabled by default).
To create a dedicated coordinating node in the {oss-dist}, set:
@ -326,18 +358,20 @@ To create a dedicated {ml} node in the {default-dist}, set:
[source,yaml]
-------------------
node.master: false <1>
node.data: false <2>
node.ingest: false <3>
node.ml: true <4>
xpack.ml.enabled: true <5>
cluster.remote.connect: false <6>
node.voting_only: false <2>
node.data: false <3>
node.ingest: false <4>
node.ml: true <5>
xpack.ml.enabled: true <6>
cluster.remote.connect: false <7>
-------------------
<1> Disable the `node.master` role (enabled by default).
<2> Disable the `node.data` role (enabled by default).
<3> Disable the `node.ingest` role (enabled by default).
<4> The `node.ml` role is enabled by default.
<5> The `xpack.ml.enabled` setting is enabled by default.
<6> Disable {ccs} (enabled by default).
<2> The `node.voting_only` role is disabled by default.
<3> Disable the `node.data` role (enabled by default).
<4> Disable the `node.ingest` role (enabled by default).
<5> The `node.ml` role is enabled by default.
<6> The `xpack.ml.enabled` setting is enabled by default.
<7> Disable {ccs} (enabled by default).
[float]
[[change-node-role]]

View File

@ -1,48 +1,63 @@
[[query-dsl-dis-max-query]]
=== Dis Max Query
=== Disjunction Max Query
A query that generates the union of documents produced by its
subqueries, and that scores each document with the maximum score for
that document as produced by any subquery, plus a tie breaking increment
for any additional matching subqueries.
Returns documents matching one or more wrapped queries, called query clauses or
clauses.
This is useful when searching for a word in multiple fields with
different boost factors (so that the fields cannot be combined
equivalently into a single search field). We want the primary score to
be the one associated with the highest boost, not the sum of the field
scores (as Boolean Query would give). If the query is "albino elephant"
this ensures that "albino" matching one field and "elephant" matching
another gets a higher score than "albino" matching both fields. To get
this result, use both Boolean Query and DisjunctionMax Query: for each
term a DisjunctionMaxQuery searches for it in each field, while the set
of these DisjunctionMaxQuery's is combined into a BooleanQuery.
If a returned document matches multiple query clauses, the `dis_max` query
assigns the document the highest relevance score from any matching clause, plus
a tie breaking increment for any additional matching subqueries.
The tie breaker capability allows results that include the same term in
multiple fields to be judged better than results that include this term
in only the best of those multiple fields, without confusing this with
the better case of two different terms in the multiple fields. The
default `tie_breaker` is `0.0`.
You can use the `dis_max` to search for a term in fields mapped with different
<<mapping-boost,boost>> factors.
This query maps to Lucene `DisjunctionMaxQuery`.
[[query-dsl-dis-max-query-ex-request]]
==== Example request
[source,js]
--------------------------------------------------
----
GET /_search
{
"query": {
"dis_max" : {
"tie_breaker" : 0.7,
"boost" : 1.2,
"queries" : [
{
"term" : { "age" : 34 }
},
{
"term" : { "age" : 35 }
}
]
{ "term" : { "title" : "Quick pets" }},
{ "term" : { "body" : "Quick pets" }}
],
"tie_breaker" : 0.7
}
}
}
--------------------------------------------------
----
// CONSOLE
[[query-dsl-dis-max-query-top-level-params]]
==== Top-level parameters for `dis_max`
`queries` (Required)::
(array of query objects) Contains one or more query clauses. Returned documents
**must match one or more** of these queries. If a document matches multiple
queries, {es} uses the highest <<query-filter-context, relevance score>>.
`tie_breaker` (Optional)::
+
--
(float) Floating point number between `0` and `1.0` used to increase the
<<query-filter-context, relevance scores>> of documents matching multiple query
clauses. Defaults to `0.0`.
You can use the `tie_breaker` value to assign higher relevance scores to
documents that contain the same term in multiple fields than documents that
contain this term in only the best of those multiple fields, without confusing
this with the better case of two different terms in the multiple fields.
If a document matches multiple clauses, the `dis_max` query calculates the
relevance score for the document as follows:
. Take the relevance score from a matching clause with the highest score.
. Multiply the score from any other matching clauses by the `tie_breaker` value.
. Add the highest score to the multiplied scores.
If the `tie_breaker` value is greater than `0.0`, all matching clauses count,
but the clause with the highest score counts most.
--
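As a hypothetical illustration of this calculation: if the `title` clause scores a document at `1.0` and the `body` clause scores it at `0.8`, a `tie_breaker` of `0.7` produces a final relevance score of `1.0 + (0.7 * 0.8) = 1.56`.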

View File

@ -186,8 +186,7 @@ a vector function is executed, 0 is returned as a result
for this document.
NOTE: If a document's dense vector field has a number of dimensions
different from the query's vector, 0 is used for missing dimensions
in the calculations of vector functions.
different from the query's vector, an error will be thrown.
[[random-score-function]]

View File

@ -71,6 +71,10 @@ Example response:
"available" : true,
"enabled" : true
},
"flattened" : {
"available" : true,
"enabled" : true
},
"graph" : {
"available" : true,
"enabled" : true

View File

@ -43,11 +43,13 @@ instances:
`.env`:
[source,yaml]
----
CERTS_DIR=/usr/share/elasticsearch/config/certificates <1>
ELASTIC_PASSWORD=PleaseChangeMe <2>
COMPOSE_PROJECT_NAME=es <1>
CERTS_DIR=/usr/share/elasticsearch/config/certificates <2>
ELASTIC_PASSWORD=PleaseChangeMe <3>
----
<1> The path, inside the Docker image, where certificates are expected to be found.
<2> Initial password for the `elastic` user.
<1> Use an `es_` prefix for all volumes and networks created by docker-compose.
<2> The path, inside the Docker image, where certificates are expected to be found.
<3> Initial password for the `elastic` user.
[[getting-starter-tls-create-certs-composefile]]
`create-certs.yml`:
@ -69,21 +71,21 @@ services:
image: {docker-image}
command: >
bash -c '
if [[ ! -d config/certificates/certs ]]; then
mkdir config/certificates/certs;
yum install -y -q -e 0 unzip;
if [[ ! -f /certs/bundle.zip ]]; then
bin/elasticsearch-certutil cert --silent --pem --in config/certificates/instances.yml -out /certs/bundle.zip;
unzip /certs/bundle.zip -d /certs; <1>
fi;
if [[ ! -f /local/certs/bundle.zip ]]; then
bin/elasticsearch-certgen --silent --in config/certificates/instances.yml --out config/certificates/certs/bundle.zip;
unzip config/certificates/certs/bundle.zip -d config/certificates/certs; <1>
fi;
chgrp -R 0 config/certificates/certs
chown -R 1000:0 /certs
'
user: $\{UID:-1000\}
user: "0"
working_dir: /usr/share/elasticsearch
volumes: ['.:/usr/share/elasticsearch/config/certificates']
volumes: ['certs:/certs', '.:/usr/share/elasticsearch/config/certificates']
volumes: {"certs"}
----
<1> The new node certificates and CA certificate+key are placed under the local directory `certs`.
<1> The new node certificates and CA certificate+key are placed in a docker volume `es_certs`.
endif::[]
[[getting-starter-tls-create-docker-compose]]
@ -106,7 +108,7 @@ services:
image: {docker-image}
environment:
- node.name=es01
- discovery.seed_hosts=es02
- discovery.seed_hosts=es01,es02
- cluster.initial_master_nodes=es01,es02
- ELASTIC_PASSWORD=$ELASTIC_PASSWORD <1>
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
@ -121,7 +123,7 @@ services:
- xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.transport.ssl.certificate=$CERTS_DIR/es01/es01.crt
- xpack.security.transport.ssl.key=$CERTS_DIR/es01/es01.key
volumes: ['esdata_01:/usr/share/elasticsearch/data', './certs:$CERTS_DIR']
volumes: ['data01:/usr/share/elasticsearch/data', 'certs:$CERTS_DIR']
ports:
- 9200:9200
healthcheck:
@ -135,7 +137,7 @@ services:
image: {docker-image}
environment:
- node.name=es02
- discovery.seed_hosts=es01
- discovery.seed_hosts=es01,es02
- cluster.initial_master_nodes=es01,es02
- ELASTIC_PASSWORD=$ELASTIC_PASSWORD
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
@ -150,14 +152,14 @@ services:
- xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.transport.ssl.certificate=$CERTS_DIR/es02/es02.crt
- xpack.security.transport.ssl.key=$CERTS_DIR/es02/es02.key
volumes: ['esdata_02:/usr/share/elasticsearch/data', './certs:$CERTS_DIR']
volumes: ['data02:/usr/share/elasticsearch/data', 'certs:$CERTS_DIR']
wait_until_ready:
image: {docker-image}
command: /usr/bin/true
depends_on: {"es01": {"condition": "service_healthy"}}
volumes: {"esdata_01": {"driver": "local"}, "esdata_02": {"driver": "local"}}
volumes: {"data01", "data02", "certs"}
----
<1> Bootstrap `elastic` with the password defined in `.env`. See
@ -175,7 +177,7 @@ endif::[]
--
["source","sh"]
----
docker-compose -f create-certs.yml up
docker-compose -f create-certs.yml run --rm create_certs
----
--
. Start two {es} nodes configured for SSL/TLS:
@ -189,9 +191,9 @@ docker-compose up -d
. Access the {es} API over SSL/TLS using the bootstrapped password:
+
--
["source","sh"]
["source","sh",subs="attributes"]
----
curl --cacert certs/ca/ca.crt -u elastic:PleaseChangeMe https://localhost:9200
docker run --rm -v es_certs:/certs --network=es_default {docker-image} curl --cacert /certs/ca/ca.crt -u elastic:PleaseChangeMe https://es01:9200
----
// NOTCONSOLE
--
@ -210,3 +212,13 @@ auto --batch \
--url https://localhost:9200"
----
--
[float]
==== Tear everything down
To remove all the Docker resources created by the example, issue:
--
["source","sh"]
----
docker-compose down -v
----
--

View File

@ -2,6 +2,8 @@
[[separating-node-client-traffic]]
=== Separating node-to-node and client traffic
deprecated[7.3.0, Transport Client is deprecated and will be removed]
Elasticsearch supports so-called
{ref}/modules-transport.html[TCP transport profiles],
which allow it to bind to several ports and addresses. The {es}

View File

@ -39,9 +39,9 @@ first election. In <<dev-vs-prod-mode,development mode>>, with no discovery
settings configured, this step is automatically performed by the nodes
themselves. As this auto-bootstrapping is <<modules-discovery-quorums,inherently
unsafe>>, when you start a brand new cluster in <<dev-vs-prod-mode,production
mode>>, you must explicitly list the names or IP addresses of the
master-eligible nodes whose votes should be counted in the very first election.
This list is set using the `cluster.initial_master_nodes` setting.
mode>>, you must explicitly list the master-eligible nodes whose votes should be
counted in the very first election. This list is set using the
`cluster.initial_master_nodes` setting.
[source,yaml]
--------------------------------------------------

View File

@ -147,7 +147,7 @@ ST_Y(
.Description:
Returns the the latitude of the first point in the geometry.
Returns the latitude of the first point in the geometry.
["source","sql",subs="attributes,macros"]
--------------------------------------------------
@ -206,4 +206,4 @@ Returns the distance between geometries in meters. Both geometries have to be po
["source","sql",subs="attributes,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs/geo.csv-spec[distance]
--------------------------------------------------

View File

@ -5,7 +5,7 @@ To upgrade directly to {es} {version} from versions 6.0-6.7, you must shut down
all nodes in the cluster, upgrade each node to {version}, and restart the cluster.
NOTE: If you are running a version prior to 6.0,
https://www.elastic.co/guide/en/elastic-stack/6.8/upgrading-elastic-stack.html[upgrade to 6.8]
{stack-ref-68}/upgrading-elastic-stack.html[upgrade to 6.8]
and reindex your old indices or bring up a new {version} cluster and
<<reindex-upgrade-remote, reindex from remote>>.

View File

@ -36,7 +36,7 @@ been deleted.
[[reindex-upgrade-inplace]]
=== Reindex in place
You can use the Upgrade Assistant in {kib} 6.7 to automatically reindex 5.x
You can use the Upgrade Assistant in {kib} 6.8 to automatically reindex 5.x
indices you need to carry forward to {version}.
To manually reindex your old indices in place:
@ -103,7 +103,7 @@ endif::include-xpack[]
You can use <<reindex-from-remote,reindex from remote>> to migrate indices from
your old cluster to a new {version} cluster. This enables you move to {version}
from a pre-6.7 cluster without interrupting service.
from a pre-6.8 cluster without interrupting service.
[WARNING]
=============================================
@ -196,4 +196,4 @@ monitor progress of the reindex job with the <<tasks,task API>>:
`30s` and `1`).
.. Once reindexing is complete and the status of the new index is `green`,
you can delete the old index.

View File

@ -10,7 +10,7 @@ running the older version.
Rolling upgrades are supported:
* Between minor versions
* https://www.elastic.co/guide/en/elastic-stack/6.8/upgrading-elastic-stack.html[From 5.6 to 6.8]
* {stack-ref-68}/upgrading-elastic-stack.html[From 5.6 to 6.8]
* From 6.8 to {version}
Upgrading directly to {version} from 6.7 or earlier requires a

View File

@ -1,3 +1,3 @@
org.gradle.daemon=true
org.gradle.jvmargs=-Xmx2g -XX:+HeapDumpOnOutOfMemoryError -Xss2m
org.gradle.jvmargs=-Xmx3g -XX:+HeapDumpOnOutOfMemoryError -Xss2m
options.forkOptions.memoryMaximumSize=2g

View File

@ -1,6 +1,6 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-5.4.1-all.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-5.5-all.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionSha256Sum=14cd15fc8cc8705bd69dcfa3c8fefb27eb7027f5de4b47a8b279218f76895a91
distributionSha256Sum=302b7df46730ce75c582542c056c9bf5cac2b94fbf2cc656d0e37e41e8a5d371

gradlew vendored
View File

@ -7,7 +7,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,

gradlew.bat vendored
View File

@ -5,7 +5,7 @@
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem http://www.apache.org/licenses/LICENSE-2.0
@rem https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,

View File

@ -39,7 +39,7 @@ public class CharGroupTokenizerFactory extends AbstractTokenizerFactory{
private boolean tokenizeOnSymbol = false;
public CharGroupTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
for (final String c : settings.getAsList("tokenize_on_chars")) {
if (c == null || c.length() == 0) {
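
The one-line change in this hunk, passing the factory `name` through to the `AbstractTokenizerFactory` super-constructor, recurs across most of the analysis factories below. The following is a minimal sketch of a hypothetical custom factory written against the updated constructor; the class name `MyTokenizerFactory` and the `WhitespaceTokenizer` body are illustrative only and not part of this change set.

    // Minimal sketch (hypothetical class, not from this change set) showing the
    // three-argument super call the factories in this diff now use.
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.env.Environment;
    import org.elasticsearch.index.IndexSettings;
    import org.elasticsearch.index.analysis.AbstractTokenizerFactory;

    public class MyTokenizerFactory extends AbstractTokenizerFactory {

        MyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
            // was: super(indexSettings, settings); the component name is now passed explicitly
            super(indexSettings, settings, name);
        }

        @Override
        public Tokenizer create() {
            // placeholder: a real factory would construct and configure its own tokenizer here
            return new WhitespaceTokenizer();
        }
    }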

View File

@ -35,7 +35,7 @@ public class ClassicTokenizerFactory extends AbstractTokenizerFactory {
private final int maxTokenLength;
ClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
}

View File

@ -36,7 +36,7 @@ public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {
private final CharMatcher matcher;
EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
this.matcher = parseTokenChars(settings.getAsList("token_chars"));

View File

@ -31,7 +31,7 @@ public class KeywordTokenizerFactory extends AbstractTokenizerFactory {
private final int bufferSize;
KeywordTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
bufferSize = settings.getAsInt("buffer_size", 256);
}

View File

@ -29,7 +29,7 @@ import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
public class LetterTokenizerFactory extends AbstractTokenizerFactory {
LetterTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
}
@Override

View File

@ -85,7 +85,7 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory {
}
NGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
int maxAllowedNgramDiff = indexSettings.getMaxNgramDiff();
this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);

View File

@ -37,7 +37,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
private final boolean reverse;
PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
bufferSize = settings.getAsInt("buffer_size", 1024);
String delimiter = settings.get("delimiter");
if (delimiter == null) {

View File

@ -35,7 +35,7 @@ public class PatternTokenizerFactory extends AbstractTokenizerFactory {
private final int group;
PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
if (sPattern == null) {

View File

@ -31,7 +31,7 @@ public class SimplePatternSplitTokenizerFactory extends AbstractTokenizerFactory
private final String pattern;
public SimplePatternSplitTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
pattern = settings.get("pattern", "");
}

View File

@ -31,7 +31,7 @@ public class SimplePatternTokenizerFactory extends AbstractTokenizerFactory {
private final String pattern;
public SimplePatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
pattern = settings.get("pattern", "");
}

View File

@ -116,7 +116,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
Analyzer buildSynonymAnalyzer(TokenizerFactory tokenizer, List<CharFilterFactory> charFilters,
List<TokenFilterFactory> tokenFilters, Function<String, TokenFilterFactory> allFilters) {
return new CustomAnalyzer("synonyms", tokenizer, charFilters.toArray(new CharFilterFactory[0]),
return new CustomAnalyzer(tokenizer, charFilters.toArray(new CharFilterFactory[0]),
tokenFilters.stream()
.map(TokenFilterFactory::getSynonymFilter)
.toArray(TokenFilterFactory[]::new));

View File

@ -32,7 +32,7 @@ import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
public class ThaiTokenizerFactory extends AbstractTokenizerFactory {
ThaiTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
}
@Override

View File

@ -32,7 +32,7 @@ public class UAX29URLEmailTokenizerFactory extends AbstractTokenizerFactory {
private final int maxTokenLength;
UAX29URLEmailTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
}

View File

@ -34,7 +34,7 @@ public class WhitespaceTokenizerFactory extends AbstractTokenizerFactory {
private Integer maxTokenLength;
WhitespaceTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, settings);
super(indexSettings, settings, name);
maxTokenLength = settings.getAsInt(MAX_TOKEN_LENGTH, StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
}

Some files were not shown because too many files have changed in this diff.