Merge branch 'master' into index-lifecycle
commit 8d9b98569f
@@ -39,6 +39,7 @@ import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyReposito
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest;
import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
@@ -844,6 +845,18 @@ final class RequestConverters {
        return request;
    }

    static Request deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) {
        String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot")
            .addPathPart(deleteSnapshotRequest.repository())
            .addPathPart(deleteSnapshotRequest.snapshot())
            .build();
        Request request = new Request(HttpDelete.METHOD_NAME, endpoint);

        Params parameters = new Params(request);
        parameters.withMasterTimeout(deleteSnapshotRequest.masterNodeTimeout());
        return request;
    }

    static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException {
        String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build();
        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
@@ -28,6 +28,8 @@ import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequ
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;

import java.io.IOException;

@@ -161,4 +163,34 @@ public final class SnapshotClient {
        restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, options,
            VerifyRepositoryResponse::fromXContent, listener, emptySet());
    }

    /**
     * Deletes a snapshot.
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
     * API on elastic.co</a>
     *
     * @param deleteSnapshotRequest the request
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @return the response
     * @throws IOException in case there is a problem sending the request or parsing back the response
     */
    public DeleteSnapshotResponse delete(DeleteSnapshotRequest deleteSnapshotRequest, RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(deleteSnapshotRequest, RequestConverters::deleteSnapshot, options,
            DeleteSnapshotResponse::fromXContent, emptySet());
    }

    /**
     * Asynchronously deletes a snapshot.
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
     * API on elastic.co</a>
     *
     * @param deleteSnapshotRequest the request
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener the listener to be notified upon request completion
     */
    public void deleteAsync(DeleteSnapshotRequest deleteSnapshotRequest, RequestOptions options,
                            ActionListener<DeleteSnapshotResponse> listener) {
        restHighLevelClient.performRequestAsyncAndParseEntity(deleteSnapshotRequest, RequestConverters::deleteSnapshot, options,
            DeleteSnapshotResponse::fromXContent, listener, emptySet());
    }
}
@@ -37,6 +37,7 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRe
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest;
import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest;
import org.elasticsearch.action.admin.indices.alias.Alias;
@@ -1857,6 +1858,25 @@ public class RequestConvertersTests extends ESTestCase {
        assertThat(expectedParams, equalTo(request.getParameters()));
    }

    public void testDeleteSnapshot() {
        Map<String, String> expectedParams = new HashMap<>();
        String repository = randomIndicesNames(1, 1)[0];
        String snapshot = "snapshot-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT);

        String endpoint = String.format(Locale.ROOT, "/_snapshot/%s/%s", repository, snapshot);

        DeleteSnapshotRequest deleteSnapshotRequest = new DeleteSnapshotRequest();
        deleteSnapshotRequest.repository(repository);
        deleteSnapshotRequest.snapshot(snapshot);
        setRandomMasterTimeout(deleteSnapshotRequest, expectedParams);

        Request request = RequestConverters.deleteSnapshot(deleteSnapshotRequest);
        assertThat(endpoint, equalTo(request.getEndpoint()));
        assertThat(HttpDelete.METHOD_NAME, equalTo(request.getMethod()));
        assertThat(expectedParams, equalTo(request.getParameters()));
        assertNull(request.getEntity());
    }

    public void testPutTemplateRequest() throws Exception {
        Map<String, String> names = new HashMap<>();
        names.put("log", "log");
@@ -28,11 +28,14 @@ import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequ
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.Locale;

import static org.hamcrest.Matchers.equalTo;

@@ -46,6 +49,13 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
            highLevelClient().snapshot()::createRepositoryAsync);
    }

    private Response createTestSnapshot(String repository, String snapshot) throws IOException {
        Request createSnapshot = new Request("put", String.format(Locale.ROOT, "_snapshot/%s/%s", repository, snapshot));
        createSnapshot.addParameter("wait_for_completion", "true");
        return highLevelClient().getLowLevelClient().performRequest(createSnapshot);
    }


    public void testCreateRepository() throws IOException {
        PutRepositoryResponse response = createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}");
        assertTrue(response.isAcknowledged());
@@ -108,4 +118,21 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
            highLevelClient().snapshot()::verifyRepositoryAsync);
        assertThat(response.getNodes().size(), equalTo(1));
    }

    public void testDeleteSnapshot() throws IOException {
        String repository = "test_repository";
        String snapshot = "test_snapshot";

        PutRepositoryResponse putRepositoryResponse = createTestRepository(repository, FsRepository.TYPE, "{\"location\": \".\"}");
        assertTrue(putRepositoryResponse.isAcknowledged());

        Response putSnapshotResponse = createTestSnapshot(repository, snapshot);
        // check that the request went ok without parsing JSON here. When using the high level client, check acknowledgement instead.
        assertEquals(200, putSnapshotResponse.getStatusLine().getStatusCode());

        DeleteSnapshotRequest request = new DeleteSnapshotRequest(repository, snapshot);
        DeleteSnapshotResponse response = execute(request, highLevelClient().snapshot()::delete, highLevelClient().snapshot()::deleteAsync);

        assertTrue(response.isAcknowledged());
    }
}
@@ -29,8 +29,12 @@ import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequ
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.settings.Settings;
@@ -41,6 +45,7 @@ import org.elasticsearch.repositories.fs.FsRepository;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
@@ -69,6 +74,8 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase

    private static final String repositoryName = "test_repository";

    private static final String snapshotName = "test_snapshot";

    public void testSnapshotCreateRepository() throws IOException {
        RestHighLevelClient client = highLevelClient();

@@ -360,10 +367,76 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase
        }
    }

    public void testSnapshotDeleteSnapshot() throws IOException {
        RestHighLevelClient client = highLevelClient();

        createTestRepositories();
        createTestSnapshots();

        // tag::delete-snapshot-request
        DeleteSnapshotRequest request = new DeleteSnapshotRequest(repositoryName);
        request.snapshot(snapshotName);
        // end::delete-snapshot-request

        // tag::delete-snapshot-request-masterTimeout
        request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
        request.masterNodeTimeout("1m"); // <2>
        // end::delete-snapshot-request-masterTimeout

        // tag::delete-snapshot-execute
        DeleteSnapshotResponse response = client.snapshot().delete(request, RequestOptions.DEFAULT);
        // end::delete-snapshot-execute

        // tag::delete-snapshot-response
        boolean acknowledged = response.isAcknowledged(); // <1>
        // end::delete-snapshot-response
        assertTrue(acknowledged);
    }

    public void testSnapshotDeleteSnapshotAsync() throws InterruptedException {
        RestHighLevelClient client = highLevelClient();
        {
            DeleteSnapshotRequest request = new DeleteSnapshotRequest();

            // tag::delete-snapshot-execute-listener
            ActionListener<DeleteSnapshotResponse> listener =
                new ActionListener<DeleteSnapshotResponse>() {
                    @Override
                    public void onResponse(DeleteSnapshotResponse deleteSnapshotResponse) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::delete-snapshot-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::delete-snapshot-execute-async
            client.snapshot().deleteAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::delete-snapshot-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    private void createTestRepositories() throws IOException {
        PutRepositoryRequest request = new PutRepositoryRequest(repositoryName);
        request.type(FsRepository.TYPE);
        request.settings("{\"location\": \".\"}", XContentType.JSON);
        assertTrue(highLevelClient().snapshot().createRepository(request, RequestOptions.DEFAULT).isAcknowledged());
    }

    private void createTestSnapshots() throws IOException {
        Request createSnapshot = new Request("put", String.format(Locale.ROOT, "_snapshot/%s/%s", repositoryName, snapshotName));
        createSnapshot.addParameter("wait_for_completion", "true");
        Response response = highLevelClient().getLowLevelClient().performRequest(createSnapshot);
        // check that the request went ok without parsing JSON here. When using the high level client, check acknowledgement instead.
        assertEquals(200, response.getStatusLine().getStatusCode());
    }
}
@@ -0,0 +1,108 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.tools.launchers;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Tunes Elasticsearch JVM settings based on inspection of provided JVM options.
 */
final class JvmErgonomics {
    private static final long KB = 1024L;

    private static final long MB = 1024L * 1024L;

    private static final long GB = 1024L * 1024L * 1024L;


    private JvmErgonomics() {
        throw new AssertionError("No instances intended");
    }

    /**
     * Chooses additional JVM options for Elasticsearch.
     *
     * @param userDefinedJvmOptions A list of JVM options that have been defined by the user.
     * @return A list of additional JVM options to set.
     */
    static List<String> choose(List<String> userDefinedJvmOptions) {
        List<String> ergonomicChoices = new ArrayList<>();
        Long heapSize = extractHeapSize(userDefinedJvmOptions);
        Map<String, String> systemProperties = extractSystemProperties(userDefinedJvmOptions);
        if (heapSize != null) {
            if (systemProperties.containsKey("io.netty.allocator.type") == false) {
                if (heapSize <= 1 * GB) {
                    ergonomicChoices.add("-Dio.netty.allocator.type=unpooled");
                } else {
                    ergonomicChoices.add("-Dio.netty.allocator.type=pooled");
                }
            }
        }
        return ergonomicChoices;
    }

    private static final Pattern MAX_HEAP_SIZE = Pattern.compile("^(-Xmx|-XX:MaxHeapSize=)(?<size>\\d+)(?<unit>\\w)?$");

    // package private for testing
    static Long extractHeapSize(List<String> userDefinedJvmOptions) {
        for (String jvmOption : userDefinedJvmOptions) {
            final Matcher matcher = MAX_HEAP_SIZE.matcher(jvmOption);
            if (matcher.matches()) {
                final long size = Long.parseLong(matcher.group("size"));
                final String unit = matcher.group("unit");
                if (unit == null) {
                    return size;
                } else {
                    switch (unit.toLowerCase(Locale.ROOT)) {
                        case "k":
                            return size * KB;
                        case "m":
                            return size * MB;
                        case "g":
                            return size * GB;
                        default:
                            throw new IllegalArgumentException("Unknown unit [" + unit + "] for max heap size in [" + jvmOption + "]");
                    }
                }
            }
        }
        return null;
    }

    private static final Pattern SYSTEM_PROPERTY = Pattern.compile("^-D(?<key>[\\w+].*?)=(?<value>.*)$");

    // package private for testing
    static Map<String, String> extractSystemProperties(List<String> userDefinedJvmOptions) {
        Map<String, String> systemProperties = new HashMap<>();
        for (String jvmOption : userDefinedJvmOptions) {
            final Matcher matcher = SYSTEM_PROPERTY.matcher(jvmOption);
            if (matcher.matches()) {
                systemProperties.put(matcher.group("key"), matcher.group("value"));
            }
        }
        return systemProperties;
    }
}
@@ -78,6 +78,8 @@ final class JvmOptionsParser {
        }

        if (invalidLines.isEmpty()) {
            List<String> ergonomicJvmOptions = JvmErgonomics.choose(jvmOptions);
            jvmOptions.addAll(ergonomicJvmOptions);
            final String spaceDelimitedJvmOptions = spaceDelimitJvmOptions(jvmOptions);
            Launchers.outPrintln(spaceDelimitedJvmOptions);
            Launchers.exit(0);
@@ -0,0 +1,83 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.tools.launchers;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

public class JvmErgonomicsTests extends LaunchersTestCase {
    public void testExtractValidHeapSize() {
        assertEquals(Long.valueOf(1024), JvmErgonomics.extractHeapSize(Collections.singletonList("-Xmx1024")));
        assertEquals(Long.valueOf(2L * 1024 * 1024 * 1024), JvmErgonomics.extractHeapSize(Collections.singletonList("-Xmx2g")));
        assertEquals(Long.valueOf(32 * 1024 * 1024), JvmErgonomics.extractHeapSize(Collections.singletonList("-Xmx32M")));
        assertEquals(Long.valueOf(32 * 1024 * 1024), JvmErgonomics.extractHeapSize(Collections.singletonList("-XX:MaxHeapSize=32M")));
    }

    public void testExtractInvalidHeapSize() {
        try {
            JvmErgonomics.extractHeapSize(Collections.singletonList("-Xmx2T"));
            fail("Expected IllegalArgumentException to be raised");
        } catch (IllegalArgumentException expected) {
            assertEquals("Unknown unit [T] for max heap size in [-Xmx2T]", expected.getMessage());
        }
    }

    public void testExtractNoHeapSize() {
        assertNull("No spaces allowed", JvmErgonomics.extractHeapSize(Collections.singletonList("-Xmx 1024")));
        assertNull("JVM option is not present", JvmErgonomics.extractHeapSize(Collections.singletonList("")));
        assertNull("Multiple JVM options per line", JvmErgonomics.extractHeapSize(Collections.singletonList("-Xms2g -Xmx2g")));
    }

    public void testExtractSystemProperties() {
        Map<String, String> expectedSystemProperties = new HashMap<>();
        expectedSystemProperties.put("file.encoding", "UTF-8");
        expectedSystemProperties.put("kv.setting", "ABC=DEF");

        Map<String, String> parsedSystemProperties = JvmErgonomics.extractSystemProperties(
            Arrays.asList("-Dfile.encoding=UTF-8", "-Dkv.setting=ABC=DEF"));

        assertEquals(expectedSystemProperties, parsedSystemProperties);
    }

    public void testExtractNoSystemProperties() {
        Map<String, String> parsedSystemProperties = JvmErgonomics.extractSystemProperties(Arrays.asList("-Xms1024M", "-Xmx1024M"));
        assertTrue(parsedSystemProperties.isEmpty());
    }

    public void testLittleMemoryErgonomicChoices() {
        String smallHeap = randomFrom(Arrays.asList("64M", "512M", "1024M", "1G"));
        List<String> expectedChoices = Collections.singletonList("-Dio.netty.allocator.type=unpooled");
        assertEquals(expectedChoices, JvmErgonomics.choose(Arrays.asList("-Xms" + smallHeap, "-Xmx" + smallHeap)));
    }

    public void testPlentyMemoryErgonomicChoices() {
        String largeHeap = randomFrom(Arrays.asList("1025M", "2048M", "2G", "8G"));
        List<String> expectedChoices = Collections.singletonList("-Dio.netty.allocator.type=pooled");
        assertEquals(expectedChoices, JvmErgonomics.choose(Arrays.asList("-Xms" + largeHeap, "-Xmx" + largeHeap)));
    }
}
@@ -0,0 +1,73 @@
[[java-rest-high-snapshot-delete-snapshot]]
=== Delete Snapshot API

The Delete Snapshot API allows you to delete a snapshot.

[[java-rest-high-snapshot-delete-snapshot-request]]
==== Delete Snapshot Request

A `DeleteSnapshotRequest`:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-snapshot-request]
--------------------------------------------------

==== Optional Arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-snapshot-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`

[[java-rest-high-snapshot-delete-snapshot-sync]]
==== Synchronous Execution

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-snapshot-execute]
--------------------------------------------------

[[java-rest-high-snapshot-delete-snapshot-async]]
==== Asynchronous Execution

The asynchronous execution of a delete snapshot request requires both the
`DeleteSnapshotRequest` instance and an `ActionListener` instance to be
passed to the asynchronous method:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-snapshot-execute-async]
--------------------------------------------------
<1> The `DeleteSnapshotRequest` to execute and the `ActionListener`
to use when the execution completes

The asynchronous method does not block and returns immediately. Once it is
completed, the `ActionListener` is called back using the `onResponse` method
if the execution completed successfully or using the `onFailure` method if
it failed.

A typical listener for `DeleteSnapshotResponse` looks like:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-snapshot-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of a failure. The raised exception is provided as an argument

[[java-rest-high-cluster-delete-snapshot-response]]
==== Delete Snapshot Response

The returned `DeleteSnapshotResponse` allows you to retrieve information about the
executed operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-snapshot-response]
--------------------------------------------------
<1> Indicates the node has acknowledged the request
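As an editor's aside (not part of the commit): the `include-tagged` snippets above are pulled from `SnapshotClientDocumentationIT` in this change, so they do not render inline here. The following is a minimal end-to-end sketch assembled from those tagged snippets, assuming an already constructed `RestHighLevelClient` named `client` and an existing repository and snapshot; the names `test_repository` and `test_snapshot` are illustrative values taken from the test fixtures.

["source","java"]
--------------------------------------------------
// Build the request; repository and snapshot names are illustrative.
DeleteSnapshotRequest request = new DeleteSnapshotRequest("test_repository");
request.snapshot("test_snapshot");
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // optional master node timeout

// Synchronous execution via the high level client.
DeleteSnapshotResponse response = client.snapshot().delete(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged();

// Asynchronous execution with an ActionListener.
client.snapshot().deleteAsync(request, RequestOptions.DEFAULT, new ActionListener<DeleteSnapshotResponse>() {
    @Override
    public void onResponse(DeleteSnapshotResponse deleteSnapshotResponse) {
        // called when the snapshot deletion was acknowledged
    }

    @Override
    public void onFailure(Exception e) {
        // called if the request could not be executed
    }
});
--------------------------------------------------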
@@ -136,11 +136,13 @@ The Java High Level REST Client supports the following Snapshot APIs:
* <<java-rest-high-snapshot-create-repository>>
* <<java-rest-high-snapshot-delete-repository>>
* <<java-rest-high-snapshot-verify-repository>>
* <<java-rest-high-snapshot-delete-snapshot>>

include::snapshot/get_repository.asciidoc[]
include::snapshot/create_repository.asciidoc[]
include::snapshot/delete_repository.asciidoc[]
include::snapshot/verify_repository.asciidoc[]
include::snapshot/delete_snapshot.asciidoc[]

== Tasks APIs

@@ -158,6 +158,9 @@ On macOS, Elasticsearch can also be installed via https://brew.sh[Homebrew]:
brew install elasticsearch
--------------------------------------------------

If installation succeeds, Homebrew will finish by saying that you can start Elasticsearch by entering
`elasticsearch`. Do that now. The expected response is described below, under <<successfully-running-node>>.

[float]
=== Installation example with MSI Windows Installer

@@ -216,6 +219,7 @@ And now we are ready to start our node and single cluster:
--------------------------------------------------

[float]
[[successfully-running-node]]
=== Successfully running node

If everything goes well with installation, you should see a bunch of messages that look like below:
@@ -1,4 +1,5 @@
[role="xpack"]
[testenv="basic"]
[[delete-license]]
=== Delete License API

@@ -41,3 +42,4 @@ When the license is successfully deleted, the API returns the following response
  "acknowledged": true
}
------------------------------------------------------------
// NOTCONSOLE
@@ -1,4 +1,5 @@
[role="xpack"]
[testenv="basic"]
[[get-basic-status]]
=== Get Basic Status API
@@ -1,4 +1,5 @@
[role="xpack"]
[testenv="basic"]
[[get-license]]
=== Get License API

@@ -52,11 +53,9 @@ GET _xpack/license
  "license" : {
    "status" : "active",
    "uid" : "cbff45e7-c553-41f7-ae4f-9205eabd80xx",
    "type" : "trial",
    "type" : "basic",
    "issue_date" : "2018-02-22T23:12:05.550Z",
    "issue_date_in_millis" : 1519341125550,
    "expiry_date" : "2018-03-24T23:12:05.550Z",
    "expiry_date_in_millis" : 1521933125550,
    "max_nodes" : 1000,
    "issued_to" : "test",
    "issuer" : "elasticsearch",
@@ -65,11 +64,9 @@ GET _xpack/license
}
--------------------------------------------------
// TESTRESPONSE[s/"cbff45e7-c553-41f7-ae4f-9205eabd80xx"/$body.license.uid/]
// TESTRESPONSE[s/"trial"/$body.license.type/]
// TESTRESPONSE[s/"basic"/$body.license.type/]
// TESTRESPONSE[s/"2018-02-22T23:12:05.550Z"/$body.license.issue_date/]
// TESTRESPONSE[s/1519341125550/$body.license.issue_date_in_millis/]
// TESTRESPONSE[s/"2018-03-24T23:12:05.550Z"/$body.license.expiry_date/]
// TESTRESPONSE[s/1521933125550/$body.license.expiry_date_in_millis/]
// TESTRESPONSE[s/1000/$body.license.max_nodes/]
// TESTRESPONSE[s/"test"/$body.license.issued_to/]
// TESTRESPONSE[s/"elasticsearch"/$body.license.issuer/]
@@ -1,4 +1,5 @@
[role="xpack"]
[testenv="basic"]
[[get-trial-status]]
=== Get Trial Status API
@@ -0,0 +1,22 @@
[role="xpack"]
[[licensing-apis]]
== Licensing APIs

You can use the following APIs to manage your licenses:

* <<delete-license>>
* <<get-license>>
* <<get-trial-status>>
* <<start-trial>>
* <<get-basic-status>>
* <<start-basic>>
* <<update-license>>


include::delete-license.asciidoc[]
include::get-license.asciidoc[]
include::get-trial-status.asciidoc[]
include::start-trial.asciidoc[]
include::get-basic-status.asciidoc[]
include::start-basic.asciidoc[]
include::update-license.asciidoc[]
@@ -1,4 +1,5 @@
[role="xpack"]
[testenv="basic"]
[[start-basic]]
=== Start Basic API
@@ -1,4 +1,5 @@
[role="xpack"]
[testenv="basic"]
[[start-trial]]
=== Start Trial API
@@ -1,4 +1,5 @@
[role="xpack"]
[testenv="basic"]
[[update-license]]
=== Update License API

@@ -123,6 +124,7 @@ receive the following response:
  }
}
------------------------------------------------------------
// NOTCONSOLE

To complete the update, you must re-submit the API request and set the
`acknowledge` parameter to `true`. For example:
@@ -36,6 +36,8 @@ GET my_index/_search
// CONSOLE
// TESTSETUP

NOTE: You can also store ip ranges in a single field using an <<range,ip_range datatype>>.

[[ip-params]]
==== Parameters for `ip` fields

@@ -20,7 +20,7 @@ directly to configure and access {xpack} features.

include::info.asciidoc[]
include::{xes-repo-dir}/rest-api/graph/explore.asciidoc[]
include::{xes-repo-dir}/rest-api/licensing.asciidoc[]
include::{es-repo-dir}/licensing/index.asciidoc[]
include::{xes-repo-dir}/rest-api/migration.asciidoc[]
include::{xes-repo-dir}/rest-api/ml-api.asciidoc[]
include::{xes-repo-dir}/rest-api/rollup-api.asciidoc[]
@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.cluster.snapshots.delete;

import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.xcontent.XContentParser;

/**
 * Delete snapshot response
@@ -32,4 +33,9 @@ public class DeleteSnapshotResponse extends AcknowledgedResponse {
    DeleteSnapshotResponse(boolean acknowledged) {
        super(acknowledged);
    }

    public static DeleteSnapshotResponse fromXContent(XContentParser parser) {
        return new DeleteSnapshotResponse(parseAcknowledged(parser));
    }

}
@@ -151,7 +151,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
        IndexModule.INDEX_STORE_TYPE_SETTING,
        IndexModule.INDEX_STORE_PRE_LOAD_SETTING,
        IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING,
        IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING,
        FsDirectoryService.INDEX_LOCK_FACTOR_SETTING,
        EngineConfig.INDEX_CODEC_SETTING,
        EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS,
@@ -29,6 +29,7 @@ import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.ReferenceManager;
import org.apache.lucene.search.Sort;
@@ -299,7 +300,16 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
        // the query cache is a node-level thing, however we want the most popular filters
        // to be computed on a per-shard basis
        if (IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.get(settings)) {
            cachingPolicy = QueryCachingPolicy.ALWAYS_CACHE;
            cachingPolicy = new QueryCachingPolicy() {
                @Override
                public void onUse(Query query) {

                }
                @Override
                public boolean shouldCache(Query query) {
                    return true;
                }
            };
        } else {
            cachingPolicy = new UsageTrackingQueryCachingPolicy();
        }
@@ -0,0 +1,41 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.snapshots.delete;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractStreamableXContentTestCase;

public class DeleteSnapshotResponseTests extends AbstractStreamableXContentTestCase<DeleteSnapshotResponse> {

    @Override
    protected DeleteSnapshotResponse doParseInstance(XContentParser parser) {
        return DeleteSnapshotResponse.fromXContent(parser);
    }

    @Override
    protected DeleteSnapshotResponse createBlankInstance() {
        return new DeleteSnapshotResponse();
    }

    @Override
    protected DeleteSnapshotResponse createTestInstance() {
        return new DeleteSnapshotResponse(randomBoolean());
    }
}
@@ -89,6 +89,19 @@ public class IndicesQueryCacheTests extends ESTestCase {

    }

    private static QueryCachingPolicy alwaysCachePolicy() {
        return new QueryCachingPolicy() {
            @Override
            public void onUse(Query query) {

            }
            @Override
            public boolean shouldCache(Query query) {
                return true;
            }
        };
    }

    public void testBasics() throws IOException {
        Directory dir = newDirectory();
        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
@@ -98,7 +111,7 @@ public class IndicesQueryCacheTests extends ESTestCase {
        ShardId shard = new ShardId("index", "_na_", 0);
        r = ElasticsearchDirectoryReader.wrap(r, shard);
        IndexSearcher s = new IndexSearcher(r);
        s.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
        s.setQueryCachingPolicy(alwaysCachePolicy());

        Settings settings = Settings.builder()
            .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
@@ -169,7 +182,7 @@ public class IndicesQueryCacheTests extends ESTestCase {
        ShardId shard1 = new ShardId("index", "_na_", 0);
        r1 = ElasticsearchDirectoryReader.wrap(r1, shard1);
        IndexSearcher s1 = new IndexSearcher(r1);
        s1.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
        s1.setQueryCachingPolicy(alwaysCachePolicy());

        Directory dir2 = newDirectory();
        IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig());
@@ -179,7 +192,7 @@ public class IndicesQueryCacheTests extends ESTestCase {
        ShardId shard2 = new ShardId("index", "_na_", 1);
        r2 = ElasticsearchDirectoryReader.wrap(r2, shard2);
        IndexSearcher s2 = new IndexSearcher(r2);
        s2.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
        s2.setQueryCachingPolicy(alwaysCachePolicy());

        Settings settings = Settings.builder()
            .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
@@ -295,7 +308,7 @@ public class IndicesQueryCacheTests extends ESTestCase {
        ShardId shard1 = new ShardId("index", "_na_", 0);
        r1 = ElasticsearchDirectoryReader.wrap(r1, shard1);
        IndexSearcher s1 = new IndexSearcher(r1);
        s1.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
        s1.setQueryCachingPolicy(alwaysCachePolicy());

        Directory dir2 = newDirectory();
        IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig());
@@ -305,7 +318,7 @@ public class IndicesQueryCacheTests extends ESTestCase {
        ShardId shard2 = new ShardId("index", "_na_", 1);
        r2 = ElasticsearchDirectoryReader.wrap(r2, shard2);
        IndexSearcher s2 = new IndexSearcher(r2);
        s2.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
        s2.setQueryCachingPolicy(alwaysCachePolicy());

        Settings settings = Settings.builder()
            .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
@@ -31,8 +31,10 @@ import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;

import java.io.IOException;
import java.util.Arrays;
import java.util.Base64;
import java.util.Collection;
import java.util.Collections;
@@ -52,7 +54,7 @@ public class ScriptQuerySearchIT extends ESIntegTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return Collections.singleton(CustomScriptPlugin.class);
        return Arrays.asList(CustomScriptPlugin.class, InternalSettingsPlugin.class);
    }

    public static class CustomScriptPlugin extends MockScriptPlugin {
@@ -434,10 +434,6 @@ public abstract class ESIntegTestCase extends ESTestCase {
        if (randomBoolean()) {
            randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), randomBoolean());
        }

        if (randomBoolean()) {
            randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), randomBoolean());
        }
        PutIndexTemplateRequestBuilder putTemplate = client().admin().indices()
            .preparePutTemplate("random_index_template")
            .setPatterns(Collections.singletonList("*"))
@@ -22,6 +22,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.plugins.Plugin;

@@ -51,6 +52,8 @@ public final class InternalSettingsPlugin extends Plugin {
            INDEX_CREATION_DATE_SETTING,
            PROVIDED_NAME_SETTING,
            TRANSLOG_RETENTION_CHECK_INTERVAL_SETTING,
            IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING);
            IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING,
            IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING
        );
    }
}
@@ -47,8 +47,6 @@ buildRestTests.expectedUnconvertedCandidates = [
  'en/watcher/trigger/schedule/weekly.asciidoc',
  'en/watcher/trigger/schedule/yearly.asciidoc',
  'en/watcher/troubleshooting.asciidoc',
  'en/rest-api/license/delete-license.asciidoc',
  'en/rest-api/license/update-license.asciidoc',
  'en/rest-api/ml/delete-snapshot.asciidoc',
  'en/rest-api/ml/forecast.asciidoc',
  'en/rest-api/ml/get-bucket.asciidoc',
@@ -1,22 +0,0 @@
[role="xpack"]
[[licensing-apis]]
== Licensing APIs

You can use the following APIs to manage your licenses:

* <<delete-license>>
* <<get-license>>
* <<get-trial-status>>
* <<start-trial>>
* <<get-basic-status>>
* <<start-basic>>
* <<update-license>>


include::license/delete-license.asciidoc[]
include::license/get-license.asciidoc[]
include::license/get-trial-status.asciidoc[]
include::license/start-trial.asciidoc[]
include::license/get-basic-status.asciidoc[]
include::license/start-basic.asciidoc[]
include::license/update-license.asciidoc[]
@@ -169,7 +169,8 @@ public class SecuritySettingsSource extends ClusterDiscoveryConfiguration.Unicas

    @Override
    public Collection<Class<? extends Plugin>> nodePlugins() {
        return Arrays.asList(LocalStateSecurity.class, Netty4Plugin.class, ReindexPlugin.class, CommonAnalysisPlugin.class);
        return Arrays.asList(LocalStateSecurity.class, Netty4Plugin.class, ReindexPlugin.class, CommonAnalysisPlugin.class,
            InternalSettingsPlugin.class);
    }

    @Override