Merge remote-tracking branch 'es/7.x' into enrich-7.x
commit 837cfa2640
@@ -41,3 +41,4 @@ BWC_VERSION:
   - "7.2.1"
   - "7.3.0"
   - "7.3.1"
+  - "7.3.2"
@@ -12,6 +12,7 @@ ES_RUNTIME_JAVA:
   - java12
   - openjdk12
   - openjdk13
+  - openjdk14
   - zulu8
   - zulu11
   - zulu12
@@ -95,10 +95,12 @@ public class DistroTestPlugin implements Plugin<Project> {
         TaskProvider<Copy> copyUpgradeTask = configureCopyUpgradeTask(project, upgradeVersion, upgradeDir);
         TaskProvider<Copy> copyPluginsTask = configureCopyPluginsTask(project, pluginsDir);

-        Map<String, TaskProvider<?>> batsTests = new HashMap<>();
+        TaskProvider<Task> destructiveDistroTest = project.getTasks().register("destructiveDistroTest");
         for (ElasticsearchDistribution distribution : distributions) {
-            configureDistroTest(project, distribution);
+            TaskProvider<?> destructiveTask = configureDistroTest(project, distribution);
+            destructiveDistroTest.configure(t -> t.dependsOn(destructiveTask));
         }
+        Map<String, TaskProvider<?>> batsTests = new HashMap<>();
         batsTests.put("bats oss", configureBatsTest(project, "oss", distributionsDir, copyDistributionsTask));
         batsTests.put("bats default", configureBatsTest(project, "default", distributionsDir, copyDistributionsTask));
         configureBatsTest(project, "plugins", distributionsDir, copyDistributionsTask, copyPluginsTask).configure(t ->
@@ -126,7 +128,6 @@ public class DistroTestPlugin implements Plugin<Project> {
             }
         }
-

         batsTests.forEach((desc, task) -> {
             configureVMWrapperTask(vmProject, desc, task.getName(), vmDependencies).configure(t -> {
                 t.setProgressHandler(new BatsProgressLogger(project.getLogger()));
@@ -20,6 +20,8 @@
 package org.elasticsearch.client;

 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse;
 import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
@@ -170,6 +172,35 @@ public final class SnapshotClient {
             VerifyRepositoryResponse::fromXContent, listener, emptySet());
     }

+    /**
+     * Cleans up a snapshot repository.
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
+     * API on elastic.co</a>
+     * @param cleanupRepositoryRequest the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response
+     * @throws IOException in case there is a problem sending the request or parsing back the response
+     */
+    public CleanupRepositoryResponse cleanupRepository(CleanupRepositoryRequest cleanupRepositoryRequest, RequestOptions options)
+        throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(cleanupRepositoryRequest, SnapshotRequestConverters::cleanupRepository,
+            options, CleanupRepositoryResponse::fromXContent, emptySet());
+    }
+
+    /**
+     * Asynchronously cleans up a snapshot repository.
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
+     * API on elastic.co</a>
+     * @param cleanupRepositoryRequest the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon request completion
+     */
+    public void cleanupRepositoryAsync(CleanupRepositoryRequest cleanupRepositoryRequest, RequestOptions options,
+                                       ActionListener<CleanupRepositoryResponse> listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(cleanupRepositoryRequest, SnapshotRequestConverters::cleanupRepository,
+            options, CleanupRepositoryResponse::fromXContent, listener, emptySet());
+    }
+
     /**
      * Creates a snapshot.
      * <p>
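
A minimal usage sketch for the cleanupRepository methods added above; the repository name, client instance, and listener variables are illustrative assumptions, not part of this change:

    // synchronous call; returns once the cleanup request completes
    CleanupRepositoryRequest cleanupRequest = new CleanupRepositoryRequest("my_repository");
    CleanupRepositoryResponse cleanupResponse = client.snapshot().cleanupRepository(cleanupRequest, RequestOptions.DEFAULT);
    // non-blocking variant; the listener is notified when the call completes
    client.snapshot().cleanupRepositoryAsync(cleanupRequest, RequestOptions.DEFAULT, listener);
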
@@ -23,6 +23,7 @@ import org.apache.http.client.methods.HttpDelete;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.client.methods.HttpPost;
 import org.apache.http.client.methods.HttpPut;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
@@ -94,6 +95,20 @@ final class SnapshotRequestConverters {
         return request;
     }

+    static Request cleanupRepository(CleanupRepositoryRequest cleanupRepositoryRequest) {
+        String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_snapshot")
+            .addPathPart(cleanupRepositoryRequest.name())
+            .addPathPartAsIs("_cleanup")
+            .build();
+        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
+
+        RequestConverters.Params parameters = new RequestConverters.Params();
+        parameters.withMasterTimeout(cleanupRepositoryRequest.masterNodeTimeout());
+        parameters.withTimeout(cleanupRepositoryRequest.timeout());
+        request.addParameters(parameters.asMap());
+        return request;
+    }
+
     static Request createSnapshot(CreateSnapshotRequest createSnapshotRequest) throws IOException {
         String endpoint = new RequestConverters.EndpointBuilder().addPathPart("_snapshot")
             .addPathPart(createSnapshotRequest.repository())
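
A sketch of what the converter above produces, as a same-package unit test would observe it; the repository name is illustrative, not part of this change:

    Request httpRequest = SnapshotRequestConverters.cleanupRepository(new CleanupRepositoryRequest("my_repository"));
    // httpRequest.getMethod()   -> "POST"
    // httpRequest.getEndpoint() -> "/_snapshot/my_repository/_cleanup"
    // master_timeout and timeout are carried as query parameters taken from the request
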
@@ -21,7 +21,9 @@ package org.elasticsearch.client.ml;

 import org.elasticsearch.client.Validatable;
 import org.elasticsearch.client.ValidationException;
+import org.elasticsearch.client.ml.dataframe.QueryConfig;
 import org.elasticsearch.client.ml.dataframe.evaluation.Evaluation;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
@@ -37,20 +39,25 @@ import java.util.Objects;
 import java.util.Optional;

 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
 import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;

 public class EvaluateDataFrameRequest implements ToXContentObject, Validatable {

     private static final ParseField INDEX = new ParseField("index");
+    private static final ParseField QUERY = new ParseField("query");
     private static final ParseField EVALUATION = new ParseField("evaluation");

     @SuppressWarnings("unchecked")
     private static final ConstructingObjectParser<EvaluateDataFrameRequest, Void> PARSER =
         new ConstructingObjectParser<>(
-            "evaluate_data_frame_request", true, args -> new EvaluateDataFrameRequest((List<String>) args[0], (Evaluation) args[1]));
+            "evaluate_data_frame_request",
+            true,
+            args -> new EvaluateDataFrameRequest((List<String>) args[0], (QueryConfig) args[1], (Evaluation) args[2]));

     static {
         PARSER.declareStringArray(constructorArg(), INDEX);
+        PARSER.declareObject(optionalConstructorArg(), (p, c) -> QueryConfig.fromXContent(p), QUERY);
         PARSER.declareObject(constructorArg(), (p, c) -> parseEvaluation(p), EVALUATION);
     }

@@ -67,14 +74,16 @@ public class EvaluateDataFrameRequest implements ToXContentObject, Validatable {
     }

     private List<String> indices;
+    private QueryConfig queryConfig;
     private Evaluation evaluation;

-    public EvaluateDataFrameRequest(String index, Evaluation evaluation) {
-        this(Arrays.asList(index), evaluation);
+    public EvaluateDataFrameRequest(String index, @Nullable QueryConfig queryConfig, Evaluation evaluation) {
+        this(Arrays.asList(index), queryConfig, evaluation);
     }

-    public EvaluateDataFrameRequest(List<String> indices, Evaluation evaluation) {
+    public EvaluateDataFrameRequest(List<String> indices, @Nullable QueryConfig queryConfig, Evaluation evaluation) {
         setIndices(indices);
+        setQueryConfig(queryConfig);
         setEvaluation(evaluation);
     }

@@ -87,6 +96,14 @@ public class EvaluateDataFrameRequest implements ToXContentObject, Validatable {
         this.indices = new ArrayList<>(indices);
     }

+    public QueryConfig getQueryConfig() {
+        return queryConfig;
+    }
+
+    public final void setQueryConfig(QueryConfig queryConfig) {
+        this.queryConfig = queryConfig;
+    }
+
     public Evaluation getEvaluation() {
         return evaluation;
     }
@@ -111,18 +128,22 @@ public class EvaluateDataFrameRequest implements ToXContentObject, Validatable {

     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        return builder
-            .startObject()
-            .array(INDEX.getPreferredName(), indices.toArray())
+        builder.startObject();
+        builder.array(INDEX.getPreferredName(), indices.toArray());
+        if (queryConfig != null) {
+            builder.field(QUERY.getPreferredName(), queryConfig.getQuery());
+        }
+        builder
             .startObject(EVALUATION.getPreferredName())
             .field(evaluation.getName(), evaluation)
-            .endObject()
             .endObject();
+        builder.endObject();
+        return builder;
     }

     @Override
     public int hashCode() {
-        return Objects.hash(indices, evaluation);
+        return Objects.hash(indices, queryConfig, evaluation);
     }

     @Override
@@ -131,6 +152,7 @@ public class EvaluateDataFrameRequest implements ToXContentObject, Validatable {
         if (o == null || getClass() != o.getClass()) return false;
         EvaluateDataFrameRequest that = (EvaluateDataFrameRequest) o;
         return Objects.equals(indices, that.indices)
+            && Objects.equals(queryConfig, that.queryConfig)
             && Objects.equals(evaluation, that.evaluation);
     }
 }
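
A construction sketch for the extended request; the index name and query are illustrative, and the new QueryConfig argument may be passed as null to evaluate every document in the index:

    EvaluateDataFrameRequest request = new EvaluateDataFrameRequest(
        "my-evaluation-index",                                         // index holding the documents to evaluate
        new QueryConfig(QueryBuilders.termQuery("dataset", "blue")),   // optional filter; null means no filtering
        new BinarySoftClassification("label", "p", PrecisionMetric.at(0.5)));
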
@@ -36,6 +36,7 @@ import org.elasticsearch.client.ml.DeleteForecastRequest;
 import org.elasticsearch.client.ml.DeleteJobRequest;
 import org.elasticsearch.client.ml.DeleteModelSnapshotRequest;
 import org.elasticsearch.client.ml.EvaluateDataFrameRequest;
+import org.elasticsearch.client.ml.EvaluateDataFrameRequestTests;
 import org.elasticsearch.client.ml.FindFileStructureRequest;
 import org.elasticsearch.client.ml.FindFileStructureRequestTests;
 import org.elasticsearch.client.ml.FlushJobRequest;
@@ -85,9 +86,6 @@ import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests;
 import org.elasticsearch.client.ml.dataframe.DataFrameAnalyticsConfig;
 import org.elasticsearch.client.ml.dataframe.MlDataFrameAnalysisNamedXContentProvider;
 import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider;
-import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.BinarySoftClassification;
-import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.PrecisionMetric;
-import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.RecallMetric;
 import org.elasticsearch.client.ml.filestructurefinder.FileStructure;
 import org.elasticsearch.client.ml.job.config.AnalysisConfig;
 import org.elasticsearch.client.ml.job.config.Detector;
@@ -779,13 +777,7 @@ public class MLRequestConvertersTests extends ESTestCase {
     }

     public void testEvaluateDataFrame() throws IOException {
-        EvaluateDataFrameRequest evaluateRequest =
-            new EvaluateDataFrameRequest(
-                Arrays.asList(generateRandomStringArray(1, 10, false, false)),
-                new BinarySoftClassification(
-                    randomAlphaOfLengthBetween(1, 10),
-                    randomAlphaOfLengthBetween(1, 10),
-                    PrecisionMetric.at(0.5), RecallMetric.at(0.6, 0.7)));
+        EvaluateDataFrameRequest evaluateRequest = EvaluateDataFrameRequestTests.createRandom();
         Request request = MLRequestConverters.evaluateDataFrame(evaluateRequest);
         assertEquals(HttpPost.METHOD_NAME, request.getMethod());
         assertEquals("/_ml/data_frame/_evaluate", request.getEndpoint());
@@ -149,6 +149,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.query.MatchAllQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.search.SearchHit;
 import org.junit.After;
@@ -1455,7 +1456,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
     public void testStopDataFrameAnalyticsConfig() throws Exception {
         String sourceIndex = "stop-test-source-index";
         String destIndex = "stop-test-dest-index";
-        createIndex(sourceIndex, mappingForClassification());
+        createIndex(sourceIndex, defaultMappingForTest());
         highLevelClient().index(new IndexRequest(sourceIndex).source(XContentType.JSON, "total", 10000)
             .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT);
@@ -1553,27 +1554,28 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
         assertThat(exception.status().getStatus(), equalTo(404));
     }

-    public void testEvaluateDataFrame() throws IOException {
+    public void testEvaluateDataFrame_BinarySoftClassification() throws IOException {
         String indexName = "evaluate-test-index";
         createIndex(indexName, mappingForClassification());
         BulkRequest bulk = new BulkRequest()
             .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
-            .add(docForClassification(indexName, false, 0.1))  // #0
-            .add(docForClassification(indexName, false, 0.2))  // #1
-            .add(docForClassification(indexName, false, 0.3))  // #2
-            .add(docForClassification(indexName, false, 0.4))  // #3
-            .add(docForClassification(indexName, false, 0.7))  // #4
-            .add(docForClassification(indexName, true, 0.2))   // #5
-            .add(docForClassification(indexName, true, 0.3))   // #6
-            .add(docForClassification(indexName, true, 0.4))   // #7
-            .add(docForClassification(indexName, true, 0.8))   // #8
-            .add(docForClassification(indexName, true, 0.9));  // #9
+            .add(docForClassification(indexName, "blue", false, 0.1))   // #0
+            .add(docForClassification(indexName, "blue", false, 0.2))   // #1
+            .add(docForClassification(indexName, "blue", false, 0.3))   // #2
+            .add(docForClassification(indexName, "blue", false, 0.4))   // #3
+            .add(docForClassification(indexName, "blue", false, 0.7))   // #4
+            .add(docForClassification(indexName, "blue", true, 0.2))    // #5
+            .add(docForClassification(indexName, "green", true, 0.3))   // #6
+            .add(docForClassification(indexName, "green", true, 0.4))   // #7
+            .add(docForClassification(indexName, "green", true, 0.8))   // #8
+            .add(docForClassification(indexName, "green", true, 0.9));  // #9
         highLevelClient().bulk(bulk, RequestOptions.DEFAULT);

         MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
         EvaluateDataFrameRequest evaluateDataFrameRequest =
             new EvaluateDataFrameRequest(
                 indexName,
+                null,
                 new BinarySoftClassification(
                     actualField,
                     probabilityField,
@@ -1624,7 +1626,48 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
         assertThat(curvePointAtThreshold1.getTruePositiveRate(), equalTo(0.0));
         assertThat(curvePointAtThreshold1.getFalsePositiveRate(), equalTo(0.0));
         assertThat(curvePointAtThreshold1.getThreshold(), equalTo(1.0));
+    }

+    public void testEvaluateDataFrame_BinarySoftClassification_WithQuery() throws IOException {
+        String indexName = "evaluate-with-query-test-index";
+        createIndex(indexName, mappingForClassification());
+        BulkRequest bulk = new BulkRequest()
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            .add(docForClassification(indexName, "blue", true, 1.0))   // #0
+            .add(docForClassification(indexName, "blue", true, 1.0))   // #1
+            .add(docForClassification(indexName, "blue", true, 1.0))   // #2
+            .add(docForClassification(indexName, "blue", true, 1.0))   // #3
+            .add(docForClassification(indexName, "blue", true, 0.0))   // #4
+            .add(docForClassification(indexName, "blue", true, 0.0))   // #5
+            .add(docForClassification(indexName, "green", true, 0.0))  // #6
+            .add(docForClassification(indexName, "green", true, 0.0))  // #7
+            .add(docForClassification(indexName, "green", true, 0.0))  // #8
+            .add(docForClassification(indexName, "green", true, 1.0)); // #9
+        highLevelClient().bulk(bulk, RequestOptions.DEFAULT);
+
+        MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
+        EvaluateDataFrameRequest evaluateDataFrameRequest =
+            new EvaluateDataFrameRequest(
+                indexName,
+                // Request only "blue" subset to be evaluated
+                new QueryConfig(QueryBuilders.termQuery(datasetField, "blue")),
+                new BinarySoftClassification(actualField, probabilityField, ConfusionMatrixMetric.at(0.5)));
+
+        EvaluateDataFrameResponse evaluateDataFrameResponse =
+            execute(evaluateDataFrameRequest, machineLearningClient::evaluateDataFrame, machineLearningClient::evaluateDataFrameAsync);
+        assertThat(evaluateDataFrameResponse.getEvaluationName(), equalTo(BinarySoftClassification.NAME));
+        assertThat(evaluateDataFrameResponse.getMetrics().size(), equalTo(1));
+
+        ConfusionMatrixMetric.Result confusionMatrixResult = evaluateDataFrameResponse.getMetricByName(ConfusionMatrixMetric.NAME);
+        assertThat(confusionMatrixResult.getMetricName(), equalTo(ConfusionMatrixMetric.NAME));
+        ConfusionMatrixMetric.ConfusionMatrix confusionMatrix = confusionMatrixResult.getScoreByThreshold("0.5");
+        assertThat(confusionMatrix.getTruePositives(), equalTo(4L));   // docs #0, #1, #2 and #3
+        assertThat(confusionMatrix.getFalsePositives(), equalTo(0L));
+        assertThat(confusionMatrix.getTrueNegatives(), equalTo(0L));
+        assertThat(confusionMatrix.getFalseNegatives(), equalTo(2L));  // docs #4 and #5
+    }
+
+    public void testEvaluateDataFrame_Regression() throws IOException {
         String regressionIndex = "evaluate-regression-test-index";
         createIndex(regressionIndex, mappingForRegression());
         BulkRequest regressionBulk = new BulkRequest()
@@ -1641,10 +1684,14 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
             .add(docForRegression(regressionIndex, 0.5, 0.9));  // #9
         highLevelClient().bulk(regressionBulk, RequestOptions.DEFAULT);

-        evaluateDataFrameRequest = new EvaluateDataFrameRequest(regressionIndex,
+        MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
+        EvaluateDataFrameRequest evaluateDataFrameRequest =
+            new EvaluateDataFrameRequest(
+                regressionIndex,
+                null,
                 new Regression(actualRegression, probabilityRegression, new MeanSquaredErrorMetric(), new RSquaredMetric()));

-        evaluateDataFrameResponse =
+        EvaluateDataFrameResponse evaluateDataFrameResponse =
             execute(evaluateDataFrameRequest, machineLearningClient::evaluateDataFrame, machineLearningClient::evaluateDataFrameAsync);
         assertThat(evaluateDataFrameResponse.getEvaluationName(), equalTo(Regression.NAME));
         assertThat(evaluateDataFrameResponse.getMetrics().size(), equalTo(2));
@@ -1671,12 +1718,16 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
             .endObject();
     }

+    private static final String datasetField = "dataset";
     private static final String actualField = "label";
     private static final String probabilityField = "p";

     private static XContentBuilder mappingForClassification() throws IOException {
         return XContentFactory.jsonBuilder().startObject()
             .startObject("properties")
+                .startObject(datasetField)
+                    .field("type", "keyword")
+                .endObject()
                 .startObject(actualField)
                     .field("type", "keyword")
                 .endObject()
@@ -1687,10 +1738,10 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
             .endObject();
     }

-    private static IndexRequest docForClassification(String indexName, boolean isTrue, double p) {
+    private static IndexRequest docForClassification(String indexName, String dataset, boolean isTrue, double p) {
         return new IndexRequest()
             .index(indexName)
-            .source(XContentType.JSON, actualField, Boolean.toString(isTrue), probabilityField, p);
+            .source(XContentType.JSON, datasetField, dataset, actualField, Boolean.toString(isTrue), probabilityField, p);
     }

     private static final String actualRegression = "regression_actual";
@@ -1725,7 +1776,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
         BulkRequest bulk1 = new BulkRequest()
             .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
         for (int i = 0; i < 10; ++i) {
-            bulk1.add(docForClassification(indexName, randomBoolean(), randomDoubleBetween(0.0, 1.0, true)));
+            bulk1.add(docForClassification(indexName, randomAlphaOfLength(10), randomBoolean(), randomDoubleBetween(0.0, 1.0, true)));
         }
         highLevelClient().bulk(bulk1, RequestOptions.DEFAULT);

@@ -1751,7 +1802,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
         BulkRequest bulk2 = new BulkRequest()
             .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
         for (int i = 10; i < 100; ++i) {
-            bulk2.add(docForClassification(indexName, randomBoolean(), randomDoubleBetween(0.0, 1.0, true)));
+            bulk2.add(docForClassification(indexName, randomAlphaOfLength(10), randomBoolean(), randomDoubleBetween(0.0, 1.0, true)));
         }
         highLevelClient().bulk(bulk2, RequestOptions.DEFAULT);

@@ -43,6 +43,7 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.query.MatchAllQueryBuilder;
 import org.elasticsearch.index.query.MatchQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.index.query.ScriptQueryBuilder;
@@ -81,6 +82,7 @@ import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.search.suggest.Suggest;
 import org.elasticsearch.search.suggest.SuggestBuilder;
 import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
+import org.elasticsearch.xpack.core.index.query.PinnedQueryBuilder;
 import org.hamcrest.Matchers;
 import org.junit.Before;

@@ -92,7 +94,10 @@ import java.util.List;
 import java.util.Map;

 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
 import static org.hamcrest.Matchers.both;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.either;
@@ -1374,6 +1379,18 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
         assertEquals(3, countResponse.getCount());
     }

+    public void testSearchWithBasicLicensedQuery() throws IOException {
+        SearchRequest searchRequest = new SearchRequest("index");
+        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
+        PinnedQueryBuilder pinnedQuery = new PinnedQueryBuilder(new MatchAllQueryBuilder(), "2", "1");
+        searchSourceBuilder.query(pinnedQuery);
+        searchRequest.source(searchSourceBuilder);
+        SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
+        assertSearchHeader(searchResponse);
+        assertFirstHit(searchResponse, hasId("2"));
+        assertSecondHit(searchResponse, hasId("1"));
+    }
+
     private static void assertCountHeader(CountResponse countResponse) {
         assertEquals(0, countResponse.getSkippedShards());
         assertEquals(0, countResponse.getFailedShards());
@@ -20,6 +20,8 @@
 package org.elasticsearch.client;

 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse;
 import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
@@ -135,6 +137,17 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
         assertThat(response.getNodes().size(), equalTo(1));
     }

+    public void testCleanupRepository() throws IOException {
+        AcknowledgedResponse putRepositoryResponse = createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}");
+        assertTrue(putRepositoryResponse.isAcknowledged());
+
+        CleanupRepositoryRequest request = new CleanupRepositoryRequest("test");
+        CleanupRepositoryResponse response = execute(request, highLevelClient().snapshot()::cleanupRepository,
+            highLevelClient().snapshot()::cleanupRepositoryAsync);
+        assertThat(response.result().bytes(), equalTo(0L));
+        assertThat(response.result().blobs(), equalTo(0L));
+    }
+
     public void testCreateSnapshot() throws IOException {
         String repository = "test_repository";
         assertTrue(createTestRepository(repository, FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged());
@@ -48,6 +48,7 @@ import org.elasticsearch.client.ml.DeleteForecastRequest;
 import org.elasticsearch.client.ml.DeleteJobRequest;
 import org.elasticsearch.client.ml.DeleteJobResponse;
 import org.elasticsearch.client.ml.DeleteModelSnapshotRequest;
+import org.elasticsearch.client.ml.EstimateMemoryUsageResponse;
 import org.elasticsearch.client.ml.EvaluateDataFrameRequest;
 import org.elasticsearch.client.ml.EvaluateDataFrameResponse;
 import org.elasticsearch.client.ml.FindFileStructureRequest;
@@ -177,7 +178,6 @@ import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 import org.elasticsearch.tasks.TaskId;
-import org.hamcrest.CoreMatchers;
 import org.junit.After;

 import java.io.IOException;
@@ -194,11 +194,13 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;

+import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.closeTo;
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.lessThan;
 import static org.hamcrest.core.Is.is;

 public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
@@ -3175,16 +3177,16 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
         BulkRequest bulkRequest =
             new BulkRequest(indexName)
                 .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
-                .add(new IndexRequest().source(XContentType.JSON, "label", false, "p", 0.1))  // #0
-                .add(new IndexRequest().source(XContentType.JSON, "label", false, "p", 0.2))  // #1
-                .add(new IndexRequest().source(XContentType.JSON, "label", false, "p", 0.3))  // #2
-                .add(new IndexRequest().source(XContentType.JSON, "label", false, "p", 0.4))  // #3
-                .add(new IndexRequest().source(XContentType.JSON, "label", false, "p", 0.7))  // #4
-                .add(new IndexRequest().source(XContentType.JSON, "label", true, "p", 0.2))   // #5
-                .add(new IndexRequest().source(XContentType.JSON, "label", true, "p", 0.3))   // #6
-                .add(new IndexRequest().source(XContentType.JSON, "label", true, "p", 0.4))   // #7
-                .add(new IndexRequest().source(XContentType.JSON, "label", true, "p", 0.8))   // #8
-                .add(new IndexRequest().source(XContentType.JSON, "label", true, "p", 0.9));  // #9
+                .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", false, "p", 0.1))  // #0
+                .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", false, "p", 0.2))  // #1
+                .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", false, "p", 0.3))  // #2
+                .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", false, "p", 0.4))  // #3
+                .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", false, "p", 0.7))  // #4
+                .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", true, "p", 0.2))   // #5
+                .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", true, "p", 0.3))   // #6
+                .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", true, "p", 0.4))   // #7
+                .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", true, "p", 0.8))   // #8
+                .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", true, "p", 0.9));  // #9
         RestHighLevelClient client = highLevelClient();
         client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
         client.bulk(bulkRequest, RequestOptions.DEFAULT);
@@ -3192,14 +3194,15 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
             // tag::evaluate-data-frame-request
             EvaluateDataFrameRequest request = new EvaluateDataFrameRequest( // <1>
                 indexName, // <2>
-                new BinarySoftClassification( // <3>
-                    "label", // <4>
-                    "p", // <5>
-                    // Evaluation metrics // <6>
-                    PrecisionMetric.at(0.4, 0.5, 0.6), // <7>
-                    RecallMetric.at(0.5, 0.7), // <8>
-                    ConfusionMatrixMetric.at(0.5), // <9>
-                    AucRocMetric.withCurve())); // <10>
+                new QueryConfig(QueryBuilders.termQuery("dataset", "blue")), // <3>
+                new BinarySoftClassification( // <4>
+                    "label", // <5>
+                    "p", // <6>
+                    // Evaluation metrics // <7>
+                    PrecisionMetric.at(0.4, 0.5, 0.6), // <8>
+                    RecallMetric.at(0.5, 0.7), // <9>
+                    ConfusionMatrixMetric.at(0.5), // <10>
+                    AucRocMetric.withCurve())); // <11>
             // end::evaluate-data-frame-request

             // tag::evaluate-data-frame-execute
@@ -3220,14 +3223,15 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
                 metrics.stream().map(m -> m.getMetricName()).collect(Collectors.toList()),
                 containsInAnyOrder(PrecisionMetric.NAME, RecallMetric.NAME, ConfusionMatrixMetric.NAME, AucRocMetric.NAME));
             assertThat(precision, closeTo(0.6, 1e-9));
-            assertThat(confusionMatrix.getTruePositives(), CoreMatchers.equalTo(2L));   // docs #8 and #9
-            assertThat(confusionMatrix.getFalsePositives(), CoreMatchers.equalTo(1L));  // doc #4
-            assertThat(confusionMatrix.getTrueNegatives(), CoreMatchers.equalTo(4L));   // docs #0, #1, #2 and #3
-            assertThat(confusionMatrix.getFalseNegatives(), CoreMatchers.equalTo(3L));  // docs #5, #6 and #7
+            assertThat(confusionMatrix.getTruePositives(), equalTo(2L));   // docs #8 and #9
+            assertThat(confusionMatrix.getFalsePositives(), equalTo(1L));  // doc #4
+            assertThat(confusionMatrix.getTrueNegatives(), equalTo(4L));   // docs #0, #1, #2 and #3
+            assertThat(confusionMatrix.getFalseNegatives(), equalTo(3L));  // docs #5, #6 and #7
         }
         {
             EvaluateDataFrameRequest request = new EvaluateDataFrameRequest(
                 indexName,
+                new QueryConfig(QueryBuilders.termQuery("dataset", "blue")),
                 new BinarySoftClassification(
                     "label",
                     "p",
@@ -3262,6 +3266,72 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
         }
     }

+    public void testEstimateMemoryUsage() throws Exception {
+        createIndex("estimate-test-source-index");
+        BulkRequest bulkRequest =
+            new BulkRequest("estimate-test-source-index")
+                .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+        for (int i = 0; i < 10; ++i) {
+            bulkRequest.add(new IndexRequest().source(XContentType.JSON, "timestamp", 123456789L, "total", 10L));
+        }
+        RestHighLevelClient client = highLevelClient();
+        client.bulk(bulkRequest, RequestOptions.DEFAULT);
+        {
+            // tag::estimate-memory-usage-request
+            DataFrameAnalyticsConfig config = DataFrameAnalyticsConfig.builder()
+                .setSource(DataFrameAnalyticsSource.builder().setIndex("estimate-test-source-index").build())
+                .setAnalysis(OutlierDetection.createDefault())
+                .build();
+            PutDataFrameAnalyticsRequest request = new PutDataFrameAnalyticsRequest(config); // <1>
+            // end::estimate-memory-usage-request
+
+            // tag::estimate-memory-usage-execute
+            EstimateMemoryUsageResponse response = client.machineLearning().estimateMemoryUsage(request, RequestOptions.DEFAULT);
+            // end::estimate-memory-usage-execute
+
+            // tag::estimate-memory-usage-response
+            ByteSizeValue expectedMemoryWithoutDisk = response.getExpectedMemoryWithoutDisk(); // <1>
+            ByteSizeValue expectedMemoryWithDisk = response.getExpectedMemoryWithDisk(); // <2>
+            // end::estimate-memory-usage-response
+
+            // We are pretty liberal here as this test does not aim at verifying concrete numbers but rather end-to-end user workflow.
+            ByteSizeValue lowerBound = new ByteSizeValue(1, ByteSizeUnit.KB);
+            ByteSizeValue upperBound = new ByteSizeValue(1, ByteSizeUnit.GB);
+            assertThat(expectedMemoryWithoutDisk, allOf(greaterThan(lowerBound), lessThan(upperBound)));
+            assertThat(expectedMemoryWithDisk, allOf(greaterThan(lowerBound), lessThan(upperBound)));
+        }
+        {
+            DataFrameAnalyticsConfig config = DataFrameAnalyticsConfig.builder()
+                .setSource(DataFrameAnalyticsSource.builder().setIndex("estimate-test-source-index").build())
+                .setAnalysis(OutlierDetection.createDefault())
+                .build();
+            PutDataFrameAnalyticsRequest request = new PutDataFrameAnalyticsRequest(config);
+            // tag::estimate-memory-usage-execute-listener
+            ActionListener<EstimateMemoryUsageResponse> listener = new ActionListener<EstimateMemoryUsageResponse>() {
+                @Override
+                public void onResponse(EstimateMemoryUsageResponse response) {
+                    // <1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            };
+            // end::estimate-memory-usage-execute-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::estimate-memory-usage-execute-async
+            client.machineLearning().estimateMemoryUsageAsync(request, RequestOptions.DEFAULT, listener); // <1>
+            // end::estimate-memory-usage-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+
     public void testCreateFilter() throws Exception {
         RestHighLevelClient client = highLevelClient();
         {
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.dataframe.QueryConfig;
+import org.elasticsearch.client.ml.dataframe.evaluation.Evaluation;
+import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider;
+import org.elasticsearch.client.ml.dataframe.evaluation.regression.RegressionTests;
+import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.BinarySoftClassificationTests;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Predicate;
+
+public class EvaluateDataFrameRequestTests extends AbstractXContentTestCase<EvaluateDataFrameRequest> {
+
+    public static EvaluateDataFrameRequest createRandom() {
+        int indicesCount = randomIntBetween(1, 5);
+        List<String> indices = new ArrayList<>(indicesCount);
+        for (int i = 0; i < indicesCount; i++) {
+            indices.add(randomAlphaOfLength(10));
+        }
+        QueryConfig queryConfig = randomBoolean()
+            ? new QueryConfig(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)))
+            : null;
+        Evaluation evaluation = randomBoolean() ? BinarySoftClassificationTests.createRandom() : RegressionTests.createRandom();
+        return new EvaluateDataFrameRequest(indices, queryConfig, evaluation);
+    }
+
+    @Override
+    protected EvaluateDataFrameRequest createTestInstance() {
+        return createRandom();
+    }
+
+    @Override
+    protected EvaluateDataFrameRequest doParseInstance(XContentParser parser) throws IOException {
+        return EvaluateDataFrameRequest.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        // allow unknown fields in root only
+        return field -> !field.isEmpty();
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        List<NamedXContentRegistry.Entry> namedXContent = new ArrayList<>();
+        namedXContent.addAll(new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents());
+        namedXContent.addAll(new MlEvaluationNamedXContentProvider().getNamedXContentParsers());
+        return new NamedXContentRegistry(namedXContent);
+    }
+}
@@ -36,8 +36,7 @@ public class RegressionTests extends AbstractXContentTestCase<Regression> {
         return new NamedXContentRegistry(new MlEvaluationNamedXContentProvider().getNamedXContentParsers());
     }

-    @Override
-    protected Regression createTestInstance() {
+    public static Regression createRandom() {
         List<EvaluationMetric> metrics = new ArrayList<>();
         if (randomBoolean()) {
             metrics.add(new MeanSquaredErrorMetric());
@@ -50,6 +49,11 @@ public class RegressionTests extends AbstractXContentTestCase<Regression> {
             new Regression(randomAlphaOfLength(10), randomAlphaOfLength(10), metrics.isEmpty() ? null : metrics);
     }

+    @Override
+    protected Regression createTestInstance() {
+        return createRandom();
+    }
+
     @Override
     protected Regression doParseInstance(XContentParser parser) throws IOException {
         return Regression.fromXContent(parser);
@@ -37,8 +37,7 @@ public class BinarySoftClassificationTests extends AbstractXContentTestCase<BinarySoftClassification> {
         return new NamedXContentRegistry(new MlEvaluationNamedXContentProvider().getNamedXContentParsers());
     }

-    @Override
-    protected BinarySoftClassification createTestInstance() {
+    public static BinarySoftClassification createRandom() {
         List<EvaluationMetric> metrics = new ArrayList<>();
         if (randomBoolean()) {
             metrics.add(new AucRocMetric(randomBoolean()));
@ -66,6 +65,11 @@ public class BinarySoftClassificationTests extends AbstractXContentTestCase<Bina
|
||||||
new BinarySoftClassification(randomAlphaOfLength(10), randomAlphaOfLength(10), metrics.isEmpty() ? null : metrics);
|
new BinarySoftClassification(randomAlphaOfLength(10), randomAlphaOfLength(10), metrics.isEmpty() ? null : metrics);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected BinarySoftClassification createTestInstance() {
|
||||||
|
return createRandom();
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected BinarySoftClassification doParseInstance(XContentParser parser) throws IOException {
|
protected BinarySoftClassification doParseInstance(XContentParser parser) throws IOException {
|
||||||
return BinarySoftClassification.fromXContent(parser);
|
return BinarySoftClassification.fromXContent(parser);
|
||||||
|
|
|
@@ -80,22 +80,8 @@ if [ ! -x "$DAEMON" ]; then
 	exit 1
 fi
 
-checkJava() {
-	if [ ! -z "${JAVA_HOME}" ]; then
-		JAVA="${JAVA_HOME}"/bin/java
-	else
-		JAVA="${ES_HOME}"/jdk/bin/java
-	fi
-
-	if [ ! -x "$JAVA" ]; then
-		echo "could not find java in JAVA_HOME or bundled at ${JAVA}"
-		exit 1
-	fi
-}
-
 case "$1" in
   start)
-	checkJava
 
 	log_daemon_msg "Starting $DESC"
 
@@ -67,21 +67,7 @@ if [ ! -x "$exec" ]; then
     exit 1
 fi
 
-checkJava() {
-    if [ ! -z "${JAVA_HOME}" ]; then
-        JAVA="${JAVA_HOME}"/bin/java
-    else
-        JAVA="${ES_HOME}"/jdk/bin/java
-    fi
-
-    if [ ! -x "$JAVA" ]; then
-        echo "could not find java in JAVA_HOME or bundled at ${JAVA}"
-        exit 1
-    fi
-}
-
 start() {
-    checkJava
     [ -x $exec ] || exit 5
 
     if [ -n "$MAX_OPEN_FILES" ]; then
@@ -66,7 +66,7 @@ class ListPluginsCommand extends EnvironmentAwareCommand {
         PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin));
         terminal.println(Terminal.Verbosity.VERBOSE, info.toString(prefix));
         if (info.getElasticsearchVersion().equals(Version.CURRENT) == false) {
-            terminal.println("WARNING: plugin [" + info.getName() + "] was built for Elasticsearch version " + info.getVersion() +
+            terminal.errorPrintln("WARNING: plugin [" + info.getName() + "] was built for Elasticsearch version " + info.getVersion() +
                 " but version " + Version.CURRENT + " is required");
         }
     }
@@ -789,7 +789,7 @@ public class InstallPluginCommandTests extends ESTestCase {
     public void testBatchFlag() throws Exception {
         MockTerminal terminal = new MockTerminal();
         installPlugin(terminal, true);
-        assertThat(terminal.getOutput(), containsString("WARNING: plugin requires additional permissions"));
+        assertThat(terminal.getErrorOutput(), containsString("WARNING: plugin requires additional permissions"));
         assertThat(terminal.getOutput(), containsString("-> Downloading"));
         // No progress bar in batch mode
         assertThat(terminal.getOutput(), not(containsString("100%")));
@@ -1225,7 +1225,7 @@ public class InstallPluginCommandTests extends ESTestCase {
         UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1()));
         assertEquals("installation aborted by user", e.getMessage());
 
-        assertThat(terminal.getOutput(), containsString("WARNING: " + warning));
+        assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning));
         try (Stream<Path> fileStream = Files.list(env.v2().pluginsFile())) {
             assertThat(fileStream.collect(Collectors.toList()), empty());
         }
@@ -1238,7 +1238,7 @@ public class InstallPluginCommandTests extends ESTestCase {
         terminal.addTextInput("n");
         e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1()));
         assertEquals("installation aborted by user", e.getMessage());
-        assertThat(terminal.getOutput(), containsString("WARNING: " + warning));
+        assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning));
         try (Stream<Path> fileStream = Files.list(env.v2().pluginsFile())) {
             assertThat(fileStream.collect(Collectors.toList()), empty());
         }
@@ -1251,7 +1251,7 @@ public class InstallPluginCommandTests extends ESTestCase {
         }
         installPlugin(pluginZip, env.v1());
         for (String warning : warnings) {
-            assertThat(terminal.getOutput(), containsString("WARNING: " + warning));
+            assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning));
         }
     }
 
@@ -247,8 +247,11 @@ public class ListPluginsCommandTests extends ESTestCase {
         MockTerminal terminal = listPlugins(home);
         String message = "plugin [fake_plugin1] was built for Elasticsearch version 1.0 but version " + Version.CURRENT + " is required";
         assertEquals(
-            "fake_plugin1\n" + "WARNING: " + message + "\n" + "fake_plugin2\n",
+            "fake_plugin1\nfake_plugin2\n",
             terminal.getOutput());
+        assertEquals(
+            "WARNING: " + message + "\n",
+            terminal.getErrorOutput());
 
         String[] params = {"-s"};
         terminal = listPlugins(home, params);
@@ -237,11 +237,14 @@ public class RemovePluginCommandTests extends ESTestCase {
                 return false;
             }
         }.main(new String[] { "-Epath.home=" + home, "fake" }, terminal);
-        try (BufferedReader reader = new BufferedReader(new StringReader(terminal.getOutput()))) {
+        try (BufferedReader reader = new BufferedReader(new StringReader(terminal.getOutput()));
+             BufferedReader errorReader = new BufferedReader(new StringReader(terminal.getErrorOutput()))
+        ) {
             assertEquals("-> removing [fake]...", reader.readLine());
             assertEquals("ERROR: plugin [fake] not found; run 'elasticsearch-plugin list' to get list of installed plugins",
-                reader.readLine());
+                errorReader.readLine());
             assertNull(reader.readLine());
+            assertNull(errorReader.readLine());
         }
     }
 
@@ -0,0 +1,35 @@
+--
+:api: estimate-memory-usage
+:request: PutDataFrameAnalyticsRequest
+:response: EstimateMemoryUsageResponse
+--
+[id="{upid}-{api}"]
+=== Estimate memory usage API
+
+The Estimate memory usage API is used to estimate memory usage of {dfanalytics}.
+Estimation results can be used when deciding the appropriate value for the `model_memory_limit` setting later on.
+
+The API accepts an +{request}+ object and returns an +{response}+.
+
+[id="{upid}-{api}-request"]
+==== Estimate memory usage Request
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-request]
+--------------------------------------------------
+<1> Constructing a new request containing a {dataframe-analytics-config} for which memory usage estimation should be performed
+
+include::../execution.asciidoc[]
+
+[id="{upid}-{api}-response"]
+==== Response
+
+The returned +{response}+ contains the memory usage estimates.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-response]
+--------------------------------------------------
+<1> Estimated memory usage under the assumption that the whole {dfanalytics} should happen in memory (i.e. without overflowing to disk).
+<2> Estimated memory usage under the assumption that overflowing to disk is allowed during {dfanalytics}.
@@ -18,14 +18,15 @@ include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
 <1> Constructing a new evaluation request
 <2> Reference to an existing index
-<3> Kind of evaluation to perform
-<4> Name of the field in the index. Its value denotes the actual (i.e. ground truth) label for an example. Must be either true or false
-<5> Name of the field in the index. Its value denotes the probability (as per some ML algorithm) of the example being classified as positive
-<6> The remaining parameters are the metrics to be calculated based on the two fields described above.
-<7> https://en.wikipedia.org/wiki/Precision_and_recall[Precision] calculated at thresholds: 0.4, 0.5 and 0.6
-<8> https://en.wikipedia.org/wiki/Precision_and_recall[Recall] calculated at thresholds: 0.5 and 0.7
-<9> https://en.wikipedia.org/wiki/Confusion_matrix[Confusion matrix] calculated at threshold 0.5
-<10> https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve[AuC ROC] calculated and the curve points returned
+<3> The query with which to select data from indices
+<4> Kind of evaluation to perform
+<5> Name of the field in the index. Its value denotes the actual (i.e. ground truth) label for an example. Must be either true or false
+<6> Name of the field in the index. Its value denotes the probability (as per some ML algorithm) of the example being classified as positive
+<7> The remaining parameters are the metrics to be calculated based on the two fields described above.
+<8> https://en.wikipedia.org/wiki/Precision_and_recall[Precision] calculated at thresholds: 0.4, 0.5 and 0.6
+<9> https://en.wikipedia.org/wiki/Precision_and_recall[Recall] calculated at thresholds: 0.5 and 0.7
+<10> https://en.wikipedia.org/wiki/Confusion_matrix[Confusion matrix] calculated at threshold 0.5
+<11> https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve[AuC ROC] calculated and the curve points returned
 
 include::../execution.asciidoc[]
 
@@ -85,6 +85,7 @@ This page lists all the available search queries with their corresponding `Query
 | {ref}/query-dsl-percolate-query.html[Percolate] | {percolate-ref}/PercolateQueryBuilder.html[PercolateQueryBuilder] |
 | {ref}/query-dsl-wrapper-query.html[Wrapper] | {query-ref}/WrapperQueryBuilder.html[WrapperQueryBuilder] | {query-ref}/QueryBuilders.html#wrapperQuery-java.lang.String-[QueryBuilders.wrapperQuery()]
 | {ref}/query-dsl-rank-feature-query.html[Rank Feature] | {mapper-extras-ref}/RankFeatureQuery.html[RankFeatureQueryBuilder] |
+| {ref}/query-dsl-pinned-query.html[Pinned Query] | The PinnedQueryBuilder is packaged as part of the xpack-core module |
 |======
 
 ==== Span queries
@@ -295,6 +295,7 @@ The Java High Level REST Client supports the following Machine Learning APIs:
 * <<{upid}-start-data-frame-analytics>>
 * <<{upid}-stop-data-frame-analytics>>
 * <<{upid}-evaluate-data-frame>>
+* <<{upid}-estimate-memory-usage>>
 * <<{upid}-put-filter>>
 * <<{upid}-get-filters>>
 * <<{upid}-update-filter>>
@@ -346,6 +347,7 @@ include::ml/delete-data-frame-analytics.asciidoc[]
 include::ml/start-data-frame-analytics.asciidoc[]
 include::ml/stop-data-frame-analytics.asciidoc[]
 include::ml/evaluate-data-frame.asciidoc[]
+include::ml/estimate-memory-usage.asciidoc[]
 include::ml/put-filter.asciidoc[]
 include::ml/get-filters.asciidoc[]
 include::ml/update-filter.asciidoc[]
@@ -98,6 +98,39 @@ dictionary to `$ES_HOME/config/userdict_ja.txt`:
 東京スカイツリー,東京 スカイツリー,トウキョウ スカイツリー,カスタム名詞
 -----------------------
 
+--
+
+You can also inline the rules directly in the tokenizer definition using
+the `user_dictionary_rules` option:
+
+[source,js]
+--------------------------------------------------
+PUT nori_sample
+{
+  "settings": {
+    "index": {
+      "analysis": {
+        "tokenizer": {
+          "kuromoji_user_dict": {
+            "type": "kuromoji_tokenizer",
+            "mode": "extended",
+            "user_dictionary_rules": ["東京スカイツリー,東京 スカイツリー,トウキョウ スカイツリー,カスタム名詞"]
+          }
+        },
+        "analyzer": {
+          "my_analyzer": {
+            "type": "custom",
+            "tokenizer": "kuromoji_user_dict"
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+--
 
 `nbest_cost`/`nbest_examples`::
 +
 --
@@ -21,33 +21,11 @@ ability to "exclude" (`-`), for example: `test*,-test3`.
 
 All multi index APIs support the following url query string parameters:
 
-[horizontal]
-`ignore_unavailable`::
-
-Controls whether to ignore if any specified indices are unavailable,
-including indices that don't exist or closed indices. Either `true` or `false`
-can be specified.
-
-`allow_no_indices`::
-
-Controls whether to fail if a wildcard indices expression results in no
-concrete indices. Either `true` or `false` can be specified. For example if
-the wildcard expression `foo*` is specified and no indices are available that
-start with `foo`, then depending on this setting the request will fail. This
-setting is also applicable when `_all`, `*`, or no index has been specified. This
-settings also applies for aliases, in case an alias points to a closed index.
-
-`expand_wildcards`::
-
-Controls what kind of concrete indices that wildcard indices expressions can expand
-to. If `open` is specified then the wildcard expression is expanded to only
-open indices. If `closed` is specified then the wildcard expression is
-expanded only to closed indices. Also both values (`open,closed`) can be
-specified to expand to all indices.
-+
-If `none` is specified then wildcard expansion will be disabled. If `all`
-is specified, wildcard expressions will expand to all indices (this is equivalent
-to specifying `open,closed`).
+include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards]
 
 The defaults settings for the above parameters depend on the API being used.
 
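As a rough illustration of the parameters these shared snippets document (the
index pattern here is hypothetical), a search that ignores missing indices,
fails when the wildcard matches nothing, and only expands to open indices
would look like:

[source,js]
--------------------------------------------------
GET /logs-*/_search?ignore_unavailable=true&allow_no_indices=false&expand_wildcards=open
--------------------------------------------------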
@@ -79,11 +79,11 @@ The API returns the following response:
 
 [source,txt]
 --------------------------------------------------
-alias  index filter routing.index routing.search
-alias1 test1 -      -             -
-alias2 test1 *      -             -
-alias3 test1 -      1             1
-alias4 test1 -      2             1,2
+alias  index filter routing.index routing.search is_write_index
+alias1 test1 -      -             -              -
+alias2 test1 *      -             -              -
+alias3 test1 -      1             1              -
+alias4 test1 -      2             1,2            -
 --------------------------------------------------
 // TESTRESPONSE[s/[*]/[*]/ non_json]
 
@@ -1,43 +1,25 @@
 [[docs-delete]]
 === Delete API
-
-The delete API allows to delete a JSON document from a specific
-index based on its id. The following example deletes the JSON document
-from an index called `twitter` with ID `1`:
-
-[source,js]
---------------------------------------------------
-DELETE /twitter/_doc/1
---------------------------------------------------
-// CONSOLE
-// TEST[setup:twitter]
-
-The result of the above delete operation is:
-
-[source,js]
---------------------------------------------------
-{
-    "_shards" : {
-        "total" : 2,
-        "failed" : 0,
-        "successful" : 2
-    },
-    "_index" : "twitter",
-    "_type" : "_doc",
-    "_id" : "1",
-    "_version" : 2,
-    "_primary_term": 1,
-    "_seq_no": 5,
-    "result": "deleted"
-}
---------------------------------------------------
-// TESTRESPONSE[s/"successful" : 2/"successful" : 1/]
-// TESTRESPONSE[s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
-// TESTRESPONSE[s/"_seq_no" : 5/"_seq_no" : $body._seq_no/]
+++++
+<titleabbrev>Delete</titleabbrev>
+++++
+
+Removes a JSON document from the specified index.
+
+[[docs-delete-api-request]]
+==== {api-request-title}
+
+`DELETE /<index>/_doc/<_id>`
+
+[[docs-delete-api-desc]]
+==== {api-description-title}
+
+You use DELETE to remove a document from an index. You must specify the
+index name and document ID.
 
 [float]
 [[optimistic-concurrency-control-delete]]
-==== Optimistic concurrency control
+===== Optimistic concurrency control
 
 Delete operations can be made conditional and only be performed if the last
 modification to the document was assigned the sequence number and primary
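As a rough illustration of such a conditional delete (the sequence number and
primary term shown are hypothetical values returned by a previous operation),
the request would look like:

[source,js]
--------------------------------------------------
DELETE /twitter/_doc/1?if_seq_no=5&if_primary_term=1
--------------------------------------------------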
@@ -47,7 +29,7 @@ and a status code of 409. See <<optimistic-concurrency-control>> for more detail
 
 [float]
 [[delete-versioning]]
-==== Versioning
+===== Versioning
 
 Each document indexed is versioned. When deleting a document, the `version` can
 be specified to make sure the relevant document we are trying to delete is
@@ -60,11 +42,17 @@ determined by the `index.gc_deletes` index setting and defaults to 60 seconds.
 
 [float]
 [[delete-routing]]
-==== Routing
+===== Routing
 
-When indexing using the ability to control the routing, in order to
-delete a document, the routing value should also be provided. For
-example:
+If routing is used during indexing, the routing value also needs to be
+specified to delete a document.
+
+If the `_routing` mapping is set to `required` and no routing value is
+specified, the delete API throws a `RoutingMissingException` and rejects
+the request.
+
+For example:
 
 ////
 Example to delete with routing
@@ -87,26 +75,21 @@ DELETE /twitter/_doc/1?routing=kimchy
 // CONSOLE
 // TEST[continued]
 
-The above will delete a tweet with id `1`, but will be routed based on the
-user. Note that issuing a delete without the correct routing will cause the
-document to not be deleted.
-
-When the `_routing` mapping is set as `required` and no routing value is
-specified, the delete API will throw a `RoutingMissingException` and reject
-the request.
+This request deletes the tweet with id `1`, but it is routed based on the
+user. The document is not deleted if the correct routing is not specified.
 
 [float]
 [[delete-index-creation]]
-==== Automatic index creation
+===== Automatic index creation
 
 If an <<docs-index_,external versioning variant>> is used,
-the delete operation automatically creates an index if it has not been
-created before (check out the <<indices-create-index,create index API>>
-for manually creating an index).
+the delete operation automatically creates the specified index if it does not
+exist. For information about manually creating indices, see
+<<indices-create-index,create index API>>.
 
 [float]
 [[delete-distributed]]
-==== Distributed
+===== Distributed
 
 The delete operation gets hashed into a specific shard id. It then gets
 redirected into the primary shard within that id group, and replicated
@@ -114,7 +97,7 @@ redirected into the primary shard within that id group, and replicated
 
 [float]
 [[delete-wait-for-active-shards]]
-==== Wait For Active Shards
+===== Wait for active shards
 
 When making delete requests, you can set the `wait_for_active_shards`
 parameter to require a minimum number of shard copies to be active
@@ -124,15 +107,14 @@ example.
 
 [float]
 [[delete-refresh]]
-==== Refresh
+===== Refresh
 
 Control when the changes made by this request are visible to search. See
 <<docs-refresh>>.
 
 [float]
 [[delete-timeout]]
-==== Timeout
+===== Timeout
 
 The primary shard assigned to perform the delete operation might not be
 available when the delete operation is executed. Some reasons for this
@@ -149,3 +131,68 @@ DELETE /twitter/_doc/1?timeout=5m
 --------------------------------------------------
 // CONSOLE
 // TEST[setup:twitter]
 
+[[docs-delete-api-path-params]]
+==== {api-path-parms-title}
+
+`<index>`::
+(Required, string) Name of the target index.
+
+`<_id>`::
+(Required, string) Unique identifier for the document.
+
+[[docs-delete-api-query-params]]
+==== {api-query-parms-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-seq-no]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-primary-term]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-pipeline]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-routing]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=timeout]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version-type]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-wait-for-active-shards]
+
+[[docs-delete-api-example]]
+==== {api-examples-title}
+
+Delete the JSON document `1` from the `twitter` index:
+
+[source,js]
+--------------------------------------------------
+DELETE /twitter/_doc/1
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+The API returns the following result:
+
+[source,js]
+--------------------------------------------------
+{
+    "_shards" : {
+        "total" : 2,
+        "failed" : 0,
+        "successful" : 2
+    },
+    "_index" : "twitter",
+    "_type" : "_doc",
+    "_id" : "1",
+    "_version" : 2,
+    "_primary_term": 1,
+    "_seq_no": 5,
+    "result": "deleted"
+}
+--------------------------------------------------
+// TESTRESPONSE[s/"successful" : 2/"successful" : 1/]
+// TESTRESPONSE[s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
+// TESTRESPONSE[s/"_seq_no" : 5/"_seq_no" : $body._seq_no/]
@@ -1,9 +1,235 @@
 [[docs-get]]
 === Get API
-The get API allows to get a JSON document from the index based on
-its id. The following example gets a JSON document from an index called
-twitter with id valued 0:
+++++
+<titleabbrev>Get</titleabbrev>
+++++
+
+Retrieves the specified JSON document from an index.
+
+[[docs-get-api-request]]
+==== {api-request-title}
+
+`GET <index>/_doc/<_id>`
+
+`HEAD <index>/_doc/<_id>`
+
+`GET <index>/_source/<_id>`
+
+`HEAD <index>/_source/<_id>`
+
+[[docs-get-api-desc]]
+==== {api-description-title}
+You use GET to retrieve a document and its source or stored fields from a
+particular index. Use HEAD to verify that a document exists. You can
+use the `_source` resource to retrieve just the document source or verify
+that it exists.
+
[float]
|
||||||
|
[[realtime]]
|
||||||
|
===== Realtime
|
||||||
|
|
||||||
|
By default, the get API is realtime, and is not affected by the refresh
|
||||||
|
rate of the index (when data will become visible for search). If a document
|
||||||
|
has been updated but is not yet refreshed, the get API will issue a refresh
|
||||||
|
call in-place to make the document visible. This will also make other documents
|
||||||
|
changed since the last refresh visible. In order to disable realtime GET,
|
||||||
|
one can set the `realtime` parameter to `false`.
|
||||||
|
|
||||||
|
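For example, the following is a sketch of a non-realtime get that only sees
what has already been refreshed:

[source,js]
--------------------------------------------------
GET twitter/_doc/0?realtime=false
--------------------------------------------------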
[float]
|
||||||
|
[[get-source-filtering]]
|
||||||
|
===== Source filtering
|
||||||
|
|
||||||
|
By default, the get operation returns the contents of the `_source` field unless
|
||||||
|
you have used the `stored_fields` parameter or if the `_source` field is disabled.
|
||||||
|
You can turn off `_source` retrieval by using the `_source` parameter:
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
--------------------------------------------------
|
||||||
|
GET twitter/_doc/0?_source=false
|
||||||
|
--------------------------------------------------
|
||||||
|
// CONSOLE
|
||||||
|
// TEST[setup:twitter]
|
||||||
|
|
||||||
|
If you only need one or two fields from the `_source`, use the `_source_includes`
|
||||||
|
or `_source_excludes` parameters to include or filter out particular fields.
|
||||||
|
This can be especially helpful with large documents where partial retrieval can
|
||||||
|
save on network overhead. Both parameters take a comma separated list
|
||||||
|
of fields or wildcard expressions. Example:
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
--------------------------------------------------
|
||||||
|
GET twitter/_doc/0?_source_includes=*.id&_source_excludes=entities
|
||||||
|
--------------------------------------------------
|
||||||
|
// CONSOLE
|
||||||
|
// TEST[setup:twitter]
|
||||||
|
|
||||||
|
If you only want to specify includes, you can use a shorter notation:
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
--------------------------------------------------
|
||||||
|
GET twitter/_doc/0?_source=*.id,retweeted
|
||||||
|
--------------------------------------------------
|
||||||
|
// CONSOLE
|
||||||
|
// TEST[setup:twitter]
|
||||||
|
|
||||||
|
[float]
|
||||||
|
[[get-routing]]
|
||||||
|
===== Routing
|
||||||
|
|
||||||
|
If routing is used during indexing, the routing value also needs to be
|
||||||
|
specified to retrieve a document. For example:
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
--------------------------------------------------
|
||||||
|
GET twitter/_doc/2?routing=user1
|
||||||
|
--------------------------------------------------
|
||||||
|
// CONSOLE
|
||||||
|
// TEST[continued]
|
||||||
|
|
||||||
|
This request gets the tweet with id `2`, but it is routed based on the
|
||||||
|
user. The document is not fetched if the correct routing is not specified.
|
||||||
|
|
||||||
|
[float]
|
||||||
|
[[preference]]
|
||||||
|
===== Preference
|
||||||
|
|
||||||
|
Controls a `preference` of which shard replicas to execute the get
|
||||||
|
request on. By default, the operation is randomized between the shard
|
||||||
|
replicas.
|
||||||
|
|
||||||
|
The `preference` can be set to:
|
||||||
|
|
||||||
|
`_local`::
|
||||||
|
The operation will prefer to be executed on a local
|
||||||
|
allocated shard if possible.
|
||||||
|
|
||||||
|
Custom (string) value::
|
||||||
|
A custom value will be used to guarantee that
|
||||||
|
the same shards will be used for the same custom value. This can help
|
||||||
|
with "jumping values" when hitting different shards in different refresh
|
||||||
|
states. A sample value can be something like the web session id, or the
|
||||||
|
user name.
|
||||||
|
|
||||||
|
[float]
|
||||||
|
[[get-refresh]]
|
||||||
|
===== Refresh
|
||||||
|
|
||||||
|
The `refresh` parameter can be set to `true` in order to refresh the
|
||||||
|
relevant shard before the get operation and make it searchable. Setting
|
||||||
|
it to `true` should be done after careful thought and verification that
|
||||||
|
this does not cause a heavy load on the system (and slows down
|
||||||
|
indexing).
|
||||||
|
|
||||||
|
[float]
|
||||||
|
[[get-distributed]]
|
||||||
|
===== Distributed
|
||||||
|
|
||||||
|
The get operation gets hashed into a specific shard id. It then gets
|
||||||
|
redirected to one of the replicas within that shard id and returns the
|
||||||
|
result. The replicas are the primary shard and its replicas within that
|
||||||
|
shard id group. This means that the more replicas we have, the
|
||||||
|
better GET scaling we will have.
|
||||||
|
|
||||||
|
[float]
|
||||||
|
[[get-versioning]]
|
||||||
|
===== Versioning support
|
||||||
|
|
||||||
|
You can use the `version` parameter to retrieve the document only if
|
||||||
|
its current version is equal to the specified one. This behavior is the same
|
||||||
|
for all version types with the exception of version type `FORCE` which always
|
||||||
|
retrieves the document. Note that `FORCE` version type is deprecated.
|
||||||
|
|
||||||
|
Internally, Elasticsearch has marked the old document as deleted and added an
|
||||||
|
entirely new document. The old version of the document doesn’t disappear
|
||||||
|
immediately, although you won’t be able to access it. Elasticsearch cleans up
|
||||||
|
deleted documents in the background as you continue to index more data.
|
||||||
|
|
||||||
|
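As a sketch of version-aware retrieval (the version number here is
hypothetical), such a request could look like:

[source,js]
--------------------------------------------------
GET twitter/_doc/0?version=1
--------------------------------------------------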
[[docs-get-api-path-params]]
|
||||||
|
==== {api-path-parms-title}
|
||||||
|
|
||||||
|
`<index>`::
|
||||||
|
(Required, string) Name of the index that contains the document.
|
||||||
|
|
||||||
|
`<_id>`::
|
||||||
|
(Required, string) Unique identifier of the document.
|
||||||
|
|
||||||
|
[[docs-get-api-query-params]]
|
||||||
|
==== {api-query-parms-title}
|
||||||
|
|
||||||
|
`preference`::
|
||||||
|
(Optional, string) Specify the node or shard the operation should
|
||||||
|
be performed on (default: random).
|
||||||
|
|
||||||
|
`realtime`::
|
||||||
|
(Optional, boolean) Set to `false` to disable real time GET
|
||||||
|
(default: `true`). See <<realtime>>.
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh]
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-routing]
|
||||||
|
|
||||||
|
`stored_fields`::
|
||||||
|
(Optional, boolean) Set to `true` to retrieve the document fields stored in the
|
||||||
|
index rather than the document `_source` (default: `false`).
|
||||||
|
|
||||||
|
`_source`::
|
||||||
|
(Optional, list) Set to `false` to disable source retrieval (default: `true`).
|
||||||
|
You can also specify a comma-separated list of the fields
|
||||||
|
you want to retrieve.
|
||||||
|
|
||||||
|
`_source_excludes`::
|
||||||
|
(Optional, list) Specify the source fields you want to exclude.
|
||||||
|
|
||||||
|
`_source_includes`::
|
||||||
|
(Optional, list) Specify the source fields you want to retrieve.
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version]
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version-type]
|
||||||
|
|
||||||
|
[[docs-get-api-response-body]]
|
||||||
|
==== {api-response-body-title}
|
||||||
|
|
||||||
|
`_index`::
|
||||||
|
The name of the index the document belongs to.
|
||||||
|
|
||||||
|
`_type`::
|
||||||
|
The document type. {es} indices now support a single document type, `_doc`.
|
||||||
|
|
||||||
|
`_id`::
|
||||||
|
The unique identifier for the document.
|
||||||
|
|
||||||
|
`_version`::
|
||||||
|
The document version. Incremented each time the document is updated.
|
||||||
|
|
||||||
|
`_seq_no`::
|
||||||
|
The sequence number assigned to the document for the indexing
|
||||||
|
operation. Sequence numbers are used to ensure an older version of a document
|
||||||
|
doesn’t overwrite a newer version. See <<optimistic-concurrency-control-index>>.
|
||||||
|
|
||||||
|
`_primary_term`::
|
||||||
|
The primary term assigned to the document for the indexing operation.
|
||||||
|
See <<optimistic-concurrency-control-index>>.
|
||||||
|
|
||||||
|
`found`::
|
||||||
|
Indicates whether the document exists: `true` or `false`.
|
||||||
|
|
||||||
|
`_routing`::
|
||||||
|
The explicit routing, if set.
|
||||||
|
|
||||||
|
'_source'::
|
||||||
|
If `found` is `true`, contains the document data formatted in JSON.
|
||||||
|
Excluded if the `_source` parameter is set to `false` or the `stored_fields`
|
||||||
|
paramter is set to `true`.
|
||||||
|
|
||||||
|
'_fields'::
|
||||||
|
If the `stored_fields` parameter is set to `true` and `found` is
|
||||||
|
`true`, contains the document fields stored in the index.
|
||||||
|
|
||||||
|
[[docs-get-api-example]]
|
||||||
|
==== {api-examples-title}
|
||||||
|
|
||||||
|
Retrieve the JSON document with the `_id` 0 from the `twitter` index:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -12,7 +238,7 @@ GET twitter/_doc/0
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[setup:twitter]
|
// TEST[setup:twitter]
|
||||||
|
|
||||||
The result of the above get operation is:
|
The API returns the following result:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -34,13 +260,7 @@ The result of the above get operation is:
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
|
// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
|
||||||
|
|
||||||
The above result includes the `_index`, `_id`, and `_version`
|
Check to see if a document with the `_id` 0 exists:
|
||||||
of the document we wish to retrieve, including the actual `_source`
|
|
||||||
of the document if it could be found (as indicated by the `found`
|
|
||||||
field in the response).
|
|
||||||
|
|
||||||
The API also allows to check for the existence of a document using
|
|
||||||
`HEAD`, for example:
|
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -49,60 +269,50 @@ HEAD twitter/_doc/0
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[setup:twitter]
|
// TEST[setup:twitter]
|
||||||
|
|
||||||
[float]
|
{es} returns a status code of `200 - OK` if the document exists, or
|
||||||
[[realtime]]
|
`404 - Not Found` if it doesn't.
|
||||||
==== Realtime
|
|
||||||
|
|
||||||
By default, the get API is realtime, and is not affected by the refresh
|
|
||||||
rate of the index (when data will become visible for search). If a document
|
|
||||||
has been updated but is not yet refreshed, the get API will issue a refresh
|
|
||||||
call in-place to make the document visible. This will also make other documents
|
|
||||||
changed since the last refresh visible. In order to disable realtime GET,
|
|
||||||
one can set the `realtime` parameter to `false`.
|
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[get-source-filtering]]
|
[[_source]]
|
||||||
==== Source filtering
|
===== Get the source field only
|
||||||
|
|
||||||
By default, the get operation returns the contents of the `_source` field unless
|
Use the `<index>/_source/<id>` resource to get
|
||||||
you have used the `stored_fields` parameter or if the `_source` field is disabled.
|
just the `_source` field of a document. For example:
|
||||||
You can turn off `_source` retrieval by using the `_source` parameter:
|
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
GET twitter/_doc/0?_source=false
|
GET twitter/_source/1
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[setup:twitter]
|
// TEST[continued]
|
||||||
|
|
||||||
If you only need one or two fields from the complete `_source`, you can use the `_source_includes`
|
You can use the source filtering parameters to control which parts of the
|
||||||
and `_source_excludes` parameters to include or filter out the parts you need. This can be especially helpful
|
`_source` are returned:
|
||||||
with large documents where partial retrieval can save on network overhead. Both parameters take a comma separated list
|
|
||||||
of fields or wildcard expressions. Example:
|
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
GET twitter/_doc/0?_source_includes=*.id&_source_excludes=entities
|
GET twitter/_source/1/?_source_includes=*.id&_source_excludes=entities
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[setup:twitter]
|
// TEST[continued]
|
||||||
|
|
||||||
If you only want to specify includes, you can use a shorter notation:
|
You can use HEAD with the `_source` endpoint to efficiently
|
||||||
|
test whether or not the document _source exists. A document's source is not
|
||||||
|
available if it is disabled in the <<mapping-source-field,mapping>>.
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
GET twitter/_doc/0?_source=*.id,retweeted
|
HEAD twitter/_source/1
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[setup:twitter]
|
// TEST[continued]
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[get-stored-fields]]
|
[[get-stored-fields]]
|
||||||
==== Stored Fields
|
===== Get stored fields
|
||||||
|
|
||||||
The get operation allows specifying a set of stored fields that will be
|
Use the `stored_fields` parameter to specify the set of stored fields you want
|
||||||
returned by passing the `stored_fields` parameter.
|
to retrieve. Any requested fields that are not stored are ignored.
|
||||||
If the requested fields are not stored, they will be ignored.
|
|
||||||
Consider for instance the following mapping:
|
Consider for instance the following mapping:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
|
@ -147,7 +357,7 @@ GET twitter/_doc/1?stored_fields=tags,counter
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[continued]
|
// TEST[continued]
|
||||||
|
|
||||||
The result of the above get operation is:
|
The API returns the following result:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -168,11 +378,10 @@ The result of the above get operation is:
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
|
// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
|
||||||
|
|
||||||
|
|
||||||
Field values fetched from the document itself are always returned as an array.
|
Field values fetched from the document itself are always returned as an array.
|
||||||
Since the `counter` field is not stored the get request simply ignores it when trying to get the `stored_fields.`
|
Since the `counter` field is not stored, the get request ignores it.
|
||||||
|
|
||||||
It is also possible to retrieve metadata fields like the `_routing` field:
|
You can also retrieve metadata fields like the `_routing` field:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -192,7 +401,7 @@ GET twitter/_doc/2?routing=user1&stored_fields=tags,counter
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[continued]
|
// TEST[continued]
|
||||||
|
|
||||||
The result of the above get operation is:
|
The API returns the following result:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -214,113 +423,5 @@ The result of the above get operation is:
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
|
// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
|
||||||
|
|
||||||
Also only leaf fields can be returned via the `stored_field` option. So object fields can't be returned and such requests
|
Only leaf fields can be retrieved with the `stored_field` option. Object fields
|
||||||
will fail.
|
can't be returned--if specified, the request fails.
|
||||||
|
|
||||||
[float]
|
|
||||||
[[_source]]
|
|
||||||
==== Getting the +_source+ directly
|
|
||||||
|
|
||||||
Use the `/{index}/_source/{id}` endpoint to get
|
|
||||||
just the `_source` field of the document,
|
|
||||||
without any additional content around it. For example:
|
|
||||||
|
|
||||||
[source,js]
|
|
||||||
--------------------------------------------------
|
|
||||||
GET twitter/_source/1
|
|
||||||
--------------------------------------------------
|
|
||||||
// CONSOLE
|
|
||||||
// TEST[continued]
|
|
||||||
|
|
||||||
You can also use the same source filtering parameters to control which parts of the `_source` will be returned:
|
|
||||||
|
|
||||||
[source,js]
|
|
||||||
--------------------------------------------------
|
|
||||||
GET twitter/_source/1/?_source_includes=*.id&_source_excludes=entities
|
|
||||||
--------------------------------------------------
|
|
||||||
// CONSOLE
|
|
||||||
// TEST[continued]
|
|
||||||
|
|
||||||
Note, there is also a HEAD variant for the _source endpoint to efficiently test for document _source existence.
|
|
||||||
An existing document will not have a _source if it is disabled in the <<mapping-source-field,mapping>>.
|
|
||||||
|
|
||||||
[source,js]
|
|
||||||
--------------------------------------------------
|
|
||||||
HEAD twitter/_source/1
|
|
||||||
--------------------------------------------------
|
|
||||||
// CONSOLE
|
|
||||||
// TEST[continued]
|
|
||||||
|
|
||||||
[float]
|
|
||||||
[[get-routing]]
|
|
||||||
==== Routing
|
|
||||||
|
|
||||||
When indexing using the ability to control the routing, in order to get
|
|
||||||
a document, the routing value should also be provided. For example:
|
|
||||||
|
|
||||||
[source,js]
|
|
||||||
--------------------------------------------------
|
|
||||||
GET twitter/_doc/2?routing=user1
|
|
||||||
--------------------------------------------------
|
|
||||||
// CONSOLE
|
|
||||||
// TEST[continued]
|
|
||||||
|
|
||||||
The above will get a tweet with id `2`, but will be routed based on the
|
|
||||||
user. Note that issuing a get without the correct routing will cause the
|
|
||||||
document not to be fetched.
|
|
||||||
|
|
||||||
[float]
|
|
||||||
[[preference]]
|
|
||||||
==== Preference
|
|
||||||
|
|
||||||
Controls a `preference` of which shard replicas to execute the get
|
|
||||||
request on. By default, the operation is randomized between the shard
|
|
||||||
replicas.
|
|
||||||
|
|
||||||
The `preference` can be set to:
|
|
||||||
|
|
||||||
`_local`::
|
|
||||||
The operation will prefer to be executed on a local
|
|
||||||
allocated shard if possible.
|
|
||||||
|
|
||||||
Custom (string) value::
|
|
||||||
A custom value will be used to guarantee that
|
|
||||||
the same shards will be used for the same custom value. This can help
|
|
||||||
with "jumping values" when hitting different shards in different refresh
|
|
||||||
states. A sample value can be something like the web session id, or the
|
|
||||||
user name.
|
|
||||||
|
|
||||||
[float]
|
|
||||||
[[get-refresh]]
|
|
||||||
==== Refresh
|
|
||||||
|
|
||||||
The `refresh` parameter can be set to `true` in order to refresh the
|
|
||||||
relevant shard before the get operation and make it searchable. Setting
|
|
||||||
it to `true` should be done after careful thought and verification that
|
|
||||||
this does not cause a heavy load on the system (and slows down
|
|
||||||
indexing).
|
|
||||||
|
|
||||||
[float]
|
|
||||||
[[get-distributed]]
|
|
||||||
==== Distributed
|
|
||||||
|
|
||||||
The get operation gets hashed into a specific shard id. It then gets
|
|
||||||
redirected to one of the replicas within that shard id and returns the
|
|
||||||
result. The replicas are the primary shard and its replicas within that
|
|
||||||
shard id group. This means that the more replicas we have, the
|
|
||||||
better GET scaling we will have.
|
|
||||||
|
|
||||||
|
|
||||||
[float]
|
|
||||||
[[get-versioning]]
|
|
||||||
==== Versioning support
|
|
||||||
|
|
||||||
You can use the `version` parameter to retrieve the document only if
|
|
||||||
its current version is equal to the specified one. This behavior is the same
|
|
||||||
for all version types with the exception of version type `FORCE` which always
|
|
||||||
retrieves the document. Note that `FORCE` version type is deprecated.
|
|
||||||
|
|
||||||
Internally, Elasticsearch has marked the old document as deleted and added an
|
|
||||||
entirely new document. The old version of the document doesn’t disappear
|
|
||||||
immediately, although you won’t be able to access it. Elasticsearch cleans up
|
|
||||||
deleted documents in the background as you continue to index more data.
|
|
||||||
|
|
|
@@ -1,77 +1,144 @@
 [[docs-index_]]
 === Index API
+++++
+<titleabbrev>Index</titleabbrev>
+++++
+
 IMPORTANT: See <<removal-of-types>>.
 
-The index API adds or updates a JSON document in a specific index,
-making it searchable. The following example inserts the JSON document
-into the "twitter" index with an id of 1:
-
-[source,js]
---------------------------------------------------
-PUT twitter/_doc/1
-{
-    "user" : "kimchy",
-    "post_date" : "2009-11-15T14:12:12",
-    "message" : "trying out Elasticsearch"
-}
---------------------------------------------------
-// CONSOLE
-
-The result of the above index operation is:
-
-[source,js]
---------------------------------------------------
-{
-    "_shards" : {
-        "total" : 2,
-        "failed" : 0,
-        "successful" : 2
-    },
-    "_index" : "twitter",
-    "_type" : "_doc",
-    "_id" : "1",
-    "_version" : 1,
-    "_seq_no" : 0,
-    "_primary_term" : 1,
-    "result" : "created"
-}
---------------------------------------------------
-// TESTRESPONSE[s/"successful" : 2/"successful" : 1/]
-
-The `_shards` header provides information about the replication process of the index operation:
-
-`total`:: Indicates how many shard copies (primary and replica shards) the index operation should be executed on.
-`successful`:: Indicates the number of shard copies the index operation succeeded on.
-`failed`:: An array that contains replication-related errors in the case an index operation failed on a replica shard.
-
-The index operation is successful in the case `successful` is at least 1.
-
-NOTE: Replica shards may not all be started when an indexing operation successfully returns (by default, only the
-primary is required, but this behavior can be <<index-wait-for-active-shards,changed>>). In that case,
-`total` will be equal to the total shards based on the `number_of_replicas` setting and `successful` will be
-equal to the number of shards started (primary plus replicas). If there were no failures, the `failed` will be 0.
+Adds a JSON document to the specified index and makes
+it searchable. If the document already exists,
+updates the document and increments its version.
+
+[[docs-index-api-request]]
+==== {api-request-title}
+
+`PUT /<index>/_doc/<_id>`
+
+`POST /<index>/_doc/`
+
+`PUT /<index>/_create/<_id>`
+
+`POST /<index>/_create/<_id>`
+
+[[docs-index-api-path-params]]
+==== {api-path-parms-title}
+
+`<index>`::
+(Required, string) Name of the target index. By default, the index is created
+automatically if it doesn't exist. For more information, see <<index-creation>>.
+
+`<_id>`::
+(Optional, string) Unique identifier for the document. Required if you are
+using a PUT request. Omit to automatically generate an ID when using a
+POST request.
+
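As a quick sketch of ID auto-generation (the document body here is illustrative
only), omitting the ID and using POST looks like:

[source,js]
--------------------------------------------------
POST twitter/_doc/
{
    "user" : "kimchy",
    "post_date" : "2009-11-15T14:12:12",
    "message" : "trying out Elasticsearch"
}
--------------------------------------------------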
+[[docs-index-api-query-params]]
+==== {api-query-parms-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-seq-no]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-primary-term]
+
+`op_type`::
+(Optional, enum) Set to `create` to only index the document
+if it does not already exist (_put if absent_). If a document with the specified
+`_id` already exists, the indexing operation will fail. Same as using the
+`<index>/_create` endpoint. Valid values: `index`, `create`. Default: `index`.
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-pipeline]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-routing]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version-type]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-wait-for-active-shards]
+
+[[docs-index-api-request-body]]
+==== {api-request-body-title}
+
+`<field>`::
+(Required, string) Request body contains the JSON source for the document
+data.
+
+[[docs-index-api-response-body]]
+==== {api-response-body-title}
+
+`_shards`::
+Provides information about the replication process of the index operation.
+
+`_shards.total`::
+Indicates how many shard copies (primary and replica shards) the index operation
+should be executed on.
+
+`_shards.successful`::
+Indicates the number of shard copies the index operation succeeded on.
+When the index operation is successful, `successful` is at least 1.
++
+NOTE: Replica shards might not all be started when an indexing operation
+returns successfully--by default, only the primary is required. Set
+`wait_for_active_shards` to change this default behavior. See
+<<index-wait-for-active-shards>>.
+
+`_shards.failed`::
+An array that contains replication-related errors in the case an index operation
+failed on a replica shard. 0 indicates there were no failures.
+
+`_index`::
+The name of the index the document was added to.
+
+`_type`::
+The document type. {es} indices now support a single document type, `_doc`.
+
+`_id`::
+The unique identifier for the added document.
+
+`_version`::
+The document version. Incremented each time the document is updated.
+
+`_seq_no`::
+The sequence number assigned to the document for the indexing operation.
+Sequence numbers are used to ensure an older version of a document
+doesn’t overwrite a newer version. See <<optimistic-concurrency-control-index>>.
+
+`_primary_term`::
+The primary term assigned to the document for the indexing operation.
+See <<optimistic-concurrency-control-index>>.
+
+`result`::
+The result of the indexing operation, `created` or `updated`.
+
+[[docs-index-api-desc]]
+==== {api-description-title}
+
+You can index a new JSON document with the `_doc` or `_create` resource. Using
+`_create` guarantees that the document is only indexed if it does not already
+exist. To update an existing document, you must use the `_doc` resource.
+
[float]
|
|
||||||
[[index-creation]]
|
[[index-creation]]
|
||||||
==== Automatic Index Creation
|
===== Create indices automatically
|
||||||
|
|
||||||
The index operation automatically creates an index if it does not already
|
If the specified index does not already exist, by default the index operation
|
||||||
exist, and applies any <<indices-templates,index templates>> that are
|
automatically creates it and applies any configured
|
||||||
configured. The index operation also creates a dynamic mapping if one does not
|
<<indices-templates,index templates>>. If no mapping exists, the index operation
|
||||||
already exist. By default, new fields and objects will automatically be added
|
creates a dynamic mapping. By default, new fields and objects are
|
||||||
to the mapping definition if needed. Check out the <<mapping,mapping>> section
|
automatically added to the mapping if needed. For more information about field
|
||||||
for more information on mapping definitions, and the
|
mapping, see <<mapping,mapping>> and the <<indices-put-mapping,put mapping>> API.
|
||||||
<<indices-put-mapping,put mapping>> API for information about updating mappings
|
|
||||||
manually.
|
|
||||||
|
|
||||||
Automatic index creation is controlled by the `action.auto_create_index`
|
Automatic index creation is controlled by the `action.auto_create_index`
|
||||||
setting. This setting defaults to `true`, meaning that indices are always
|
setting. This setting defaults to `true`, which allows any index to be created
|
||||||
automatically created. Automatic index creation can be permitted only for
|
automatically. You can modify this setting to explicitly allow or block
|
||||||
indices matching certain patterns by changing the value of this setting to a
|
automatic creation of indices that match specified patterns, or set it to
|
||||||
comma-separated list of these patterns. It can also be explicitly permitted and
|
`false` to disable automatic index creation entirely. Specify a
|
||||||
forbidden by prefixing patterns in the list with a `+` or `-`. Finally it can
|
comma-separated list of patterns you want to allow, or prefix each pattern with
|
||||||
be completely disabled by changing this setting to `false`.
|
`+` or `-` to indicate whether it should be allowed or blocked.
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -98,56 +165,30 @@ PUT _cluster/settings
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
|
|
||||||
<1> Permit only the auto-creation of indices called `twitter`, `index10`, no
|
<1> Allow auto-creation of indices called `twitter` or `index10`, block the
|
||||||
other index matching `index1*`, and any other index matching `ind*`. The
|
creation of indices that match the pattern `index1*`, and allow creation of
|
||||||
patterns are matched in the order in which they are given.
|
any other indices that match the `ind*` pattern. Patterns are matched in
|
||||||
|
the order specified.
|
||||||
|
|
||||||
<2> Completely disable the auto-creation of indices.
|
<2> Disable automatic index creation entirely.
|
||||||
|
|
||||||
<3> Permit the auto-creation of indices with any name. This is the default.
|
<3> Allow automatic creation of any index. This is the default.
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[operation-type]]
|
[[operation-type]]
|
||||||
==== Operation Type
|
===== Put if absent
|
||||||
|
|
||||||
The index operation also accepts an `op_type` that can be used to force
|
You can force a create operation by using the `_create` resource or
|
||||||
a `create` operation, allowing for "put-if-absent" behavior. When
|
setting the `op_type` parameter to _create_. In this case,
|
||||||
`create` is used, the index operation will fail if a document by that id
|
the index operation fails if a document with the specified ID
|
||||||
already exists in the index.
|
already exists in the index.
|
||||||
|
|
||||||
Here is an example of using the `op_type` parameter:
|
|
||||||
|
|
||||||
[source,js]
|
|
||||||
--------------------------------------------------
|
|
||||||
PUT twitter/_doc/1?op_type=create
|
|
||||||
{
|
|
||||||
"user" : "kimchy",
|
|
||||||
"post_date" : "2009-11-15T14:12:12",
|
|
||||||
"message" : "trying out Elasticsearch"
|
|
||||||
}
|
|
||||||
--------------------------------------------------
|
|
||||||
// CONSOLE
|
|
||||||
|
|
||||||
Another option to specify `create` is to use the following uri:
|
|
||||||
|
|
||||||
[source,js]
|
|
||||||
--------------------------------------------------
|
|
||||||
PUT twitter/_create/1
|
|
||||||
{
|
|
||||||
"user" : "kimchy",
|
|
||||||
"post_date" : "2009-11-15T14:12:12",
|
|
||||||
"message" : "trying out Elasticsearch"
|
|
||||||
}
|
|
||||||
--------------------------------------------------
|
|
||||||
// CONSOLE
|
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
==== Automatic ID Generation
|
===== Create document IDs automatically
|
||||||
|
|
||||||
The index operation can be executed without specifying the id. In such a
|
If you don't specify a document ID when using POST, the `op_type` is
|
||||||
case, an id will be generated automatically. In addition, the `op_type`
|
automatically set to `create` and the index operation generates a unique ID
|
||||||
will automatically be set to `create`. Here is an example (note the
|
for the document.
|
||||||
*POST* used instead of *PUT*):
|
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -160,7 +201,7 @@ POST twitter/_doc/
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
|
|
||||||
The result of the above index operation is:
|
The API returns the following result:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -183,7 +224,7 @@ The result of the above index operation is:
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[optimistic-concurrency-control-index]]
|
[[optimistic-concurrency-control-index]]
|
||||||
==== Optimistic concurrency control
|
===== Optimistic concurrency control
|
||||||
|
|
||||||
Index operations can be made conditional and only be performed if the last
|
Index operations can be made conditional and only be performed if the last
|
||||||
modification to the document was assigned the sequence number and primary
|
modification to the document was assigned the sequence number and primary
|
||||||
|
@ -193,7 +234,7 @@ and a status code of 409. See <<optimistic-concurrency-control>> for more detail
|
||||||
|
|
||||||
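For illustration, a conditional index request might look like the following sketch. It assumes the `twitter` document shown earlier currently has a sequence number of 0 and a primary term of 1; in practice you would take these values from a previous index or get response:

[source,js]
--------------------------------------------------
PUT twitter/_doc/1?if_seq_no=0&if_primary_term=1
{
    "user" : "kimchy",
    "post_date" : "2009-11-15T14:12:12",
    "message" : "trying out Elasticsearch"
}
--------------------------------------------------
// CONSOLE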
[float]
|
[float]
|
||||||
[[index-routing]]
|
[[index-routing]]
|
||||||
==== Routing
|
===== Routing
|
||||||
|
|
||||||
By default, shard placement, or `routing`, is controlled by using a
|
By default, shard placement, or `routing`, is controlled by using a
|
||||||
hash of the document's id value. For more explicit control, the value
|
hash of the document's id value. For more explicit control, the value
|
||||||
|
@ -211,11 +252,11 @@ POST twitter/_doc?routing=kimchy
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
|
|
||||||
In the example above, the "_doc" document is routed to a shard based on
|
In this example, the document is routed to a shard based on
|
||||||
the `routing` parameter provided: "kimchy".
|
the `routing` parameter provided: "kimchy".
|
||||||
|
|
||||||
When setting up explicit mapping, the `_routing` field can be optionally
|
When setting up explicit mapping, you can also use the `_routing` field
|
||||||
used to direct the index operation to extract the routing value from the
|
to direct the index operation to extract the routing value from the
|
||||||
document itself. This does come at the (very minimal) cost of an
|
document itself. This does come at the (very minimal) cost of an
|
||||||
additional document parsing pass. If the `_routing` mapping is defined
|
additional document parsing pass. If the `_routing` mapping is defined
|
||||||
and set to be `required`, the index operation will fail if no routing
|
and set to be `required`, the index operation will fail if no routing
|
||||||
|
@ -223,7 +264,7 @@ value is provided or extracted.
|
||||||
|
|
||||||
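As a sketch of the `required` case, a mapping along the following lines (the index name `my_routed_index` is only an example) causes index operations without a routing value to be rejected:

[source,js]
--------------------------------------------------
PUT my_routed_index
{
  "mappings": {
    "_routing": {
      "required": true
    }
  }
}
--------------------------------------------------
// CONSOLE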
[float]
|
[float]
|
||||||
[[index-distributed]]
|
[[index-distributed]]
|
||||||
==== Distributed
|
===== Distributed
|
||||||
|
|
||||||
The index operation is directed to the primary shard based on its route
|
The index operation is directed to the primary shard based on its route
|
||||||
(see the Routing section above) and performed on the actual node
|
(see the Routing section above) and performed on the actual node
|
||||||
|
@ -232,7 +273,7 @@ if needed, the update is distributed to applicable replicas.
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[index-wait-for-active-shards]]
|
[[index-wait-for-active-shards]]
|
||||||
==== Wait For Active Shards
|
===== Active shards
|
||||||
|
|
||||||
To improve the resiliency of writes to the system, indexing operations
|
To improve the resiliency of writes to the system, indexing operations
|
||||||
can be configured to wait for a certain number of active shard copies
|
can be configured to wait for a certain number of active shard copies
|
||||||
|
@ -290,14 +331,14 @@ replication succeeded/failed.
|
||||||
|
|
||||||
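For example, assuming an index configured with at least one replica, a request such as the following sketch waits for two active shard copies (the primary plus one replica) before proceeding:

[source,js]
--------------------------------------------------
PUT twitter/_doc/1?wait_for_active_shards=2
{
    "user" : "kimchy",
    "post_date" : "2009-11-15T14:12:12",
    "message" : "trying out Elasticsearch"
}
--------------------------------------------------
// CONSOLE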
[float]
|
[float]
|
||||||
[[index-refresh]]
|
[[index-refresh]]
|
||||||
==== Refresh
|
===== Refresh
|
||||||
|
|
||||||
Control when the changes made by this request are visible to search. See
|
Control when the changes made by this request are visible to search. See
|
||||||
<<docs-refresh,refresh>>.
|
<<docs-refresh,refresh>>.
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[index-noop]]
|
[[index-noop]]
|
||||||
==== Noop Updates
|
===== Noop updates
|
||||||
|
|
||||||
When updating a document using the index API a new version of the document is
|
When updating a document using the index API a new version of the document is
|
||||||
always created even if the document hasn't changed. If this isn't acceptable
|
always created even if the document hasn't changed. If this isn't acceptable
|
||||||
|
@ -312,7 +353,7 @@ Elasticsearch runs on the shard receiving the updates.
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[timeout]]
|
[[timeout]]
|
||||||
==== Timeout
|
===== Timeout
|
||||||
|
|
||||||
The primary shard assigned to perform the index operation might not be
|
The primary shard assigned to perform the index operation might not be
|
||||||
available when the index operation is executed. Some reasons for this
|
available when the index operation is executed. Some reasons for this
|
||||||
|
@ -336,7 +377,7 @@ PUT twitter/_doc/1?timeout=5m
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[index-versioning]]
|
[[index-versioning]]
|
||||||
==== Versioning
|
===== Versioning
|
||||||
|
|
||||||
Each indexed document is given a version number. By default,
|
Each indexed document is given a version number. By default,
|
||||||
internal versioning is used that starts at 1 and increments
|
internal versioning is used that starts at 1 and increments
|
||||||
|
@ -363,11 +404,12 @@ PUT twitter/_doc/1?version=2&version_type=external
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[continued]
|
// TEST[continued]
|
||||||
|
|
||||||
*NOTE:* Versioning is completely real time, and is not affected by the
|
NOTE: Versioning is completely real time, and is not affected by the
|
||||||
near real time aspects of search operations. If no version is provided,
|
near real time aspects of search operations. If no version is provided,
|
||||||
then the operation is executed without any version checks.
|
then the operation is executed without any version checks.
|
||||||
|
|
||||||
The above will succeed since the supplied version of 2 is higher than
|
In the previous example, the operation will succeed since the supplied
|
||||||
|
version of 2 is higher than
|
||||||
the current document version of 1. If the document was already updated
|
the current document version of 1. If the document was already updated
|
||||||
and its version was set to 2 or higher, the indexing command will fail
|
and its version was set to 2 or higher, the indexing command will fail
|
||||||
and result in a conflict (409 http status code).
|
and result in a conflict (409 http status code).
|
||||||
|
@ -381,12 +423,13 @@ latest version will be used if the index operations arrive out of order for
|
||||||
whatever reason.
|
whatever reason.
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
|
[[index-version-types]]
|
||||||
===== Version types
|
===== Version types
|
||||||
|
|
||||||
Next to the `external` version type explained above, Elasticsearch
|
In addition to the `external` version type, Elasticsearch
|
||||||
also supports other types for specific use cases. Here is an overview of
|
also supports other types for specific use cases:
|
||||||
the different version types and their semantics.
|
|
||||||
|
|
||||||
|
[[_version_types]]
|
||||||
`internal`:: Only index the document if the given version is identical to the version
|
`internal`:: Only index the document if the given version is identical to the version
|
||||||
of the stored document.
|
of the stored document.
|
||||||
|
|
||||||
|
@ -400,8 +443,72 @@ than the version of the stored document. If there is no existing document
|
||||||
the operation will succeed as well. The given version will be used as the new version
|
the operation will succeed as well. The given version will be used as the new version
|
||||||
and will be stored with the new document. The supplied version must be a non-negative long number.
|
and will be stored with the new document. The supplied version must be a non-negative long number.
|
||||||
|
|
||||||
*NOTE*: The `external_gte` version type is meant for special use cases and
|
NOTE: The `external_gte` version type is meant for special use cases and
|
||||||
should be used with care. If used incorrectly, it can result in loss of data.
|
should be used with care. If used incorrectly, it can result in loss of data.
|
||||||
There is another option, `force`, which is deprecated because it can cause
|
There is another option, `force`, which is deprecated because it can cause
|
||||||
primary and replica shards to diverge.
|
primary and replica shards to diverge.
|
||||||
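As a minimal sketch, an `external_gte` request looks like the `external` example above, only with a different `version_type` value; the version value `5` is purely illustrative:

[source,js]
--------------------------------------------------
PUT twitter/_doc/1?version=5&version_type=external_gte
{
    "user" : "kimchy",
    "post_date" : "2009-11-15T14:12:12",
    "message" : "trying out Elasticsearch"
}
--------------------------------------------------
// CONSOLE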
|
|
||||||
|
[[docs-index-api-example]]
|
||||||
|
==== {api-examples-title}
|
||||||
|
|
||||||
|
Insert a JSON document into the `twitter` index with an `_id` of 1:
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
--------------------------------------------------
|
||||||
|
PUT twitter/_doc/1
|
||||||
|
{
|
||||||
|
"user" : "kimchy",
|
||||||
|
"post_date" : "2009-11-15T14:12:12",
|
||||||
|
"message" : "trying out Elasticsearch"
|
||||||
|
}
|
||||||
|
--------------------------------------------------
|
||||||
|
// CONSOLE
|
||||||
|
|
||||||
|
The API returns the following result:
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
--------------------------------------------------
|
||||||
|
{
|
||||||
|
"_shards" : {
|
||||||
|
"total" : 2,
|
||||||
|
"failed" : 0,
|
||||||
|
"successful" : 2
|
||||||
|
},
|
||||||
|
"_index" : "twitter",
|
||||||
|
"_type" : "_doc",
|
||||||
|
"_id" : "1",
|
||||||
|
"_version" : 1,
|
||||||
|
"_seq_no" : 0,
|
||||||
|
"_primary_term" : 1,
|
||||||
|
"result" : "created"
|
||||||
|
}
|
||||||
|
--------------------------------------------------
|
||||||
|
// TESTRESPONSE[s/"successful" : 2/"successful" : 1/]
|
||||||
|
|
||||||
|
Use the `_create` resource to index a document into the `twitter` index if
|
||||||
|
no document with that ID exists:
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
--------------------------------------------------
|
||||||
|
PUT twitter/_create/1
|
||||||
|
{
|
||||||
|
"user" : "kimchy",
|
||||||
|
"post_date" : "2009-11-15T14:12:12",
|
||||||
|
"message" : "trying out Elasticsearch"
|
||||||
|
}
|
||||||
|
--------------------------------------------------
|
||||||
|
// CONSOLE
|
||||||
|
|
||||||
|
Set the `op_type` parameter to _create_ to index a document into the `twitter`
|
||||||
|
index if no document with that ID exists:
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
--------------------------------------------------
|
||||||
|
PUT twitter/_doc/1?op_type=create
|
||||||
|
{
|
||||||
|
"user" : "kimchy",
|
||||||
|
"post_date" : "2009-11-15T14:12:12",
|
||||||
|
"message" : "trying out Elasticsearch"
|
||||||
|
}
|
||||||
|
--------------------------------------------------
|
||||||
|
// CONSOLE
|
||||||
|
|
|
@ -1,18 +1,86 @@
|
||||||
[[docs-update]]
|
[[docs-update]]
|
||||||
=== Update API
|
=== Update API
|
||||||
|
++++
|
||||||
|
<titleabbrev>Update</titleabbrev>
|
||||||
|
++++
|
||||||
|
|
||||||
The update API allows to update a document based on a script provided.
|
Updates a document using the specified script.
|
||||||
The operation gets the document (collocated with the shard) from the
|
|
||||||
index, runs the script (with optional script language and parameters),
|
|
||||||
and indexes back the result (also allows to delete, or ignore the
|
|
||||||
operation).
|
|
||||||
|
|
||||||
Note, this operation still means full reindex of the document, it just
|
[[docs-update-api-request]]
|
||||||
removes some network roundtrips and reduces chances of version conflicts
|
==== {api-request-title}
|
||||||
between the get and the index. The `_source` field needs to be enabled
|
|
||||||
for this feature to work.
|
|
||||||
|
|
||||||
For example, let's index a simple doc:
|
`POST /<index>/_update/<_id>`
|
||||||
|
|
||||||
|
[[update-api-desc]]
|
||||||
|
==== {api-description-title}
|
||||||
|
|
||||||
|
Enables you to script document updates. The script can update, delete, or skip
|
||||||
|
modifying the document. The update API also supports passing a partial document,
|
||||||
|
which is merged into the existing document. To fully replace an existing
|
||||||
|
document, use the <<docs-index_,`index` API>>.
|
||||||
|
|
||||||
|
This operation:
|
||||||
|
|
||||||
|
. Gets the document (collocated with the shard) from the index.
|
||||||
|
. Runs the specified script.
|
||||||
|
. Indexes the result.
|
||||||
|
|
||||||
|
The document must still be reindexed, but using `update` removes some network
|
||||||
|
roundtrips and reduces chances of version conflicts between the GET and the
|
||||||
|
index operation.
|
||||||
|
|
||||||
|
The `_source` field must be enabled to use `update`. In addition to `_source`,
|
||||||
|
you can access the following variables through the `ctx` map: `_index`,
|
||||||
|
`_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).
|
||||||
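For example, a script can read these variables from the `ctx` map. The following sketch stamps a hypothetical `last_updated` field with `ctx._now`; the `test` index and document ID are the ones used in the examples below:

[source,js]
--------------------------------------------------
POST test/_update/1
{
  "script" : "ctx._source.last_updated = ctx._now"
}
--------------------------------------------------
// CONSOLE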
|
|
||||||
|
[[docs-update-api-path-params]]
|
||||||
|
==== {api-path-parms-title}
|
||||||
|
|
||||||
|
`<index>`::
|
||||||
|
(Required, string) Name of the target index. By default, the index is created
|
||||||
|
automatically if it doesn't exist. For more information, see <<index-creation>>.
|
||||||
|
|
||||||
|
`<_id>`::
|
||||||
|
(Required, string) Unique identifier for the document to be updated.
|
||||||
|
|
||||||
|
[[docs-update-api-query-params]]
|
||||||
|
==== {api-query-parms-title}
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-seq-no]
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-primary-term]
|
||||||
|
|
||||||
|
`lang`::
|
||||||
|
(Optional, string) The script language. Default: `painless`.
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh]
|
||||||
|
|
||||||
|
`retry_on_conflict`::
|
||||||
|
(Optional, integer) Specify how many times the operation should be retried when
|
||||||
|
a conflict occurs. Default: 0. See the example request after this parameter list.
|
||||||
|
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-routing]
|
||||||
|
|
||||||
|
`_source`::
|
||||||
|
(Optional, list) Set to `false` to disable source retrieval (default: `true`).
|
||||||
|
You can also specify a comma-separated list of the fields you want to retrieve.
|
||||||
|
|
||||||
|
`_source_excludes`::
|
||||||
|
(Optional, list) Specify the source fields you want to exclude.
|
||||||
|
|
||||||
|
`_source_includes`::
|
||||||
|
(Optional, list) Specify the source fields you want to retrieve.
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-wait-for-active-shards]
|
||||||
|
|
||||||
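As referenced from `retry_on_conflict` above, the following sketch combines that parameter with `_source` filtering. It reuses the `test` document and `counter` field from the examples below; the retry count of 3 is only illustrative:

[source,js]
--------------------------------------------------
POST test/_update/1?retry_on_conflict=3&_source=counter
{
  "script" : {
    "source": "ctx._source.counter += params.count",
    "lang": "painless",
    "params" : {
      "count" : 1
    }
  }
}
--------------------------------------------------
// CONSOLE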
|
[[update-api-example]]
|
||||||
|
==== {api-examples-title}
|
||||||
|
|
||||||
|
First, let's index a simple doc:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -24,10 +92,8 @@ PUT test/_doc/1
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
|
|
||||||
[float]
|
To increment the counter, you can submit an update request with the
|
||||||
==== Scripted updates
|
following script:
|
||||||
|
|
||||||
Now, we can execute a script that would increment the counter:
|
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -45,8 +111,8 @@ POST test/_update/1
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[continued]
|
// TEST[continued]
|
||||||
|
|
||||||
We can add a tag to the list of tags (if the tag exists, it
|
Similarly, you could use an update script to add a tag to the list of tags
|
||||||
still gets added, since this is a list):
|
(this is just a list, so the tag is added even if it already exists):
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -64,11 +130,11 @@ POST test/_update/1
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[continued]
|
// TEST[continued]
|
||||||
|
|
||||||
We can remove a tag from the list of tags. Note that the Painless function to
|
You could also remove a tag from the list of tags. The Painless
|
||||||
`remove` a tag takes as its parameter the array index of the element you wish
|
function to `remove` a tag takes the array index of the element
|
||||||
to remove, so you need a bit more logic to locate it while avoiding a runtime
|
you want to remove. To avoid a possible runtime error, you first need to
|
||||||
error. Note that if the tag was present more than once in the list, this will
|
make sure the tag exists. If the list contains duplicates of the tag, this
|
||||||
remove only one occurrence of it:
|
script just removes one occurrence.
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -86,11 +152,8 @@ POST test/_update/1
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[continued]
|
// TEST[continued]
|
||||||
|
|
||||||
In addition to `_source`, the following variables are available through
|
You can also add and remove fields from a document. For example, this script
|
||||||
the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`,
|
adds the field `new_field`:
|
||||||
and `_now` (the current timestamp).
|
|
||||||
|
|
||||||
We can also add a new field to the document:
|
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -102,7 +165,7 @@ POST test/_update/1
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[continued]
|
// TEST[continued]
|
||||||
|
|
||||||
Or remove a field from the document:
|
Conversely, this script removes the field `new_field`:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -114,9 +177,9 @@ POST test/_update/1
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[continued]
|
// TEST[continued]
|
||||||
|
|
||||||
And, we can even change the operation that is executed. This example deletes
|
Instead of updating the document, you can also change the operation that is
|
||||||
the doc if the `tags` field contains `green`, otherwise it does nothing
|
executed from within the script. For example, this request deletes the doc if
|
||||||
(`noop`):
|
the `tags` field contains `green`, otherwise it does nothing (`noop`):
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -135,13 +198,8 @@ POST test/_update/1
|
||||||
// TEST[continued]
|
// TEST[continued]
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
==== Updates with a partial document
|
===== Update part of a document
|
||||||
|
|
||||||
The update API also supports passing a partial document,
|
|
||||||
which will be merged into the existing document (simple recursive merge,
|
|
||||||
inner merging of objects, replacing core "keys/values" and arrays).
|
|
||||||
To fully replace the existing document, the <<docs-index_,`index` API>> should
|
|
||||||
be used instead.
|
|
||||||
The following partial update adds a new field to the
|
The following partial update adds a new field to the
|
||||||
existing document:
|
existing document:
|
||||||
|
|
||||||
|
@ -157,14 +215,14 @@ POST test/_update/1
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[continued]
|
// TEST[continued]
|
||||||
|
|
||||||
If both `doc` and `script` are specified, then `doc` is ignored. Best is
|
If both `doc` and `script` are specified, then `doc` is ignored. If you
|
||||||
to put your field pairs of the partial document in the script itself.
|
specify a scripted update, include the fields you want to update in the script.
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
==== Detecting noop updates
|
===== Detect noop updates
|
||||||
|
|
||||||
If `doc` is specified its value is merged with the existing `_source`.
|
By default updates that don't change anything detect that they don't change
|
||||||
By default updates that don't change anything detect that they don't change anything and return `"result": "noop"` like this:
|
anything and return `"result": "noop"`:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -178,9 +236,8 @@ POST test/_update/1
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[continued]
|
// TEST[continued]
|
||||||
|
|
||||||
If `name` was `new_name` before the request was sent then the entire update
|
If the value of `name` is already `new_name`, the update
|
||||||
request is ignored. The `result` element in the response returns `noop` if
|
request is ignored and the `result` element in the response returns `noop`:
|
||||||
the request was ignored.
|
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -201,7 +258,7 @@ the request was ignored.
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// TESTRESPONSE
|
// TESTRESPONSE
|
||||||
|
|
||||||
You can disable this behavior by setting `"detect_noop": false` like this:
|
You can disable this behavior by setting `"detect_noop": false`:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -218,11 +275,11 @@ POST test/_update/1
|
||||||
|
|
||||||
[[upserts]]
|
[[upserts]]
|
||||||
[float]
|
[float]
|
||||||
==== Upserts
|
===== Upsert
|
||||||
|
|
||||||
If the document does not already exist, the contents of the `upsert` element
|
If the document does not already exist, the contents of the `upsert` element
|
||||||
will be inserted as a new document. If the document does exist, then the
|
are inserted as a new document. If the document exists, the
|
||||||
`script` will be executed instead:
|
`script` is executed:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -245,11 +302,10 @@ POST test/_update/1
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[scripted_upsert]]
|
[[scripted_upsert]]
|
||||||
===== `scripted_upsert`
|
===== Scripted upsert
|
||||||
|
|
||||||
If you would like your script to run regardless of whether the document exists
|
To run the script whether or not the document exists, set `scripted_upsert` to
|
||||||
or not -- i.e. the script handles initializing the document instead of the
|
`true`:
|
||||||
`upsert` element -- then set `scripted_upsert` to `true`:
|
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -275,10 +331,10 @@ POST sessions/_update/dh3sgudg8gsrgl
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[doc_as_upsert]]
|
[[doc_as_upsert]]
|
||||||
===== `doc_as_upsert`
|
===== Doc as upsert
|
||||||
|
|
||||||
Instead of sending a partial `doc` plus an `upsert` doc, setting
|
Instead of sending a partial `doc` plus an `upsert` doc, you can set
|
||||||
`doc_as_upsert` to `true` will use the contents of `doc` as the `upsert`
|
`doc_as_upsert` to `true` to use the contents of `doc` as the `upsert`
|
||||||
value:
|
value:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
|
@ -293,51 +349,3 @@ POST test/_update/1
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[continued]
|
// TEST[continued]
|
||||||
|
|
||||||
[float]
|
|
||||||
==== Parameters
|
|
||||||
|
|
||||||
The update operation supports the following query-string parameters:
|
|
||||||
|
|
||||||
[horizontal]
|
|
||||||
`retry_on_conflict`::
|
|
||||||
|
|
||||||
In between the get and indexing phases of the update, it is possible that
|
|
||||||
another process might have already updated the same document. By default, the
|
|
||||||
update will fail with a version conflict exception. The `retry_on_conflict`
|
|
||||||
parameter controls how many times to retry the update before finally throwing
|
|
||||||
an exception.
|
|
||||||
|
|
||||||
`routing`::
|
|
||||||
|
|
||||||
Routing is used to route the update request to the right shard and sets the
|
|
||||||
routing for the upsert request if the document being updated doesn't exist.
|
|
||||||
Can't be used to update the routing of an existing document.
|
|
||||||
|
|
||||||
`timeout`::
|
|
||||||
|
|
||||||
Timeout waiting for a shard to become available.
|
|
||||||
|
|
||||||
`wait_for_active_shards`::
|
|
||||||
|
|
||||||
The number of shard copies required to be active before proceeding with the update operation.
|
|
||||||
See <<index-wait-for-active-shards,here>> for details.
|
|
||||||
|
|
||||||
`refresh`::
|
|
||||||
|
|
||||||
Control when the changes made by this request are visible to search. See
|
|
||||||
<<docs-refresh, refresh>>.
|
|
||||||
|
|
||||||
`_source`::
|
|
||||||
|
|
||||||
Allows to control if and how the updated source should be returned in the response.
|
|
||||||
By default the updated source is not returned.
|
|
||||||
See <<request-body-search-source-filtering, Source filtering>> for details.
|
|
||||||
|
|
||||||
`if_seq_no` and `if_primary_term`::
|
|
||||||
|
|
||||||
Update operations can be made conditional and only be performed if the last
|
|
||||||
modification to the document was assigned the sequence number and primary
|
|
||||||
term specified by the `if_seq_no` and `if_primary_term` parameters. If a
|
|
||||||
mismatch is detected, the operation will result in a `VersionConflictException`
|
|
||||||
and a status code of 409. See <<optimistic-concurrency-control>> for more details.
|
|
|
@ -135,8 +135,8 @@ Windows:
|
||||||
The additional nodes are assigned unique IDs. Because you're running all three
|
The additional nodes are assigned unique IDs. Because you're running all three
|
||||||
nodes locally, they automatically join the cluster with the first node.
|
nodes locally, they automatically join the cluster with the first node.
|
||||||
|
|
||||||
. Use the `cat health` API to verify that your three-node cluster is up and running.
|
. Use the cat health API to verify that your three-node cluster is up and running.
|
||||||
The `cat` APIs return information about your cluster and indices in a
|
The cat APIs return information about your cluster and indices in a
|
||||||
format that's easier to read than raw JSON.
|
format that's easier to read than raw JSON.
|
||||||
+
|
+
|
||||||
You can interact directly with your cluster by submitting HTTP requests to
|
You can interact directly with your cluster by submitting HTTP requests to
|
||||||
|
@ -155,8 +155,8 @@ GET /_cat/health?v
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
+
|
+
|
||||||
The response should indicate that the status of the _elasticsearch_ cluster
|
The response should indicate that the status of the `elasticsearch` cluster
|
||||||
is _green_ and it has three nodes:
|
is `green` and it has three nodes:
|
||||||
+
|
+
|
||||||
[source,txt]
|
[source,txt]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -191,8 +191,8 @@ Once you have a cluster up and running, you're ready to index some data.
|
||||||
There are a variety of ingest options for {es}, but in the end they all
|
There are a variety of ingest options for {es}, but in the end they all
|
||||||
do the same thing: put JSON documents into an {es} index.
|
do the same thing: put JSON documents into an {es} index.
|
||||||
|
|
||||||
You can do this directly with a simple POST request that identifies
|
You can do this directly with a simple PUT request that specifies
|
||||||
the index you want to add the document to and specifies one or more
|
the index you want to add the document to, a unique document ID, and one or more
|
||||||
`"field": "value"` pairs in the request body:
|
`"field": "value"` pairs in the request body:
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
|
@ -204,9 +204,9 @@ PUT /customer/_doc/1
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
|
|
||||||
This request automatically creates the _customer_ index if it doesn't already
|
This request automatically creates the `customer` index if it doesn't already
|
||||||
exist, adds a new document that has an ID of `1`, and stores and
|
exist, adds a new document that has an ID of `1`, and stores and
|
||||||
indexes the _name_ field.
|
indexes the `name` field.
|
||||||
|
|
||||||
Since this is a new document, the response shows that the result of the
|
Since this is a new document, the response shows that the result of the
|
||||||
operation was that version 1 of the document was created:
|
operation was that version 1 of the document was created:
|
||||||
|
@ -264,46 +264,22 @@ and shows the original source fields that were indexed.
|
||||||
// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ ]
|
// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ ]
|
||||||
// TESTRESPONSE[s/"_primary_term" : \d+/"_primary_term" : $body._primary_term/]
|
// TESTRESPONSE[s/"_primary_term" : \d+/"_primary_term" : $body._primary_term/]
|
||||||
|
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[getting-started-batch-processing]]
|
[[getting-started-batch-processing]]
|
||||||
=== Batch processing
|
=== Indexing documents in bulk
|
||||||
|
|
||||||
In addition to being able to index, update, and delete individual documents, Elasticsearch also provides the ability to perform any of the above operations in batches using the {ref}/docs-bulk.html[`_bulk` API]. This functionality is important in that it provides a very efficient mechanism to do multiple operations as fast as possible with as few network roundtrips as possible.
|
If you have a lot of documents to index, you can submit them in batches with
|
||||||
|
the {ref}/docs-bulk.html[bulk API]. Using bulk to batch document
|
||||||
|
operations is significantly faster than submitting requests individually as it minimizes network roundtrips.
|
||||||
|
|
||||||
As a quick example, the following call indexes two documents (ID 1 - John Doe and ID 2 - Jane Doe) in one bulk operation:
|
The optimal batch size depends on a number of factors: the document size and complexity, the indexing and search load, and the resources available to your cluster. A good place to start is with batches of 1,000 to 5,000 documents
|
||||||
|
and a total payload between 5MB and 15MB. From there, you can experiment
|
||||||
|
to find the sweet spot.
|
||||||
|
|
||||||
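As a quick sketch, a small bulk request against the `customer` index from the earlier example might look like this (the names are placeholder data):

[source,js]
--------------------------------------------------
POST /customer/_bulk
{ "index" : { "_id" : "1" } }
{ "name" : "John Doe" }
{ "index" : { "_id" : "2" } }
{ "name" : "Jane Doe" }
--------------------------------------------------
// CONSOLE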
[source,js]
|
To get some data into {es} that you can start searching and analyzing:
|
||||||
--------------------------------------------------
|
|
||||||
POST /customer/_bulk?pretty
|
|
||||||
{"index":{"_id":"1"}}
|
|
||||||
{"name": "John Doe" }
|
|
||||||
{"index":{"_id":"2"}}
|
|
||||||
{"name": "Jane Doe" }
|
|
||||||
--------------------------------------------------
|
|
||||||
// CONSOLE
|
|
||||||
|
|
||||||
This example updates the first document (ID of 1) and then deletes the second document (ID of 2) in one bulk operation:
|
|
||||||
|
|
||||||
[source,sh]
|
|
||||||
--------------------------------------------------
|
|
||||||
POST /customer/_bulk
|
|
||||||
{"update":{"_id":"1"}}
|
|
||||||
{"doc": { "name": "John Doe becomes Jane Doe" } }
|
|
||||||
{"delete":{"_id":"2"}}
|
|
||||||
--------------------------------------------------
|
|
||||||
// CONSOLE
|
|
||||||
// TEST[continued]
|
|
||||||
|
|
||||||
Note above that for the delete action, there is no corresponding source document after it since deletes only require the ID of the document to be deleted.
|
|
||||||
|
|
||||||
The Bulk API does not fail due to failures in one of the actions. If a single action fails for whatever reason, it will continue to process the remainder of the actions after it. When the bulk API returns, it will provide a status for each action (in the same order it was sent in) so that you can check if a specific action failed or not.
|
|
||||||
|
|
||||||
[float]
|
|
||||||
=== Sample dataset
|
|
||||||
|
|
||||||
Now that we've gotten a glimpse of the basics, let's try to work on a more realistic dataset. I've prepared a sample of fictitious JSON documents of customer bank account information. Each document has the following schema:
|
|
||||||
|
|
||||||
|
. Download the https://github.com/elastic/elasticsearch/blob/master/docs/src/test/resources/accounts.json?raw=true[`accounts.json`] sample data set. The documents in this randomly-generated data set represent user accounts with the following information:
|
||||||
|
+
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
{
|
{
|
||||||
|
@ -322,21 +298,19 @@ Now that we've gotten a glimpse of the basics, let's try to work on a more reali
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// NOTCONSOLE
|
// NOTCONSOLE
|
||||||
|
|
||||||
For the curious, this data was generated using http://www.json-generator.com/[`www.json-generator.com/`], so please ignore the actual values and semantics of the data as these are all randomly generated.
|
. Index the account data into the `bank` index with the following `_bulk` request:
|
||||||
|
+
|
||||||
You can download the sample dataset (accounts.json) from https://github.com/elastic/elasticsearch/blob/master/docs/src/test/resources/accounts.json?raw=true[here]. Extract it to our current directory and let's load it into our cluster as follows:
|
|
||||||
|
|
||||||
[source,sh]
|
[source,sh]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
curl -H "Content-Type: application/json" -XPOST "localhost:9200/bank/_bulk?pretty&refresh" --data-binary "@accounts.json"
|
curl -H "Content-Type: application/json" -XPOST "localhost:9200/bank/_bulk?pretty&refresh" --data-binary "@accounts.json"
|
||||||
curl "localhost:9200/_cat/indices?v"
|
curl "localhost:9200/_cat/indices?v"
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// NOTCONSOLE
|
// NOTCONSOLE
|
||||||
|
+
|
||||||
////
|
////
|
||||||
This replicates the above in a document-testing friendly way but isn't visible
|
This replicates the above in a document-testing friendly way but isn't visible
|
||||||
in the docs:
|
in the docs:
|
||||||
|
+
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
GET /_cat/indices?v
|
GET /_cat/indices?v
|
||||||
|
@ -344,9 +318,9 @@ GET /_cat/indices?v
|
||||||
// CONSOLE
|
// CONSOLE
|
||||||
// TEST[setup:bank]
|
// TEST[setup:bank]
|
||||||
////
|
////
|
||||||
|
+
|
||||||
And the response:
|
The response indicates that 1,000 documents were indexed successfully.
|
||||||
|
+
|
||||||
[source,txt]
|
[source,txt]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
|
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
|
||||||
|
@ -355,8 +329,6 @@ yellow open bank l7sSYV2cQXmu6_4rJWVIww 5 1 1000 0 12
|
||||||
// TESTRESPONSE[s/128.6kb/\\d+(\\.\\d+)?[mk]?b/]
|
// TESTRESPONSE[s/128.6kb/\\d+(\\.\\d+)?[mk]?b/]
|
||||||
// TESTRESPONSE[s/l7sSYV2cQXmu6_4rJWVIww/.+/ non_json]
|
// TESTRESPONSE[s/l7sSYV2cQXmu6_4rJWVIww/.+/ non_json]
|
||||||
|
|
||||||
Which means that we just successfully bulk indexed 1000 documents into the bank index.
|
|
||||||
|
|
||||||
[[getting-started-search]]
|
[[getting-started-search]]
|
||||||
== Start searching
|
== Start searching
|
||||||
|
|
||||||
|
|
|
@ -76,12 +76,23 @@ commit point. Defaults to `512mb`.
|
||||||
|
|
||||||
`index.translog.retention.size`::
|
`index.translog.retention.size`::
|
||||||
|
|
||||||
The total size of translog files to keep. Keeping more translog files increases
|
When soft deletes are disabled (enabled by default in 7.0 or later),
|
||||||
the chance of performing an operation based sync when recovering replicas. If
|
`index.translog.retention.size` controls the total size of translog files to keep.
|
||||||
the translog files are not sufficient, replica recovery will fall back to a
|
Keeping more translog files increases the chance of performing an operation based
|
||||||
file based sync. Defaults to `512mb`
|
sync when recovering replicas. If the translog files are not sufficient,
|
||||||
|
replica recovery will fall back to a file based sync. Defaults to `512mb`.
|
||||||
|
|
||||||
|
Both `index.translog.retention.size` and `index.translog.retention.age` should not
|
||||||
|
be specified unless soft deletes are disabled, as they will be ignored.
|
||||||
|
|
||||||
|
|
||||||
`index.translog.retention.age`::
|
`index.translog.retention.age`::
|
||||||
|
|
||||||
The maximum duration for which translog files will be kept. Defaults to `12h`.
|
When soft deletes are disabled (enabled by default in 7.0 or later),
|
||||||
|
`index.translog.retention.age` controls the maximum duration for which translog
|
||||||
|
files are kept. Keeping more translog files increases the chance of performing an
|
||||||
|
operation based sync when recovering replicas. If the translog files are not sufficient,
|
||||||
|
replica recovery will fall back to a file based sync. Defaults to `12h`.
|
||||||
|
|
||||||
|
Both `index.translog.retention.size` and `index.translog.retention.age` should not
|
||||||
|
be specified unless soft deletes are disabled, as they will be ignored.
|
||||||
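For clusters where soft deletes are disabled, a sketch of setting both retention values explicitly might look like the following; the index name `my_index` and the values shown are only examples:

[source,js]
--------------------------------------------------
PUT /my_index/_settings
{
  "index.translog.retention.size": "1gb",
  "index.translog.retention.age": "24h"
}
--------------------------------------------------
// CONSOLE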
|
|
|
@ -1,8 +1,10 @@
|
||||||
[[indices-get-mapping]]
|
[[indices-get-mapping]]
|
||||||
=== Get Mapping
|
=== Get mapping API
|
||||||
|
++++
|
||||||
|
<titleabbrev>Get mapping</titleabbrev>
|
||||||
|
++++
|
||||||
|
|
||||||
The get mapping API allows to retrieve mapping definitions for an index or
|
Retrieves <<mapping,mapping definitions>> for indices in a cluster.
|
||||||
index/type.
|
|
||||||
|
|
||||||
[source,js]
|
[source,js]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
|
@ -13,10 +15,46 @@ GET /twitter/_mapping
|
||||||
|
|
||||||
NOTE: Before 7.0.0, the 'mappings' definition used to include a type name. Although mappings
|
NOTE: Before 7.0.0, the 'mappings' definition used to include a type name. Although mappings
|
||||||
in responses no longer contain a type name by default, you can still request the old format
|
in responses no longer contain a type name by default, you can still request the old format
|
||||||
through the parameter include_type_name. For more details, please see <<removal-of-types>>.
|
through the parameter `include_type_name`. For more details, please see <<removal-of-types>>.
|
||||||
|
|
||||||
[float]
|
|
||||||
==== Multiple Indices
|
[[get-mapping-api-request]]
|
||||||
|
==== {api-request-title}
|
||||||
|
|
||||||
|
`GET /_mapping`
|
||||||
|
|
||||||
|
`GET /{index}/_mapping`
|
||||||
|
|
||||||
|
|
||||||
|
[[get-mapping-api-path-params]]
|
||||||
|
==== {api-path-parms-title}
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=index]
|
||||||
|
|
||||||
|
|
||||||
|
[[get-mapping-api-query-params]]
|
||||||
|
==== {api-query-parms-title}
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices]
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards]
|
||||||
|
+
|
||||||
|
Defaults to `open`.
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=include-type-name]
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable]
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
|
||||||
|
|
||||||
|
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
|
||||||
|
|
||||||
|
|
||||||
|
[[get-mapping-api-example]]
|
||||||
|
==== {api-examples-title}
|
||||||
|
|
||||||
|
[[get-mapping-api-multi-ex]]
|
||||||
|
===== Multiple indices
|
||||||
|
|
||||||
The get mapping API can be used to get more than one index with a
|
The get mapping API can be used to get more than one index with a
|
||||||
single call. General usage of the API follows the following syntax:
|
single call. General usage of the API follows the following syntax:
|
||||||
|
|
|
@ -42,14 +42,14 @@ Serves as an advice on how to set `model_memory_limit` when creating {dfanalytic
|
||||||
[[ml-estimate-memory-usage-dfanalytics-results]]
|
[[ml-estimate-memory-usage-dfanalytics-results]]
|
||||||
==== {api-response-body-title}
|
==== {api-response-body-title}
|
||||||
|
|
||||||
`expected_memory_usage_with_one_partition`::
|
`expected_memory_without_disk`::
|
||||||
(string) Estimated memory usage under the assumption that the whole {dfanalytics} should happen in memory
|
(string) Estimated memory usage under the assumption that the whole {dfanalytics} should happen in memory
|
||||||
(i.e. without overflowing to disk).
|
(i.e. without overflowing to disk).
|
||||||
|
|
||||||
`expected_memory_usage_with_max_partitions`::
|
`expected_memory_with_disk`::
|
||||||
(string) Estimated memory usage under the assumption that overflowing to disk is allowed during {dfanalytics}.
|
(string) Estimated memory usage under the assumption that overflowing to disk is allowed during {dfanalytics}.
|
||||||
`expected_memory_usage_with_max_partitions` is usually smaller than `expected_memory_usage_with_one_partition`
|
`expected_memory_with_disk` is usually smaller than `expected_memory_without_disk` as using disk allows to
|
||||||
as using disk allows to limit the main memory needed to perform {dfanalytics}.
|
limit the main memory needed to perform {dfanalytics}.
|
||||||
|
|
||||||
[[ml-estimate-memory-usage-dfanalytics-example]]
|
[[ml-estimate-memory-usage-dfanalytics-example]]
|
||||||
==== {api-examples-title}
|
==== {api-examples-title}
|
||||||
|
@ -76,8 +76,8 @@ The API returns the following results:
|
||||||
[source,js]
|
[source,js]
|
||||||
----
|
----
|
||||||
{
|
{
|
||||||
"expected_memory_usage_with_one_partition": "128MB",
|
"expected_memory_without_disk": "128MB",
|
||||||
"expected_memory_usage_with_max_partitions": "32MB"
|
"expected_memory_with_disk": "32MB"
|
||||||
}
|
}
|
||||||
----
|
----
|
||||||
// TESTRESPONSE
|
// TESTRESPONSE
|
|
@ -44,6 +44,12 @@ packages together commonly used metrics for various analyses.
|
||||||
(Required, object) Defines the `index` in which the evaluation will be
|
(Required, object) Defines the `index` in which the evaluation will be
|
||||||
performed.
|
performed.
|
||||||
|
|
||||||
|
`query`::
|
||||||
|
(Optional, object) Query used to select data from the index, written in
|
||||||
|
the {es} query domain-specific language (DSL). This value corresponds to the query
|
||||||
|
object in an {es} search POST body. By default, this property has the following
|
||||||
|
value: `{"match_all": {}}`.
|
||||||
|
|
||||||
`evaluation`::
|
`evaluation`::
|
||||||
(Required, object) Defines the type of evaluation you want to perform. For example:
|
(Required, object) Defines the type of evaluation you want to perform. For example:
|
||||||
`binary_soft_classification`. See <<ml-evaluate-dfanalytics-resources>>.
|
`binary_soft_classification`. See <<ml-evaluate-dfanalytics-resources>>.
|
||||||
|
|
|
@ -332,6 +332,42 @@ POST /_snapshot/my_unverified_backup/_verify
|
||||||
|
|
||||||
It returns a list of nodes where repository was successfully verified or an error message if verification process failed.
|
It returns a list of nodes where repository was successfully verified or an error message if verification process failed.
|
||||||
|
|
||||||
|
[float]
|
||||||
|
===== Repository Cleanup
|
||||||
|
Repositories can over time accumulate data that is not referenced by any existing snapshot. This is a result of the data safety guarantees
|
||||||
|
the snapshot functionality provides in failure scenarios during snapshot creation and the decentralized nature of the snapshot creation
|
||||||
|
process. This unreferenced data does not in any way negatively impact the performance or safety of a snapshot repository, but it leads to higher
|
||||||
|
than necessary storage use. In order to clean up this unreferenced data, users can call the cleanup endpoint for a repository, which will
|
||||||
|
trigger a complete accounting of the repository's contents and subsequent deletion of all unreferenced data that was found.
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
-----------------------------------
|
||||||
|
POST /_snapshot/my_repository/_cleanup
|
||||||
|
-----------------------------------
|
||||||
|
// CONSOLE
|
||||||
|
// TEST[continued]
|
||||||
|
|
||||||
|
The response to a cleanup request looks as follows:
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
--------------------------------------------------
|
||||||
|
{
|
||||||
|
"results": {
|
||||||
|
"deleted_bytes": 20,
|
||||||
|
"deleted_blobs": 5
|
||||||
|
}
|
||||||
|
}
|
||||||
|
--------------------------------------------------
|
||||||
|
// TESTRESPONSE
|
||||||
|
|
||||||
|
Depending on the concrete repository implementation, the numbers shown for bytes freed as well as the number of blobs removed will either
|
||||||
|
be an approximation or an exact result. Any non-zero value for the number of blobs removed implies that unreferenced blobs were found and
|
||||||
|
subsequently cleaned up.
|
||||||
|
|
||||||
|
Please note that most of the cleanup operations executed by this endpoint are automatically executed when deleting any snapshot from a
|
||||||
|
repository. If you regularly delete snapshots, you will in most cases see no or only minor space savings from using this functionality
|
||||||
|
and should lower your frequency of invoking it accordingly.
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[snapshots-take-snapshot]]
|
[[snapshots-take-snapshot]]
|
||||||
=== Snapshot
|
=== Snapshot
|
||||||
|
|
|
@@ -4,13 +4,152 @@
 <titleabbrev>Match</titleabbrev>
 ++++
 
-`match` queries accept text/numerics/dates, analyzes
-them, and constructs a query. For example:
+Returns documents that match a provided text, number, date or boolean value. The
+provided text is analyzed before matching.
+
+The `match` query is the standard query for performing a full-text search,
+including options for fuzzy matching.
 
+
+[[match-query-ex-request]]
+==== Example request
+
 [source,js]
 --------------------------------------------------
 GET /_search
+{
+    "query": {
+        "match" : {
+            "message" : {
+                "query" : "this is a test"
+            }
+        }
+    }
+}
+--------------------------------------------------
+// CONSOLE
+
+
+[[match-top-level-params]]
+==== Top-level parameters for `match`
+
+`<field>`::
+(Required, object) Field you wish to search.
+
+
+[[match-field-params]]
+==== Parameters for `<field>`
+`query`::
++
+--
+(Required) Text, number, boolean value or date you wish to find in the provided
+`<field>`.
+
+The `match` query <<analysis,analyzes>> any provided text before performing a
+search. This means the `match` query can search <<text,`text`>> fields for
+analyzed tokens rather than an exact term.
+--
+
+`analyzer`::
+(Optional, string) <<analysis,Analyzer>> used to convert the text in the `query`
+value into tokens. Defaults to the <<specify-index-time-analyzer,index-time
+analyzer>> mapped for the `<field>`. If no analyzer is mapped, the index's
+default analyzer is used.
+
+`auto_generate_synonyms_phrase_query`::
++
+--
+(Optional, boolean) If `true`, <<query-dsl-match-query-phrase,match phrase>>
+queries are automatically created for multi-term synonyms. Defaults to `true`.
+
+See <<query-dsl-match-query-synonyms,Use synonyms with match query>> for an
+example.
+--
+
+`fuzziness`::
+(Optional, string) Maximum edit distance allowed for matching. See <<fuzziness>>
+for valid values and more information. See <<query-dsl-match-query-fuzziness>>
+for an example.
+
+`max_expansions`::
+(Optional, integer) Maximum number of terms to which the query will
+expand. Defaults to `50`.
+
+`prefix_length`::
+(Optional, integer) Number of beginning characters left unchanged for fuzzy
+matching. Defaults to `0`.
+
+`transpositions`::
+(Optional, boolean) If `true`, edits for fuzzy matching include
+transpositions of two adjacent characters (ab → ba). Defaults to `true`.
+
+`fuzzy_rewrite`::
++
+--
+(Optional, string) Method used to rewrite the query. See the
+<<query-dsl-multi-term-rewrite, `rewrite` parameter>> for valid values and more
+information.
+
+If the `fuzziness` parameter is not `0`, the `match` query uses a `rewrite`
+method of `top_terms_blended_freqs_${max_expansions}` by default.
+--
+
+`lenient`::
+(Optional, boolean) If `true`, format-based errors, such as providing a text
+`query` value for a <<number,numeric>> field, are ignored. Defaults to `false`.
+
+`operator`::
++
+--
+(Optional, string) Boolean logic used to interpret text in the `query` value.
+Valid values are:
+
+`OR` (Default)::
+For example, a `query` value of `capital of Hungary` is interpreted as `capital
+OR of OR Hungary`.
+
+`AND`::
+For example, a `query` value of `capital of Hungary` is interpreted as `capital
+AND of AND Hungary`.
+--
+
+`minimum_should_match`::
++
+--
+(Optional, string) Minimum number of clauses that must match for a document to
+be returned. See the <<query-dsl-minimum-should-match, `minimum_should_match`
+parameter>> for valid values and more information.
+--
+
+`zero_terms_query`::
++
+--
+(Optional, string) Indicates whether no documents are returned if the `analyzer`
+removes all tokens, such as when using a `stop` filter. Valid values are:
+
+`none` (Default)::
+No documents are returned if the `analyzer` removes all tokens.
+
+`all`::
+Returns all documents, similar to a <<query-dsl-match-all-query,`match_all`>>
+query.
+
+See <<query-dsl-match-query-zero>> for an example.
+--
+
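As a rough Java-side illustration of how several of these parameters combine, the same kind of query can be built with the `QueryBuilders` helper; the field name and query text are placeholders.

[source,java]
--------------------------------------------------
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.Operator;
import org.elasticsearch.index.query.QueryBuilders;

public class MatchQuerySketch {
    // Builds a match query on a placeholder "message" field; each setter maps
    // to one of the field parameters described above.
    public static MatchQueryBuilder fuzzyAndMatch() {
        return QueryBuilders.matchQuery("message", "this is a test")
            .operator(Operator.AND)      // all terms must match, see `operator`
            .fuzziness(Fuzziness.AUTO)   // maximum edit distance, see `fuzziness`
            .prefixLength(1)             // see `prefix_length`
            .maxExpansions(50);          // see `max_expansions`
    }
}
--------------------------------------------------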
+
+[[match-query-notes]]
+==== Notes
+
+[[query-dsl-match-query-short-ex]]
+===== Short request example
+
+You can simplify the match query syntax by combining the `<field>` and `query`
+parameters. For example:
+
+[source,js]
+----
+GET /_search
 {
     "query": {
         "match" : {
@@ -18,23 +157,38 @@ GET /_search
         }
     }
 }
---------------------------------------------------
+----
 // CONSOLE
 
-Note, `message` is the name of a field, you can substitute the name of
-any field instead.
 
 [[query-dsl-match-query-boolean]]
-==== match
+===== How the match query works
 
 The `match` query is of type `boolean`. It means that the text
 provided is analyzed and the analysis process constructs a boolean query
-from the provided text. The `operator` flag can be set to `or` or `and`
+from the provided text. The `operator` parameter can be set to `or` or `and`
 to control the boolean clauses (defaults to `or`). The minimum number of
 optional `should` clauses to match can be set using the
 <<query-dsl-minimum-should-match,`minimum_should_match`>>
 parameter.
 
+Here is an example with the `operator` parameter:
+
+[source,js]
+--------------------------------------------------
+GET /_search
+{
+    "query": {
+        "match" : {
+            "message" : {
+                "query" : "this is a test",
+                "operator" : "and"
+            }
+        }
+    }
+}
+--------------------------------------------------
+// CONSOLE
+
 The `analyzer` can be set to control which analyzer will perform the
 analysis process on the text. It defaults to the field explicit mapping
 definition, or the default search analyzer.
@@ -44,7 +198,7 @@ data-type mismatches, such as trying to query a numeric field with a text
 query string. Defaults to `false`.
 
 [[query-dsl-match-query-fuzziness]]
-===== Fuzziness
+===== Fuzziness in the match query
 
 `fuzziness` allows _fuzzy matching_ based on the type of field being queried.
 See <<fuzziness>> for allowed settings.
@@ -1,9 +1,38 @@
 
+tag::allow-no-indices[]
+`allow_no_indices`::
+(Optional, boolean) If `true`, the request returns an error if a wildcard
+expression or `_all` value retrieves only missing or closed indices. This
+parameter also applies to <<indices-aliases,index aliases>> that point to a
+missing or closed index.
+end::allow-no-indices[]
+
 tag::bytes[]
 `bytes`::
 (Optional, <<byte-units,byte size units>>) Unit used to display byte values.
 end::bytes[]
 
+tag::expand-wildcards[]
+`expand_wildcards`::
++
+--
+(Optional, string) Controls what kind of indices that wildcard
+expressions can expand to. Valid values are:
+
+`all`::
+Expand to open and closed indices.
+
+`open`::
+Expand only to open indices.
+
+`closed`::
+Expand only to closed indices.
+
+`none`::
+Wildcard expressions are not accepted.
+--
+end::expand-wildcards[]
+
 tag::cat-h[]
 `h`::
 (Optional, string) Comma-separated list of column names to display.
@@ -28,6 +57,19 @@ https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html[HTTP accept header].
 Valid values include JSON, YAML, etc.
 end::http-format[]
 
+tag::include-type-name[]
+`include_type_name`::
+deprecated:[7.0.0, Mapping types have been deprecated. See <<removal-of-types>>.]
+(Optional, boolean) If `true`, a mapping type is expected in the body of
+mappings. Defaults to `false`.
+end::include-type-name[]
+
+tag::index-ignore-unavailable[]
+`ignore_unavailable`::
+(Optional, boolean) If `true`, missing or closed indices are not included in the
+response. Defaults to `false`.
+end::index-ignore-unavailable[]
+
 tag::include-unloaded-segments[]
 `include_unloaded_segments`::
 (Optional, boolean) If `true`, the response includes information from segments
@@ -70,12 +112,65 @@ tag::cat-v[]
 to `false`.
 end::cat-v[]
 
+tag::doc-pipeline[]
+`pipeline`::
+(Optional, string) ID of the pipeline to use to preprocess incoming documents.
+end::doc-pipeline[]
+
+tag::doc-refresh[]
+`refresh`::
+(Optional, enum) If `true`, {es} refreshes the affected shards to make this
+operation visible to search, if `wait_for` then wait for a refresh to make
+this operation visible to search, if `false` do nothing with refreshes.
+Valid values: `true`, `false`, `wait_for`. Default: `false`.
+end::doc-refresh[]
+
+tag::doc-seq-no[]
+`if_seq_no`::
+(Optional, integer) Only perform the operation if the document has this
+sequence number. See <<optimistic-concurrency-control-index>>.
+end::doc-seq-no[]
+
+tag::doc-primary-term[]
+`if_primary_term`::
+(Optional, integer) Only perform the operation if the document has
+this primary term. See <<optimistic-concurrency-control-index>>.
+end::doc-primary-term[]
+
+tag::doc-routing[]
+`routing`::
+(Optional, string) Target the specified primary shard.
+end::doc-routing[]
+
+tag::doc-version[]
+`version`::
+(Optional, integer) Explicit version number for concurrency control.
+The specified version must match the current version of the document for the
+request to succeed.
+end::doc-version[]
+
+tag::doc-version-type[]
+`version_type`::
+(Optional, enum) Specific version type: `internal`, `external`,
+`external_gte`, `force`.
+end::doc-version-type[]
+
+tag::doc-wait-for-active-shards[]
+`wait_for_active_shards`::
+(Optional, string) The number of shard copies that must be active before
+proceeding with the operation. Set to `all` or any positive integer up
+to the total number of shards in the index (`number_of_replicas+1`).
+Default: 1, the primary shard.
+end::doc-wait-for-active-shards[]
+
 tag::timeoutparms[]
 
+tag::timeout[]
 `timeout`::
 (Optional, <<time-units, time units>>) Specifies the period of time to wait for
 a response. If no response is received before the timeout expires, the request
 fails and returns an error. Defaults to `30s`.
+end::timeout[]
 
 tag::master-timeout[]
 `master_timeout`::
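As an illustration of how the document-level parameters above are typically combined, the following Java sketch builds a single index request; the index name, document ID, routing value and sequence numbers are placeholders.

[source,java]
--------------------------------------------------
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.xcontent.XContentType;

public class DocParamsSketch {
    // Index into a placeholder "my-index", but only if the document still has
    // sequence number 10 and primary term 2; wait for a refresh before returning.
    public static IndexRequest conditionalIndex() {
        return new IndexRequest("my-index")
            .id("1")
            .source("{\"message\":\"updated\"}", XContentType.JSON)
            .setIfSeqNo(10)                                          // if_seq_no
            .setIfPrimaryTerm(2)                                     // if_primary_term
            .routing("user-1")                                       // routing
            .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); // refresh=wait_for
    }
}
--------------------------------------------------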
@@ -195,6 +195,21 @@ DELETE _scripts/calculate-score
 // CONSOLE
 // TEST[continued]
 
+[float]
+[[modules-scripting-search-templates]]
+=== Search templates
+You can also use the `_scripts` API to store **search templates**. Search
+templates save specific <<search-search,search requests>> with placeholder
+values, called template parameters.
+
+You can use stored search templates to run searches without writing out the
+entire query. Just provide the stored template's ID and the template parameters.
+This is useful when you want to run a commonly used query quickly and without
+mistakes.
+
+Search templates use the http://mustache.github.io/mustache.5.html[mustache
+templating language]. See <<search-template>> for more information and examples.
+
 [float]
 [[modules-scripting-using-caching]]
 === Script caching
@@ -24,5 +24,11 @@ GET /_search
 
 
 Note that `from` + `size` can not be more than the `index.max_result_window`
-index setting which defaults to 10,000. See the <<request-body-search-scroll,Scroll>> or <<request-body-search-search-after,Search After>>
-API for more efficient ways to do deep scrolling.
+index setting, which defaults to 10,000.
+
+WARNING: {es} uses Lucene's internal doc IDs as tie-breakers. These internal
+doc IDs can be completely different across replicas of the same
+data. When paginating, you might occasionally see that documents with the same
+sort values are not ordered consistently. For deep scrolling, it is more
+efficient to use the <<request-body-search-scroll,Scroll>> or
+<<request-body-search-search-after,Search After>> APIs.
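A Java sketch of plain `from`/`size` paging, with an explicit tie-breaker sort as suggested by the warning above; the index and field names are placeholders.

[source,java]
--------------------------------------------------
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortOrder;

public class PaginationSketch {
    // from/size paging is fine for shallow pages but is capped by
    // index.max_result_window (10,000 by default); use scroll or search_after
    // for deep pagination.
    public static SearchRequest page(int page, int pageSize) {
        SearchSourceBuilder source = new SearchSourceBuilder()
            .from(page * pageSize)
            .size(pageSize)
            .sort("timestamp", SortOrder.DESC)
            .sort("id", SortOrder.ASC); // explicit tie-breaker field
        return new SearchRequest("my-index").source(source);
    }
}
--------------------------------------------------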
@@ -32,7 +32,209 @@ disable scripts per type and context as described in the
 <<allowed-script-types-setting, scripting docs>>
 
 [float]
-==== More template examples
+==== Examples
 
+[float]
+[[pre-registered-templates]]
+===== Store a search template
+
+You can store a search template using the stored scripts API.
+
+[source,js]
+------------------------------------------
+POST _scripts/<templateid>
+{
+  "script": {
+    "lang": "mustache",
+    "source": {
+      "query": {
+        "match": {
+          "title": "{{query_string}}"
+        }
+      }
+    }
+  }
+}
+------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+//////////////////////////
+
+We want to be sure that the template has been created,
+because we'll use it later.
+
+[source,js]
+--------------------------------------------------
+{
+  "acknowledged" : true
+}
+--------------------------------------------------
+// TESTRESPONSE
+
+//////////////////////////
+
+This template can be retrieved by
+
+[source,js]
+------------------------------------------
+GET _scripts/<templateid>
+------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+which is rendered as:
+
+[source,js]
+------------------------------------------
+{
+  "script" : {
+    "lang" : "mustache",
+    "source" : "{\"query\":{\"match\":{\"title\":\"{{query_string}}\"}}}",
+    "options": {
+      "content_type" : "application/json; charset=UTF-8"
+    }
+  },
+  "_id": "<templateid>",
+  "found": true
+}
+------------------------------------------
+// TESTRESPONSE
+
+This template can be deleted by
+
+[source,js]
+------------------------------------------
+DELETE _scripts/<templateid>
+------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+//////////////////////////
+
+We want to be sure that the template has been created,
+because we'll use it later.
+
+[source,js]
+--------------------------------------------------
+{
+  "acknowledged" : true
+}
+--------------------------------------------------
+// TESTRESPONSE
+
+//////////////////////////
+
+[float]
+[[use-registered-templates]]
+===== Use a stored search template
+
+To use a stored template at search time use:
+
+[source,js]
+------------------------------------------
+GET _search/template
+{
+  "id": "<templateid>", <1>
+  "params": {
+    "query_string": "search for these words"
+  }
+}
+------------------------------------------
+// CONSOLE
+// TEST[catch:missing]
+<1> Name of the stored template script.
+
+[float]
+[[_validating_templates]]
+==== Validate a search template
+
+A template can be rendered in a response with given parameters using
+
+[source,js]
+------------------------------------------
+GET _render/template
+{
+  "source": "{ \"query\": { \"terms\": {{#toJson}}statuses{{/toJson}} }}",
+  "params": {
+    "statuses" : {
+      "status": [ "pending", "published" ]
+    }
+  }
+}
+------------------------------------------
+// CONSOLE
+
+This call will return the rendered template:
+
+[source,js]
+------------------------------------------
+{
+  "template_output": {
+    "query": {
+      "terms": {
+        "status": [ <1>
+          "pending",
+          "published"
+        ]
+      }
+    }
+  }
+}
+------------------------------------------
+// TESTRESPONSE
+<1> `status` array has been populated with values from the `params` object.
+
+Stored templates can also be rendered using
+
+[source,js]
+------------------------------------------
+GET _render/template/<template_name>
+{
+  "params": {
+    "..."
+  }
+}
+------------------------------------------
+// NOTCONSOLE
+
+[float]
+===== Explain
+
+You can use `explain` parameter when running a template:
+
+[source,js]
+------------------------------------------
+GET _search/template
+{
+  "id": "my_template",
+  "params": {
+    "status": [ "pending", "published" ]
+  },
+  "explain": true
+}
+------------------------------------------
+// CONSOLE
+// TEST[catch:missing]
+
+[float]
+===== Profiling
+
+You can use `profile` parameter when running a template:
+
+[source,js]
+------------------------------------------
+GET _search/template
+{
+  "id": "my_template",
+  "params": {
+    "status": [ "pending", "published" ]
+  },
+  "profile": true
+}
+------------------------------------------
+// CONSOLE
+// TEST[catch:missing]
+
 [float]
 ===== Filling in a query string with a single value
@@ -397,204 +599,6 @@ The previous query will be rendered as:
 ------------------------------------------
 // TESTRESPONSE
 
 
-[float]
-[[pre-registered-templates]]
-===== Pre-registered template
-
-You can register search templates by using the stored scripts api.
-
-[source,js]
-------------------------------------------
-POST _scripts/<templatename>
-{
-  "script": {
-    "lang": "mustache",
-    "source": {
-      "query": {
-        "match": {
-          "title": "{{query_string}}"
-        }
-      }
-    }
-  }
-}
-------------------------------------------
-// CONSOLE
-// TEST[continued]
-
-//////////////////////////
-
-We want to be sure that the template has been created,
-because we'll use it later.
-
-[source,js]
---------------------------------------------------
-{
-  "acknowledged" : true
-}
---------------------------------------------------
-// TESTRESPONSE
-
-//////////////////////////
-
-This template can be retrieved by
-
-[source,js]
-------------------------------------------
-GET _scripts/<templatename>
-------------------------------------------
-// CONSOLE
-// TEST[continued]
-
-which is rendered as:
-
-[source,js]
-------------------------------------------
-{
-  "script" : {
-    "lang" : "mustache",
-    "source" : "{\"query\":{\"match\":{\"title\":\"{{query_string}}\"}}}",
-    "options": {
-      "content_type" : "application/json; charset=UTF-8"
-    }
-  },
-  "_id": "<templatename>",
-  "found": true
-}
-------------------------------------------
-// TESTRESPONSE
-
-This template can be deleted by
-
-[source,js]
-------------------------------------------
-DELETE _scripts/<templatename>
-------------------------------------------
-// CONSOLE
-// TEST[continued]
-
-//////////////////////////
-
-We want to be sure that the template has been created,
-because we'll use it later.
-
-[source,js]
---------------------------------------------------
-{
-  "acknowledged" : true
-}
---------------------------------------------------
-// TESTRESPONSE
-
-//////////////////////////
-
-To use a stored template at search time use:
-
-[source,js]
-------------------------------------------
-GET _search/template
-{
-  "id": "<templateName>", <1>
-  "params": {
-    "query_string": "search for these words"
-  }
-}
-------------------------------------------
-// CONSOLE
-// TEST[catch:missing]
-<1> Name of the stored template script.
-
-[float]
-==== Validating templates
-
-A template can be rendered in a response with given parameters using
-
-[source,js]
-------------------------------------------
-GET _render/template
-{
-  "source": "{ \"query\": { \"terms\": {{#toJson}}statuses{{/toJson}} }}",
-  "params": {
-    "statuses" : {
-      "status": [ "pending", "published" ]
-    }
-  }
-}
-------------------------------------------
-// CONSOLE
-
-This call will return the rendered template:
-
-[source,js]
-------------------------------------------
-{
-  "template_output": {
-    "query": {
-      "terms": {
-        "status": [ <1>
-          "pending",
-          "published"
-        ]
-      }
-    }
-  }
-}
-------------------------------------------
-// TESTRESPONSE
-<1> `status` array has been populated with values from the `params` object.
-
-Pre-registered templates can also be rendered using
-
-[source,js]
-------------------------------------------
-GET _render/template/<template_name>
-{
-  "params": {
-    "..."
-  }
-}
-------------------------------------------
-// NOTCONSOLE
-
-[float]
-===== Explain
-
-You can use `explain` parameter when running a template:
-
-[source,js]
-------------------------------------------
-GET _search/template
-{
-  "id": "my_template",
-  "params": {
-    "status": [ "pending", "published" ]
-  },
-  "explain": true
-}
-------------------------------------------
-// CONSOLE
-// TEST[catch:missing]
-
-[float]
-===== Profiling
-
-You can use `profile` parameter when running a template:
-
-[source,js]
-------------------------------------------
-GET _search/template
-{
-  "id": "my_template",
-  "params": {
-    "status": [ "pending", "published" ]
-  },
-  "profile": true
-}
-------------------------------------------
-// CONSOLE
-// TEST[catch:missing]
-
 [[multi-search-template]]
 === Multi Search Template
@@ -76,7 +76,7 @@ corresponding endpoints are whitelisted as well.
 
 [[ssl-notification-settings]]
 :ssl-prefix: xpack.http
-:component: {watcher}
+:component: {watcher} HTTP
 :verifies:
 :server!:
 :ssl-context: watcher
@@ -215,6 +215,15 @@ HTML feature groups>>.
 Set to `false` to completely disable HTML sanitation. Not recommended.
 Defaults to `true`.
 
+[[ssl-notification-smtp-settings]]
+:ssl-prefix: xpack.notification.email
+:component: {watcher} Email
+:verifies:
+:server!:
+:ssl-context: watcher-email
+
+include::ssl-settings.asciidoc[]
+
 [float]
 [[slack-notification-settings]]
 ==== Slack Notification Settings
@@ -73,7 +73,7 @@ public abstract class Command implements Closeable {
             StringWriter sw = new StringWriter();
             PrintWriter pw = new PrintWriter(sw)) {
             e.printStackTrace(pw);
-            terminal.println(sw.toString());
+            terminal.errorPrintln(sw.toString());
         } catch (final IOException impossible) {
             // StringWriter#close declares a checked IOException from the Closeable interface but the Javadocs for StringWriter
             // say that an exception here is impossible
@@ -89,14 +89,15 @@ public abstract class Command implements Closeable {
         try {
             mainWithoutErrorHandling(args, terminal);
         } catch (OptionException e) {
-            printHelp(terminal);
-            terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage());
+            // print help to stderr on exceptions
+            printHelp(terminal, true);
+            terminal.errorPrintln(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage());
             return ExitCodes.USAGE;
         } catch (UserException e) {
             if (e.exitCode == ExitCodes.USAGE) {
-                printHelp(terminal);
+                printHelp(terminal, true);
             }
-            terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage());
+            terminal.errorPrintln(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage());
             return e.exitCode;
         }
         return ExitCodes.OK;
@@ -109,7 +110,7 @@ public abstract class Command implements Closeable {
         final OptionSet options = parser.parse(args);
 
         if (options.has(helpOption)) {
-            printHelp(terminal);
+            printHelp(terminal, false);
             return;
         }
 
@@ -125,12 +126,18 @@ public abstract class Command implements Closeable {
     }
 
     /** Prints a help message for the command to the terminal. */
-    private void printHelp(Terminal terminal) throws IOException {
+    private void printHelp(Terminal terminal, boolean toStdError) throws IOException {
+        if (toStdError) {
+            terminal.errorPrintln(description);
+            terminal.errorPrintln("");
+            parser.printHelpOn(terminal.getErrorWriter());
+        } else {
             terminal.println(description);
             terminal.println("");
             printAdditionalHelp(terminal);
             parser.printHelpOn(terminal.getWriter());
         }
+    }
 
     /** Prints additional help information, specific to the command */
     protected void printAdditionalHelp(Terminal terminal) {}
@@ -39,9 +39,17 @@ import java.util.Locale;
  */
 public abstract class Terminal {
 
+    /** Writer to standard error - not supplied by the {@link Console} API, so we share with subclasses */
+    private static final PrintWriter ERROR_WRITER = newErrorWriter();
+
     /** The default terminal implementation, which will be a console if available, or stdout/stderr if not. */
     public static final Terminal DEFAULT = ConsoleTerminal.isSupported() ? new ConsoleTerminal() : new SystemTerminal();
 
+    @SuppressForbidden(reason = "Writer for System.err")
+    private static PrintWriter newErrorWriter() {
+        return new PrintWriter(System.err);
+    }
+
     /** Defines the available verbosity levels of messages to be printed. */
     public enum Verbosity {
         SILENT, /* always printed */
@@ -70,9 +78,14 @@ public abstract class Terminal {
     /** Reads password text from the terminal input. See {@link Console#readPassword()}}. */
     public abstract char[] readSecret(String prompt);
 
-    /** Returns a Writer which can be used to write to the terminal directly. */
+    /** Returns a Writer which can be used to write to the terminal directly using standard output. */
     public abstract PrintWriter getWriter();
 
+    /** Returns a Writer which can be used to write to the terminal directly using standard error. */
+    public PrintWriter getErrorWriter() {
+        return ERROR_WRITER;
+    }
+
     /** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level. */
     public final void println(String msg) {
         println(Verbosity.NORMAL, msg);
@@ -83,12 +96,33 @@ public abstract class Terminal {
         print(verbosity, msg + lineSeparator);
     }
 
-    /** Prints message to the terminal at {@code verbosity} level, without a newline. */
+    /** Prints message to the terminal's standard output at {@code verbosity} level, without a newline. */
     public final void print(Verbosity verbosity, String msg) {
-        if (isPrintable(verbosity)) {
-            getWriter().print(msg);
-            getWriter().flush();
-        }
+        print(verbosity, msg, false);
     }
+
+    /** Prints message to the terminal at {@code verbosity} level, without a newline. */
+    private void print(Verbosity verbosity, String msg, boolean isError) {
+        if (isPrintable(verbosity)) {
+            PrintWriter writer = isError ? getErrorWriter() : getWriter();
+            writer.print(msg);
+            writer.flush();
+        }
+    }
+
+    /** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level, without a newline. */
+    public final void errorPrint(Verbosity verbosity, String msg) {
+        print(verbosity, msg, true);
+    }
+
+    /** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level. */
+    public final void errorPrintln(String msg) {
+        errorPrintln(Verbosity.NORMAL, msg);
+    }
+
+    /** Prints a line to the terminal's standard error at {@code verbosity} level. */
+    public final void errorPrintln(Verbosity verbosity, String msg) {
+        errorPrint(verbosity, msg + lineSeparator);
+    }
 
     /** Checks if is enough {@code verbosity} level to be printed */
@@ -110,7 +144,7 @@ public abstract class Terminal {
             answer = answer.toLowerCase(Locale.ROOT);
             boolean answerYes = answer.equals("y");
             if (answerYes == false && answer.equals("n") == false) {
-                println("Did not understand answer '" + answer + "'");
+                errorPrintln("Did not understand answer '" + answer + "'");
                 continue;
             }
             return answerYes;
@@ -165,7 +199,7 @@ public abstract class Terminal {
 
         @Override
         public String readText(String text) {
-            getWriter().print(text);
+            getErrorWriter().print(text); // prompts should go to standard error to avoid mixing with list output
             BufferedReader reader = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset()));
             try {
                 final String line = reader.readLine();
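A minimal sketch of how a command might use the split between standard output and standard error introduced here; the command name and messages are made up.

[source,java]
--------------------------------------------------
import joptsimple.OptionSet;
import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.Terminal;

// Hypothetical command for illustration: results stay on stdout via println(),
// diagnostics go to stderr via the new errorPrintln() methods, so piped output
// is no longer polluted by progress messages.
class ListThingsCommand extends Command {

    ListThingsCommand() {
        super("Lists things", () -> {});
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options) throws Exception {
        terminal.println("thing-1");
        terminal.println("thing-2");
        terminal.errorPrintln(Terminal.Verbosity.VERBOSE, "listed 2 things");
    }
}
--------------------------------------------------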
@@ -817,11 +817,6 @@ public final class Walker extends PainlessParserBaseVisitor<ANode> {
 
     @Override
     public ANode visitRegex(RegexContext ctx) {
-        if (false == settings.areRegexesEnabled()) {
-            throw location(ctx).createError(new IllegalStateException("Regexes are disabled. Set [script.painless.regex.enabled] to [true] "
-                + "in elasticsearch.yaml to allow them. Be careful though, regexes break out of Painless's protection against deep "
-                + "recursion and long loops."));
-        }
         String text = ctx.REGEX().getText();
         int lastSlash = text.lastIndexOf('/');
         String pattern = text.substring(1, lastSlash);
@@ -40,6 +40,8 @@ public final class ERegex extends AExpression {
     private final int flags;
     private Constant constant;
 
+    private CompilerSettings settings;
+
     public ERegex(Location location, String pattern, String flagsString) {
         super(location);
 
@@ -56,7 +58,7 @@ public final class ERegex extends AExpression {
 
     @Override
     void storeSettings(CompilerSettings settings) {
-        // do nothing
+        this.settings = settings;
     }
 
     @Override
@@ -66,6 +68,12 @@ public final class ERegex extends AExpression {
 
     @Override
     void analyze(Locals locals) {
+        if (false == settings.areRegexesEnabled()) {
+            throw createError(new IllegalStateException("Regexes are disabled. Set [script.painless.regex.enabled] to [true] "
+                + "in elasticsearch.yaml to allow them. Be careful though, regexes break out of Painless's protection against deep "
+                + "recursion and long loops."));
+        }
+
         if (!read) {
             throw createError(new IllegalArgumentException("Regex constant may only be read [" + pattern + "]."));
         }
@@ -262,7 +262,7 @@ public class WhenThingsGoWrongTests extends ScriptTestCase {
     }
 
     public void testRegexDisabledByDefault() {
-        IllegalStateException e = expectThrows(IllegalStateException.class, () -> exec("return 'foo' ==~ /foo/"));
+        IllegalStateException e = expectScriptThrows(IllegalStateException.class, () -> exec("return 'foo' ==~ /foo/"));
         assertEquals("Regexes are disabled. Set [script.painless.regex.enabled] to [true] in elasticsearch.yaml to allow them. "
             + "Be careful though, regexes break out of Painless's protection against deep recursion and long loops.", e.getMessage());
     }
@@ -123,11 +123,14 @@ public class ReindexDocumentationIT extends ESIntegTestCase {
                 .filter(QueryBuilders.termQuery("level", "awesome"))
                 .maxDocs(1000)
                 .script(new Script(ScriptType.INLINE,
-                    "ctx._source.awesome = 'absolutely'",
                     "painless",
+                    "ctx._source.awesome = 'absolutely'",
                     Collections.emptyMap()));
             BulkByScrollResponse response = updateByQuery.get();
             // end::update-by-query-filter
+
+            // validate order of string params to Script constructor
+            assertEquals(updateByQuery.request().getScript().getLang(), "painless");
         }
         {
             // tag::update-by-query-size
@@ -157,16 +160,19 @@ public class ReindexDocumentationIT extends ESIntegTestCase {
             updateByQuery.source("source_index")
                 .script(new Script(
                     ScriptType.INLINE,
+                    "painless",
                     "if (ctx._source.awesome == 'absolutely') {"
                         + " ctx.op='noop'"
                         + "} else if (ctx._source.awesome == 'lame') {"
                         + " ctx.op='delete'"
                         + "} else {"
                         + "ctx._source.awesome = 'absolutely'}",
-                    "painless",
                     Collections.emptyMap()));
             BulkByScrollResponse response = updateByQuery.get();
             // end::update-by-query-script
+
+            // validate order of string params to Script constructor
+            assertEquals(updateByQuery.request().getScript().getLang(), "painless");
         }
         {
             // tag::update-by-query-multi-index
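The point of the reordering above is the `Script` constructor signature, which takes the script type, then the language, then the source, and finally the parameters. A standalone construction in the corrected order looks like this:

[source,java]
--------------------------------------------------
import java.util.Collections;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

public class ScriptOrderExample {
    public static Script inlinePainless() {
        // Script(ScriptType type, String lang, String idOrCode, Map<String, Object> params)
        return new Script(ScriptType.INLINE,
            "painless",                           // language comes before the source
            "ctx._source.awesome = 'absolutely'", // inline source
            Collections.emptyMap());              // no parameters
    }
}
--------------------------------------------------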
@@ -23,6 +23,7 @@ import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
 
 import java.io.BufferedInputStream;
@@ -97,7 +98,7 @@ public class URLBlobContainer extends AbstractBlobContainer {
     }
 
     @Override
-    public void delete() {
+    public DeleteResult delete() {
         throw new UnsupportedOperationException("URL repository is read only");
     }
 
@@ -152,7 +152,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
     public Netty4HttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool,
                                      NamedXContentRegistry xContentRegistry, Dispatcher dispatcher) {
         super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher);
-        Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings));
+        Netty4Utils.setAvailableProcessors(EsExecutors.NODE_PROCESSORS_SETTING.get(settings));
 
         this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
         this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
@@ -112,7 +112,7 @@ public class Netty4Transport extends TcpTransport {
                            PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry,
                            CircuitBreakerService circuitBreakerService) {
         super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService);
-        Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings));
+        Netty4Utils.setAvailableProcessors(EsExecutors.NODE_PROCESSORS_SETTING.get(settings));
         this.workerCount = WORKER_COUNT.get(settings);
 
         // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one
@ -23,17 +23,22 @@ import org.apache.lucene.analysis.Tokenizer;
|
||||||
import org.apache.lucene.analysis.ja.JapaneseTokenizer;
|
import org.apache.lucene.analysis.ja.JapaneseTokenizer;
|
||||||
import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode;
|
import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode;
|
||||||
import org.apache.lucene.analysis.ja.dict.UserDictionary;
|
import org.apache.lucene.analysis.ja.dict.UserDictionary;
|
||||||
|
import org.apache.lucene.analysis.ja.util.CSVUtil;
|
||||||
import org.elasticsearch.ElasticsearchException;
|
import org.elasticsearch.ElasticsearchException;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.env.Environment;
|
import org.elasticsearch.env.Environment;
|
||||||
import org.elasticsearch.index.IndexSettings;
|
import org.elasticsearch.index.IndexSettings;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.io.Reader;
|
import java.io.StringReader;
|
||||||
|
import java.util.HashSet;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
public class KuromojiTokenizerFactory extends AbstractTokenizerFactory {
|
public class KuromojiTokenizerFactory extends AbstractTokenizerFactory {
|
||||||
|
|
||||||
private static final String USER_DICT_OPTION = "user_dictionary";
|
private static final String USER_DICT_PATH_OPTION = "user_dictionary";
|
||||||
|
private static final String USER_DICT_RULES_OPTION = "user_dictionary_rules";
|
||||||
private static final String NBEST_COST = "nbest_cost";
|
private static final String NBEST_COST = "nbest_cost";
|
||||||
private static final String NBEST_EXAMPLES = "nbest_examples";
|
private static final String NBEST_EXAMPLES = "nbest_examples";
|
||||||
|
|
||||||
|
@ -54,17 +59,33 @@ public class KuromojiTokenizerFactory extends AbstractTokenizerFactory {
|
||||||
}
|
}
|
||||||
|
|
||||||
public static UserDictionary getUserDictionary(Environment env, Settings settings) {
|
public static UserDictionary getUserDictionary(Environment env, Settings settings) {
|
||||||
|
if (settings.get(USER_DICT_PATH_OPTION) != null && settings.get(USER_DICT_RULES_OPTION) != null) {
|
||||||
|
throw new IllegalArgumentException("It is not allowed to use [" + USER_DICT_PATH_OPTION + "] in conjunction" +
|
||||||
|
" with [" + USER_DICT_RULES_OPTION + "]");
|
||||||
|
}
|
||||||
try {
|
try {
|
||||||
final Reader reader = Analysis.getReaderFromFile(env, settings, USER_DICT_OPTION);
|
List<String> ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, false);
|
||||||
if (reader == null) {
|
if (ruleList == null || ruleList.isEmpty()) {
|
||||||
return null;
|
return null;
|
||||||
} else {
|
}
|
||||||
try {
|
Set<String> dup = new HashSet<>();
|
||||||
return UserDictionary.open(reader);
|
int lineNum = 0;
|
||||||
} finally {
|
for (String line : ruleList) {
|
||||||
reader.close();
|
// ignore comments
|
||||||
|
if (line.startsWith("#") == false) {
|
||||||
|
String[] values = CSVUtil.parse(line);
|
||||||
|
if (dup.add(values[0]) == false) {
|
||||||
|
throw new IllegalArgumentException("Found duplicate term [" + values[0] + "] in user dictionary " +
|
||||||
|
"at line [" + lineNum + "]");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
++ lineNum;
|
||||||
|
}
|
||||||
|
StringBuilder sb = new StringBuilder();
|
||||||
|
for (String line : ruleList) {
|
||||||
|
sb.append(line).append(System.lineSeparator());
|
||||||
|
}
|
||||||
|
return UserDictionary.open(new StringReader(sb.toString()));
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
throw new ElasticsearchException("failed to load kuromoji user dictionary", e);
|
throw new ElasticsearchException("failed to load kuromoji user dictionary", e);
|
||||||
}
|
}
|
||||||
|
|
|
@ -19,6 +19,7 @@
|
||||||
|
|
||||||
package org.elasticsearch.index.analysis;
|
package org.elasticsearch.index.analysis;
|
||||||
|
|
||||||
|
import org.apache.lucene.analysis.Analyzer;
|
||||||
import org.apache.lucene.analysis.TokenStream;
|
import org.apache.lucene.analysis.TokenStream;
|
||||||
import org.apache.lucene.analysis.Tokenizer;
|
import org.apache.lucene.analysis.Tokenizer;
|
||||||
import org.apache.lucene.analysis.ja.JapaneseAnalyzer;
|
import org.apache.lucene.analysis.ja.JapaneseAnalyzer;
|
||||||
|
@ -39,6 +40,8 @@ import java.io.StringReader;
|
||||||
import java.nio.file.Files;
|
import java.nio.file.Files;
|
||||||
 import java.nio.file.Path;
 
+import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents;
+import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.instanceOf;
@@ -307,4 +310,55 @@ public class KuromojiAnalysisTests extends ESTestCase {
         tokenizer.setReader(new StringReader(source));
         assertSimpleTSOutput(tokenFilter.create(tokenizer), expected);
     }
+
+    public void testKuromojiAnalyzerUserDict() throws Exception {
+        Settings settings = Settings.builder()
+            .put("index.analysis.analyzer.my_analyzer.type", "kuromoji")
+            .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++,c++,w,w", "制限スピード,制限スピード,セイゲンスピード,テスト名詞")
+            .build();
+        TestAnalysis analysis = createTestAnalysis(settings);
+        Analyzer analyzer = analysis.indexAnalyzers.get("my_analyzer");
+        try (TokenStream stream = analyzer.tokenStream("", "制限スピード")) {
+            assertTokenStreamContents(stream, new String[]{"制限スピード"});
+        }
+
+        try (TokenStream stream = analyzer.tokenStream("", "c++world")) {
+            assertTokenStreamContents(stream, new String[]{"c++", "world"});
+        }
+    }
+
+    public void testKuromojiAnalyzerInvalidUserDictOption() throws Exception {
+        Settings settings = Settings.builder()
+            .put("index.analysis.analyzer.my_analyzer.type", "kuromoji")
+            .put("index.analysis.analyzer.my_analyzer.user_dictionary", "user_dict.txt")
+            .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++,c++,w,w")
+            .build();
+        IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> createTestAnalysis(settings));
+        assertThat(exc.getMessage(), containsString("It is not allowed to use [user_dictionary] in conjunction " +
+            "with [user_dictionary_rules]"));
+    }
+
+    public void testKuromojiAnalyzerDuplicateUserDictRule() throws Exception {
+        Settings settings = Settings.builder()
+            .put("index.analysis.analyzer.my_analyzer.type", "kuromoji")
+            .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules",
+                "c++,c++,w,w", "#comment", "制限スピード,制限スピード,セイゲンスピード,テスト名詞", "制限スピード,制限スピード,セイゲンスピード,テスト名詞")
+            .build();
+        IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> createTestAnalysis(settings));
+        assertThat(exc.getMessage(), containsString("[制限スピード] in user dictionary at line [3]"));
+    }
+
+    private TestAnalysis createTestAnalysis(Settings analysisSettings) throws IOException {
+        InputStream dict = KuromojiAnalysisTests.class.getResourceAsStream("user_dict.txt");
+        Path home = createTempDir();
+        Path config = home.resolve("config");
+        Files.createDirectory(config);
+        Files.copy(dict, config.resolve("user_dict.txt"));
+        Settings settings = Settings.builder()
+            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+            .put(Environment.PATH_HOME_SETTING.getKey(), home)
+            .put(analysisSettings)
+            .build();
+        return AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new AnalysisKuromojiPlugin());
+    }
 }
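Note: each entry in user_dictionary_rules above follows the Kuromoji CSV user-dictionary format (surface form, segmented form, readings, part-of-speech name). A minimal sketch of defining such an analyzer programmatically, for context only and not part of this commit (the analyzer name below is made up):

    // Illustrative sketch: inline user dictionary rules for a kuromoji analyzer.
    // Rule format: <surface>,<segmentation>,<readings>,<part-of-speech>
    Settings indexSettings = Settings.builder()
        .put("index.analysis.analyzer.my_kuromoji.type", "kuromoji")
        .putList("index.analysis.analyzer.my_kuromoji.user_dictionary_rules",
            "c++,c++,w,w",                                    // keep "c++" as a single token
            "制限スピード,制限スピード,セイゲンスピード,テスト名詞")  // custom noun entry
        .build();
    // Combining this with ...user_dictionary (a file-based dictionary) now fails with
    // IllegalArgumentException, as testKuromojiAnalyzerInvalidUserDictOption asserts.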
@@ -51,7 +51,7 @@ public class NoriTokenizerFactory extends AbstractTokenizerFactory {
             throw new IllegalArgumentException("It is not allowed to use [" + USER_DICT_PATH_OPTION + "] in conjunction" +
                 " with [" + USER_DICT_RULES_OPTION + "]");
         }
-        List<String> ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION);
+        List<String> ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, true);
         StringBuilder sb = new StringBuilder();
         if (ruleList == null || ruleList.isEmpty()) {
             return null;
@@ -31,6 +31,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
 import org.elasticsearch.threadpool.ThreadPool;
 
@@ -126,9 +127,9 @@ public class AzureBlobContainer extends AbstractBlobContainer {
     }
 
     @Override
-    public void delete() throws IOException {
+    public DeleteResult delete() throws IOException {
         try {
-            blobStore.deleteBlobDirectory(keyPath, threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME));
+            return blobStore.deleteBlobDirectory(keyPath, threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME));
         } catch (URISyntaxException | StorageException e) {
             throw new IOException(e);
         }
@@ -21,12 +21,12 @@ package org.elasticsearch.repositories.azure;
 
 import com.microsoft.azure.storage.LocationMode;
 import com.microsoft.azure.storage.StorageException;
-
 import org.elasticsearch.cluster.metadata.RepositoryMetaData;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.repositories.azure.AzureRepository.Repository;
 import org.elasticsearch.threadpool.ThreadPool;
 
@@ -92,8 +92,9 @@ public class AzureBlobStore implements BlobStore {
         service.deleteBlob(clientName, container, blob);
     }
 
-    public void deleteBlobDirectory(String path, Executor executor) throws URISyntaxException, StorageException, IOException {
-        service.deleteBlobDirectory(clientName, container, path, executor);
+    public DeleteResult deleteBlobDirectory(String path, Executor executor)
+        throws URISyntaxException, StorageException, IOException {
+        return service.deleteBlobDirectory(clientName, container, path, executor);
     }
 
     public InputStream getInputStream(String blob) throws URISyntaxException, StorageException, IOException {
@@ -43,6 +43,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.collect.Tuple;
@@ -193,13 +194,15 @@ public class AzureStorageService {
         });
     }
 
-    void deleteBlobDirectory(String account, String container, String path, Executor executor)
+    DeleteResult deleteBlobDirectory(String account, String container, String path, Executor executor)
         throws URISyntaxException, StorageException, IOException {
         final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
         final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
         final Collection<Exception> exceptions = Collections.synchronizedList(new ArrayList<>());
         final AtomicLong outstanding = new AtomicLong(1L);
         final PlainActionFuture<Void> result = PlainActionFuture.newFuture();
+        final AtomicLong blobsDeleted = new AtomicLong();
+        final AtomicLong bytesDeleted = new AtomicLong();
         SocketAccess.doPrivilegedVoidException(() -> {
             for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true)) {
                 // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/
@@ -209,7 +212,17 @@ public class AzureStorageService {
                 executor.execute(new AbstractRunnable() {
                     @Override
                     protected void doRun() throws Exception {
+                        final long len;
+                        if (blobItem instanceof CloudBlob) {
+                            len = ((CloudBlob) blobItem).getProperties().getLength();
+                        } else {
+                            len = -1L;
+                        }
                         deleteBlob(account, container, blobPath);
+                        blobsDeleted.incrementAndGet();
+                        if (len >= 0) {
+                            bytesDeleted.addAndGet(len);
+                        }
                     }
 
                     @Override
@@ -235,6 +248,7 @@ public class AzureStorageService {
             exceptions.forEach(ex::addSuppressed);
             throw ex;
         }
+        return new DeleteResult(blobsDeleted.get(), bytesDeleted.get());
     }
 
     public InputStream getInputStream(String account, String container, String blob)
@@ -22,6 +22,7 @@ package org.elasticsearch.repositories.gcs;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
 
 import java.io.IOException;
@@ -77,8 +78,8 @@ class GoogleCloudStorageBlobContainer extends AbstractBlobContainer {
     }
 
     @Override
-    public void delete() throws IOException {
-        blobStore.deleteDirectory(path().buildAsString());
+    public DeleteResult delete() throws IOException {
+        return blobStore.deleteDirectory(path().buildAsString());
     }
 
     @Override
@@ -37,6 +37,7 @@ import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
 import org.elasticsearch.common.blobstore.BlobStoreException;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.core.internal.io.Streams;
@@ -55,6 +56,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
 
@@ -300,15 +302,24 @@ class GoogleCloudStorageBlobStore implements BlobStore {
      *
      * @param pathStr Name of path to delete
      */
-    void deleteDirectory(String pathStr) throws IOException {
-        SocketAccess.doPrivilegedVoidIOException(() -> {
+    DeleteResult deleteDirectory(String pathStr) throws IOException {
+        return SocketAccess.doPrivilegedIOException(() -> {
+            DeleteResult deleteResult = DeleteResult.ZERO;
             Page<Blob> page = client().get(bucketName).list(BlobListOption.prefix(pathStr));
             do {
                 final Collection<String> blobsToDelete = new ArrayList<>();
-                page.getValues().forEach(b -> blobsToDelete.add(b.getName()));
+                final AtomicLong blobsDeleted = new AtomicLong(0L);
+                final AtomicLong bytesDeleted = new AtomicLong(0L);
+                page.getValues().forEach(b -> {
+                    blobsToDelete.add(b.getName());
+                    blobsDeleted.incrementAndGet();
+                    bytesDeleted.addAndGet(b.getSize());
+                });
                 deleteBlobsIgnoringIfNotExists(blobsToDelete);
+                deleteResult = deleteResult.add(blobsDeleted.get(), bytesDeleted.get());
                 page = page.getNextPage();
             } while (page != null);
+            return deleteResult;
         });
     }
 
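Note for readers following the new DeleteResult plumbing: the class itself is not shown in this diff, but its usage here (DeleteResult.ZERO, add(blobs, bytes), and a (blobsDeleted, bytesDeleted) constructor) implies roughly the following shape. This is an inferred sketch, not the actual source file:

    // Inferred sketch of org.elasticsearch.common.blobstore.DeleteResult (not the real file).
    public final class DeleteResult {
        public static final DeleteResult ZERO = new DeleteResult(0, 0);

        private final long blobsDeleted;
        private final long bytesDeleted;

        public DeleteResult(long blobsDeleted, long bytesDeleted) {
            this.blobsDeleted = blobsDeleted;
            this.bytesDeleted = bytesDeleted;
        }

        public long blobsDeleted() {
            return blobsDeleted;
        }

        public long bytesDeleted() {
            return bytesDeleted;
        }

        // Returns a new result with the given counts added; used by the paged GCS delete above.
        public DeleteResult add(long blobs, long bytes) {
            return new DeleteResult(blobsDeleted + blobs, bytesDeleted + bytes);
        }
    }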
@@ -28,6 +28,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.fs.FsBlobContainer;
 import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
 import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
@@ -69,9 +70,13 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
         }
     }
 
+    // TODO: See if we can get precise result reporting.
+    private static final DeleteResult DELETE_RESULT = new DeleteResult(1L, 0L);
+
     @Override
-    public void delete() throws IOException {
+    public DeleteResult delete() throws IOException {
         store.execute(fileContext -> fileContext.delete(path, true));
+        return DELETE_RESULT;
     }
 
     @Override
@@ -19,6 +19,7 @@
 package org.elasticsearch.repositories.hdfs;
 
 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.bootstrap.JavaVersion;
 import org.elasticsearch.common.settings.MockSecureSettings;
@@ -30,6 +31,7 @@ import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase;
 import java.util.Collection;
 
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
 
 @ThreadLeakFilters(filters = HdfsClientThreadLeakFilter.class)
 public class HdfsRepositoryTests extends AbstractThirdPartyRepositoryTestCase {
@@ -58,4 +60,14 @@ public class HdfsRepositoryTests extends AbstractThirdPartyRepositoryTestCase {
         ).get();
         assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
     }
+
+    // HDFS repository doesn't have precise cleanup stats so we only check whether or not any blobs were removed
+    @Override
+    protected void assertCleanupResponse(CleanupRepositoryResponse response, long bytes, long blobs) {
+        if (blobs > 0) {
+            assertThat(response.result().blobs(), greaterThan(0L));
+        } else {
+            assertThat(response.result().blobs(), equalTo(0L));
+        }
+    }
 }
@@ -32,7 +32,6 @@ import com.amazonaws.services.s3.model.ObjectMetadata;
 import com.amazonaws.services.s3.model.PartETag;
 import com.amazonaws.services.s3.model.PutObjectRequest;
 import com.amazonaws.services.s3.model.S3Object;
-import com.amazonaws.services.s3.model.S3ObjectSummary;
 import com.amazonaws.services.s3.model.UploadPartRequest;
 import com.amazonaws.services.s3.model.UploadPartResult;
 import org.apache.lucene.util.SetOnce;
@@ -42,6 +41,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
 import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
 import org.elasticsearch.common.collect.Tuple;
@@ -54,6 +54,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 
@@ -121,7 +122,9 @@ class S3BlobContainer extends AbstractBlobContainer {
     }
 
     @Override
-    public void delete() throws IOException {
+    public DeleteResult delete() throws IOException {
+        final AtomicLong deletedBlobs = new AtomicLong();
+        final AtomicLong deletedBytes = new AtomicLong();
         try (AmazonS3Reference clientReference = blobStore.clientReference()) {
             ObjectListing prevListing = null;
             while (true) {
@@ -135,8 +138,12 @@ class S3BlobContainer extends AbstractBlobContainer {
                     listObjectsRequest.setPrefix(keyPath);
                     list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest));
                 }
-                final List<String> blobsToDelete =
-                    list.getObjectSummaries().stream().map(S3ObjectSummary::getKey).collect(Collectors.toList());
+                final List<String> blobsToDelete = new ArrayList<>();
+                list.getObjectSummaries().forEach(s3ObjectSummary -> {
+                    deletedBlobs.incrementAndGet();
+                    deletedBytes.addAndGet(s3ObjectSummary.getSize());
+                    blobsToDelete.add(s3ObjectSummary.getKey());
+                });
                 if (list.isTruncated()) {
                     doDeleteBlobs(blobsToDelete, false);
                     prevListing = list;
@@ -150,6 +157,7 @@ class S3BlobContainer extends AbstractBlobContainer {
         } catch (final AmazonClientException e) {
             throw new IOException("Exception when deleting blob container [" + keyPath + "]", e);
         }
+        return new DeleteResult(deletedBlobs.get(), deletedBytes.get());
     }
 
     @Override
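Note: a minimal sketch of how a caller might consume the DeleteResult now returned by BlobContainer#delete. Illustrative only; the logger and container variables, and the accessor names, are assumed (see the inferred DeleteResult sketch earlier):

    // Illustrative only: report what a container-level delete removed.
    DeleteResult result = blobContainer.delete();
    logger.info("deleted [{}] blobs totalling [{}] bytes under [{}]",
        result.blobsDeleted(), result.bytesDeleted(), blobContainer.path());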
@@ -0,0 +1,385 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.s3;
+
+import com.amazonaws.SdkClientException;
+import com.amazonaws.services.s3.internal.MD5DigestCalculatingInputStream;
+import com.amazonaws.util.Base16;
+import com.sun.net.httpserver.HttpServer;
+import org.apache.http.HttpStatus;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
+import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
+import org.elasticsearch.common.settings.MockSecureSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.CountDown;
+import org.elasticsearch.core.internal.io.IOUtils;
+import org.elasticsearch.mocksocket.MockHttpServer;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketTimeoutException;
+import java.nio.charset.StandardCharsets;
+import java.util.Locale;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.repositories.s3.S3ClientSettings.DISABLE_CHUNKED_ENCODING;
+import static org.elasticsearch.repositories.s3.S3ClientSettings.ENDPOINT_SETTING;
+import static org.elasticsearch.repositories.s3.S3ClientSettings.MAX_RETRIES_SETTING;
+import static org.elasticsearch.repositories.s3.S3ClientSettings.READ_TIMEOUT_SETTING;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * This class tests how a {@link S3BlobContainer} and its underlying AWS S3 client are retrying requests when reading or writing blobs.
+ */
+@SuppressForbidden(reason = "use a http server")
+public class S3BlobContainerRetriesTests extends ESTestCase {
+
+    private HttpServer httpServer;
+    private S3Service service;
+
+    @Before
+    public void setUp() throws Exception {
+        service = new S3Service();
+        httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
+        httpServer.start();
+        super.setUp();
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        IOUtils.close(service);
+        httpServer.stop(0);
+        super.tearDown();
+    }
+
+    private BlobContainer createBlobContainer(final @Nullable Integer maxRetries,
+                                              final @Nullable TimeValue readTimeout,
+                                              final @Nullable Boolean disableChunkedEncoding,
+                                              final @Nullable ByteSizeValue bufferSize) {
+        final Settings.Builder clientSettings = Settings.builder();
+        final String clientName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT);
+
+        final String endpoint;
+        if (httpServer.getAddress().getAddress() instanceof Inet6Address) {
+            endpoint = "http://[" + httpServer.getAddress().getHostString() + "]:" + httpServer.getAddress().getPort();
+        } else {
+            endpoint = "http://" + httpServer.getAddress().getHostString() + ":" + httpServer.getAddress().getPort();
+        }
+        clientSettings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint);
+        if (maxRetries != null) {
+            clientSettings.put(MAX_RETRIES_SETTING.getConcreteSettingForNamespace(clientName).getKey(), maxRetries);
+        }
+        if (readTimeout != null) {
+            clientSettings.put(READ_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), readTimeout);
+        }
+        if (disableChunkedEncoding != null) {
+            clientSettings.put(DISABLE_CHUNKED_ENCODING.getConcreteSettingForNamespace(clientName).getKey(), disableChunkedEncoding);
+        }
+
+        final MockSecureSettings secureSettings = new MockSecureSettings();
+        secureSettings.setString(S3ClientSettings.ACCESS_KEY_SETTING.getConcreteSettingForNamespace(clientName).getKey(), "access");
+        secureSettings.setString(S3ClientSettings.SECRET_KEY_SETTING.getConcreteSettingForNamespace(clientName).getKey(), "secret");
+        clientSettings.setSecureSettings(secureSettings);
+        service.refreshAndClearCache(S3ClientSettings.load(clientSettings.build()));
+
+        final RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repository", S3Repository.TYPE,
+            Settings.builder().put(S3Repository.CLIENT_NAME.getKey(), clientName).build());
+
+        return new S3BlobContainer(BlobPath.cleanPath(), new S3BlobStore(service, "bucket",
+            S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getDefault(Settings.EMPTY),
+            bufferSize == null ? S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY) : bufferSize,
+            S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY),
+            S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY),
+            repositoryMetaData));
+    }
+
+    public void testReadBlobWithRetries() throws Exception {
+        final int maxRetries = randomInt(5);
+        final CountDown countDown = new CountDown(maxRetries + 1);
+
+        final byte[] bytes = randomByteArrayOfLength(randomIntBetween(1, 512));
+        httpServer.createContext("/bucket/read_blob_max_retries", exchange -> {
+            Streams.readFully(exchange.getRequestBody());
+            if (countDown.countDown()) {
+                exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8");
+                exchange.sendResponseHeaders(HttpStatus.SC_OK, bytes.length);
+                exchange.getResponseBody().write(bytes);
+                exchange.close();
+                return;
+            }
+            exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY,
+                HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1);
+            exchange.close();
+        });
+
+        final BlobContainer blobContainer = createBlobContainer(maxRetries, null, null, null);
+        try (InputStream inputStream = blobContainer.readBlob("read_blob_max_retries")) {
+            assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream)));
+            assertThat(countDown.isCountedDown(), is(true));
+        }
+    }
+
+    public void testReadBlobWithReadTimeouts() {
+        final TimeValue readTimeout = TimeValue.timeValueMillis(randomIntBetween(100, 500));
+        final BlobContainer blobContainer = createBlobContainer(1, readTimeout, null, null);
+
+        // HTTP server does not send a response
+        httpServer.createContext("/bucket/read_blob_unresponsive", exchange -> {});
+
+        Exception exception = expectThrows(SdkClientException.class, () -> blobContainer.readBlob("read_blob_unresponsive"));
+        assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
+        assertThat(exception.getCause(), instanceOf(SocketTimeoutException.class));
+
+        // HTTP server sends a partial response
+        final byte[] bytes = randomByteArrayOfLength(randomIntBetween(10, 128));
+        httpServer.createContext("/bucket/read_blob_incomplete", exchange -> {
+            exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8");
+            exchange.sendResponseHeaders(HttpStatus.SC_OK, bytes.length);
+            exchange.getResponseBody().write(bytes, 0, randomIntBetween(1, bytes.length - 1));
+            if (randomBoolean()) {
+                exchange.getResponseBody().flush();
+            }
+        });
+
+        exception = expectThrows(SocketTimeoutException.class, () -> {
+            try (InputStream stream = blobContainer.readBlob("read_blob_incomplete")) {
+                Streams.readFully(stream);
+            }
+        });
+        assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
+    }
+
+    public void testWriteBlobWithRetries() throws Exception {
+        final int maxRetries = randomInt(5);
+        final CountDown countDown = new CountDown(maxRetries + 1);
+
+        final byte[] bytes = randomByteArrayOfLength(randomIntBetween(1, 512));
+        httpServer.createContext("/bucket/write_blob_max_retries", exchange -> {
+            final BytesReference body = Streams.readFully(exchange.getRequestBody());
+            if (countDown.countDown()) {
+                if (Objects.deepEquals(bytes, BytesReference.toBytes(body))) {
+                    exchange.sendResponseHeaders(HttpStatus.SC_OK, -1);
+                } else {
+                    exchange.sendResponseHeaders(HttpStatus.SC_BAD_REQUEST, -1);
+                }
+                exchange.close();
+                return;
+            }
+            exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY,
+                HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1);
+            exchange.close();
+        });
+
+        final BlobContainer blobContainer = createBlobContainer(maxRetries, null, true, null);
+        try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) {
+            blobContainer.writeBlob("write_blob_max_retries", stream, bytes.length, false);
+        }
+        assertThat(countDown.isCountedDown(), is(true));
+    }
+
+    public void testWriteBlobWithReadTimeouts() {
+        final TimeValue readTimeout = TimeValue.timeValueMillis(randomIntBetween(100, 500));
+        final BlobContainer blobContainer = createBlobContainer(1, readTimeout, true, null);
+
+        // HTTP server does not send a response
+        httpServer.createContext("/bucket/write_blob_timeout", exchange -> {
+            if (randomBoolean()) {
+                Streams.readFully(exchange.getRequestBody());
+            }
+        });
+
+        final byte[] bytes = randomByteArrayOfLength(randomIntBetween(1, 128));
+        Exception exception = expectThrows(IOException.class, () -> {
+            try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) {
+                blobContainer.writeBlob("write_blob_timeout", stream, bytes.length, false);
+            }
+        });
+        assertThat(exception.getMessage().toLowerCase(Locale.ROOT),
+            containsString("unable to upload object [write_blob_timeout] using a single upload"));
+
+        assertThat(exception.getCause(), instanceOf(SdkClientException.class));
+        assertThat(exception.getCause().getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
+
+        assertThat(exception.getCause().getCause(), instanceOf(SocketTimeoutException.class));
+        assertThat(exception.getCause().getCause().getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
+    }
+
+    public void testWriteLargeBlob() throws Exception {
+        final boolean useTimeout = rarely();
+        final TimeValue readTimeout = useTimeout ? TimeValue.timeValueMillis(randomIntBetween(100, 500)) : null;
+        final ByteSizeValue bufferSize = new ByteSizeValue(5, ByteSizeUnit.MB);
+        final BlobContainer blobContainer = createBlobContainer(null, readTimeout, true, bufferSize);
+
+        final int parts = randomIntBetween(1, 2);
+        final long lastPartSize = randomLongBetween(10, 512);
+        final long blobSize = (parts * bufferSize.getBytes()) + lastPartSize;
+
+        final int maxRetries = 2; // we want all requests to fail at least once
+        final CountDown countDownInitiate = new CountDown(maxRetries);
+        final AtomicInteger countDownUploads = new AtomicInteger(maxRetries * (parts + 1));
+        final CountDown countDownComplete = new CountDown(maxRetries);
+
+        httpServer.createContext("/bucket/write_large_blob", exchange -> {
+            if ("POST".equals(exchange.getRequestMethod())
+                && exchange.getRequestURI().getQuery().equals("uploads")) {
+                // initiate multipart upload request
+                if (countDownInitiate.countDown()) {
+                    byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
+                        "<InitiateMultipartUploadResult>\n" +
+                        "  <Bucket>bucket</Bucket>\n" +
+                        "  <Key>write_large_blob</Key>\n" +
+                        "  <UploadId>TEST</UploadId>\n" +
+                        "</InitiateMultipartUploadResult>").getBytes(StandardCharsets.UTF_8);
+                    exchange.getResponseHeaders().add("Content-Type", "application/xml");
+                    exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length);
+                    exchange.getResponseBody().write(response);
+                    exchange.close();
+                    return;
+                }
+            } else if ("PUT".equals(exchange.getRequestMethod())) {
+                // upload part request
+                MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody());
+                BytesReference bytes = Streams.readFully(md5);
+                assertThat((long) bytes.length(), anyOf(equalTo(lastPartSize), equalTo(bufferSize.getBytes())));
+
+                if (countDownUploads.decrementAndGet() % 2 == 0) {
+                    exchange.getResponseHeaders().add("ETag", Base16.encodeAsString(md5.getMd5Digest()));
+                    exchange.sendResponseHeaders(HttpStatus.SC_OK, -1);
+                    exchange.close();
+                    return;
+                }
+
+            } else if ("POST".equals(exchange.getRequestMethod())
+                && exchange.getRequestURI().getQuery().equals("uploadId=TEST")) {
+                // complete multipart upload request
+                Streams.readFully(exchange.getRequestBody());
+                if (countDownComplete.countDown()) {
+                    byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
+                        "<CompleteMultipartUploadResult>\n" +
+                        "  <Bucket>bucket</Bucket>\n" +
+                        "  <Key>write_large_blob</Key>\n" +
+                        "</CompleteMultipartUploadResult>").getBytes(StandardCharsets.UTF_8);
+                    exchange.getResponseHeaders().add("Content-Type", "application/xml");
+                    exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length);
+                    exchange.getResponseBody().write(response);
+                    exchange.close();
+                    return;
+                }
+            }
+
+            // sends an error back or let the request time out
+            if (useTimeout == false) {
+                exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY,
+                    HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1);
+                exchange.close();
+            }
+        });
+
+        blobContainer.writeBlob("write_large_blob", new ZeroInputStream(blobSize), blobSize, false);
+
+        assertThat(countDownInitiate.isCountedDown(), is(true));
+        assertThat(countDownUploads.get(), equalTo(0));
+        assertThat(countDownComplete.isCountedDown(), is(true));
+    }
+
+    /**
+     * A resettable InputStream that only serves zeros.
+     *
+     * Ideally it should be wrapped into a BufferedInputStream but it seems that the AWS SDK is calling InputStream{@link #reset()}
+     * before calling InputStream{@link #mark(int)}, which is not permitted by the {@link #reset()} method contract.
+     **/
+    private static class ZeroInputStream extends InputStream {
+
+        private final AtomicBoolean closed = new AtomicBoolean(false);
+        private final long length;
+        private final AtomicLong reads;
+        private volatile long mark;
+
+        private ZeroInputStream(final long length) {
+            this.length = length;
+            this.reads = new AtomicLong(length);
+            this.mark = -1;
+        }
+
+        @Override
+        public int read() throws IOException {
+            ensureOpen();
+            if (reads.decrementAndGet() < 0) {
+                return -1;
+            }
+            return 0;
+        }
+
+        @Override
+        public boolean markSupported() {
+            return true;
+        }
+
+        @Override
+        public synchronized void mark(int readlimit) {
+            mark = reads.get();
+        }
+
+        @Override
+        public synchronized void reset() throws IOException {
+            ensureOpen();
+            reads.set(mark);
+        }
+
+        @Override
+        public int available() throws IOException {
+            ensureOpen();
+            return Math.toIntExact(length - reads.get());
+        }
+
+        @Override
+        public void close() throws IOException {
+            closed.set(true);
+        }
+
+        private void ensureOpen() throws IOException {
+            if (closed.get()) {
+                throw new IOException("Stream closed");
+            }
+        }
+    }
+}
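Note: the retry tests above drive the client through the per-client settings defined in S3ClientSettings (ENDPOINT_SETTING, MAX_RETRIES_SETTING, READ_TIMEOUT_SETTING, DISABLE_CHUNKED_ENCODING). As a rough sketch of the equivalent node configuration, assuming the standard s3.client.<name>.* setting keys and example values only, not part of this commit:

    // Illustrative: the same client knobs the tests toggle, expressed as flat settings keys.
    Settings s3ClientSettings = Settings.builder()
        .put("s3.client.default.endpoint", "s3.eu-west-1.amazonaws.com")   // ENDPOINT_SETTING
        .put("s3.client.default.max_retries", 3)                           // MAX_RETRIES_SETTING
        .put("s3.client.default.read_timeout", "50s")                      // READ_TIMEOUT_SETTING
        .put("s3.client.default.disable_chunked_encoding", false)          // DISABLE_CHUNKED_ENCODING
        .build();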
@@ -40,7 +40,7 @@ public class EvilElasticsearchCliTests extends ESElasticsearchCliTestCase {
         runTest(
                 ExitCodes.OK,
                 true,
-                output -> {},
+                (output, error) -> {},
                 (foreground, pidFile, quiet, esSettings) -> {
                     Settings settings = esSettings.settings();
                     assertThat(settings.keySet(), hasSize(2));
@@ -55,7 +55,7 @@ public class EvilElasticsearchCliTests extends ESElasticsearchCliTestCase {
         runTest(
                 ExitCodes.OK,
                 true,
-                output -> {},
+                (output, error) -> {},
                 (foreground, pidFile, quiet, esSettings) -> {
                     Settings settings = esSettings.settings();
                     assertThat(settings.keySet(), hasSize(2));
@@ -55,7 +55,7 @@ public class EvilCommandTests extends ESTestCase {
         command.getShutdownHookThread().run();
         command.getShutdownHookThread().join();
         assertTrue(closed.get());
-        final String output = terminal.getOutput();
+        final String output = terminal.getErrorOutput();
         if (shouldThrow) {
             // ensure that we dump the exception
             assertThat(output, containsString("java.io.IOException: fail"));
@@ -21,7 +21,6 @@ package org.elasticsearch.packaging.test;
 
 import org.apache.http.client.fluent.Request;
 import org.elasticsearch.packaging.util.Archives;
-import org.elasticsearch.packaging.util.Distribution;
 import org.elasticsearch.packaging.util.FileUtils;
 import org.elasticsearch.packaging.util.Installation;
 import org.elasticsearch.packaging.util.Platforms;
@@ -52,7 +51,6 @@ import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.not;
-import static org.hamcrest.CoreMatchers.notNullValue;
 import static org.hamcrest.Matchers.isEmptyString;
 import static org.junit.Assume.assumeThat;
 import static org.junit.Assume.assumeTrue;
@@ -60,9 +58,8 @@ import static org.junit.Assume.assumeTrue;
 public class ArchiveTests extends PackagingTestCase {
 
     @BeforeClass
-    public static void assumptions() {
-        assumeTrue("only archive distributions",
-            distribution().packaging == Distribution.Packaging.TAR || distribution().packaging == Distribution.Packaging.ZIP);
+    public static void filterDistros() {
+        assumeTrue("only archives", distribution.isArchive());
     }
 
     public void test10Install() throws Exception {
@@ -71,20 +68,14 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test20PluginsListWithNoPlugins() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         final Installation.Executables bin = installation.executables();
-        final Shell sh = newShell();
         final Result r = sh.run(bin.elasticsearchPlugin + " list");
 
         assertThat(r.stdout, isEmptyString());
     }
 
     public void test30NoJava() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         final Installation.Executables bin = installation.executables();
-        final Shell sh = newShell();
         sh.getEnv().remove("JAVA_HOME");
 
         final Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated");
@@ -105,10 +96,7 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test40CreateKeystoreManually() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         final Installation.Executables bin = installation.executables();
-        final Shell sh = newShell();
 
         Platforms.onLinux(() -> sh.run("sudo -u " + ARCHIVE_OWNER + " " + bin.elasticsearchKeystore + " create"));
 
@@ -138,12 +126,10 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test50StartAndStop() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         // cleanup from previous test
         rm(installation.config("elasticsearch.keystore"));
 
-        Archives.runElasticsearch(installation, newShell());
+        Archives.runElasticsearch(installation, sh);
 
         final String gcLogName = Platforms.LINUX && distribution().hasJdk == false
             ? "gc.log.0.current"
@@ -156,8 +142,6 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void assertRunsWithJavaHome() throws Exception {
-        Shell sh = newShell();
-
         Platforms.onLinux(() -> {
             String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim();
             sh.getEnv().put("JAVA_HOME", systemJavaHome);
@@ -177,13 +161,10 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test51JavaHomeOverride() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         assertRunsWithJavaHome();
     }
 
     public void test52BundledJdkRemoved() throws Exception {
-        assumeThat(installation, is(notNullValue()));
         assumeThat(distribution().hasJdk, is(true));
 
         Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated");
@@ -196,8 +177,6 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test53JavaHomeWithSpecialCharacters() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         Platforms.onWindows(() -> {
             final Shell sh = new Shell();
             try {
@@ -251,13 +230,9 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test60AutoCreateKeystore() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         assertThat(installation.config("elasticsearch.keystore"), file(File, ARCHIVE_OWNER, ARCHIVE_OWNER, p660));
 
         final Installation.Executables bin = installation.executables();
-        final Shell sh = newShell();
-
         Platforms.onLinux(() -> {
             final Result result = sh.run("sudo -u " + ARCHIVE_OWNER + " " + bin.elasticsearchKeystore + " list");
             assertThat(result.stdout, containsString("keystore.seed"));
@@ -270,7 +245,6 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test70CustomPathConfAndJvmOptions() throws Exception {
-        assumeThat(installation, is(notNullValue()));
 
         final Path tempConf = getTempDir().resolve("esconf-alternate");
 
@@ -288,7 +262,6 @@ public class ArchiveTests extends PackagingTestCase {
             "-Dlog4j2.disable.jmx=true\n";
         append(tempConf.resolve("jvm.options"), jvmOptions);
 
-        final Shell sh = newShell();
         Platforms.onLinux(() -> sh.run("chown -R elasticsearch:elasticsearch " + tempConf));
         Platforms.onWindows(() -> sh.run(
             "$account = New-Object System.Security.Principal.NTAccount 'vagrant'; " +
@@ -301,11 +274,10 @@ public class ArchiveTests extends PackagingTestCase {
             "}"
         ));
 
-        final Shell serverShell = newShell();
-        serverShell.getEnv().put("ES_PATH_CONF", tempConf.toString());
-        serverShell.getEnv().put("ES_JAVA_OPTS", "-XX:-UseCompressedOops");
+        sh.getEnv().put("ES_PATH_CONF", tempConf.toString());
+        sh.getEnv().put("ES_JAVA_OPTS", "-XX:-UseCompressedOops");
 
-        Archives.runElasticsearch(installation, serverShell);
+        Archives.runElasticsearch(installation, sh);
 
         final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes"));
         assertThat(nodesResponse, containsString("\"heap_init_in_bytes\":536870912"));
@@ -319,7 +291,6 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test80RelativePathConf() throws Exception {
-        assumeThat(installation, is(notNullValue()));
 
         final Path temp = getTempDir().resolve("esconf-alternate");
         final Path tempConf = temp.resolve("config");
@@ -334,7 +305,6 @@ public class ArchiveTests extends PackagingTestCase {
 
         append(tempConf.resolve("elasticsearch.yml"), "node.name: relative");
 
-        final Shell sh = newShell();
         Platforms.onLinux(() -> sh.run("chown -R elasticsearch:elasticsearch " + temp));
         Platforms.onWindows(() -> sh.run(
             "$account = New-Object System.Security.Principal.NTAccount 'vagrant'; " +
@@ -347,10 +317,9 @@ public class ArchiveTests extends PackagingTestCase {
             "}"
         ));
 
-        final Shell serverShell = newShell();
-        serverShell.setWorkingDirectory(temp);
-        serverShell.getEnv().put("ES_PATH_CONF", "config");
-        Archives.runElasticsearch(installation, serverShell);
+        sh.setWorkingDirectory(temp);
+        sh.getEnv().put("ES_PATH_CONF", "config");
+        Archives.runElasticsearch(installation, sh);
 
         final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes"));
         assertThat(nodesResponse, containsString("\"name\":\"relative\""));
@@ -363,10 +332,7 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test90SecurityCliPackaging() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         final Installation.Executables bin = installation.executables();
-        final Shell sh = newShell();
 
         if (distribution().isDefault()) {
             assertTrue(Files.exists(installation.lib.resolve("tools").resolve("security-cli")));
@@ -377,7 +343,7 @@ public class ArchiveTests extends PackagingTestCase {
             // Ensure that the exit code from the java command is passed back up through the shell script
            result = sh.runIgnoreExitCode(bin.elasticsearchCertutil + " invalid-command");
             assertThat(result.exitCode, is(not(0)));
-            assertThat(result.stdout, containsString("Unknown command [invalid-command]"));
+            assertThat(result.stderr, containsString("Unknown command [invalid-command]"));
         };
         Platforms.onLinux(action);
         Platforms.onWindows(action);
@@ -387,10 +353,7 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test91ElasticsearchShardCliPackaging() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         final Installation.Executables bin = installation.executables();
-        final Shell sh = newShell();
 
         Platforms.PlatformAction action = () -> {
             final Result result = sh.run(bin.elasticsearchShard + " -h");
@@ -405,10 +368,7 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test92ElasticsearchNodeCliPackaging() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         final Installation.Executables bin = installation.executables();
-        final Shell sh = newShell();
 
         Platforms.PlatformAction action = () -> {
             final Result result = sh.run(bin.elasticsearchNode + " -h");
@@ -424,12 +384,9 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test93ElasticsearchNodeCustomDataPathAndNotEsHomeWorkDir() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         Path relativeDataPath = installation.data.relativize(installation.home);
         append(installation.config("elasticsearch.yml"), "path.data: " + relativeDataPath);
 
-        final Shell sh = newShell();
         sh.setWorkingDirectory(getTempDir());
 
         Archives.runElasticsearch(installation, sh);
@@ -440,10 +397,7 @@ public class ArchiveTests extends PackagingTestCase {
     }
 
     public void test94ElasticsearchNodeExecuteCliNotEsHomeWorkDir() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         final Installation.Executables bin = installation.executables();
-        final Shell sh = newShell();
         // Run the cli tools from the tmp dir
         sh.setWorkingDirectory(getTempDir());
 
@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.packaging.test;
+
+import junit.framework.TestCase;
+import org.elasticsearch.packaging.util.Distribution;
+import org.elasticsearch.packaging.util.FileUtils;
+import org.elasticsearch.packaging.util.Shell;
+import org.junit.Before;
+
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile;
+import static org.junit.Assume.assumeTrue;
+
+public class DebMetadataTests extends PackagingTestCase {
+
+    @Before
+    public void filterDistros() {
+        assumeTrue("only deb", distribution.packaging == Distribution.Packaging.DEB);
+    }
+
+    public void test05CheckLintian() {
+        sh.run("lintian --fail-on-warnings " + FileUtils.getDistributionFile(distribution()));
+    }
+
+    public void test06Dependencies() {
+
+        final Shell sh = new Shell();
+
+        final Shell.Result result = sh.run("dpkg -I " + getDistributionFile(distribution()));
+
+        TestCase.assertTrue(Pattern.compile("(?m)^ Depends:.*bash.*").matcher(result.stdout).find());
+
+        String oppositePackageName = "elasticsearch";
+        if (distribution().isDefault()) {
+            oppositePackageName += "-oss";
+        }
+
+        TestCase.assertTrue(Pattern.compile("(?m)^ Conflicts: " + oppositePackageName + "$").matcher(result.stdout).find());
+    }
+}
@ -19,10 +19,8 @@
 
 package org.elasticsearch.packaging.test;
 
-import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering;
 import org.elasticsearch.packaging.util.Distribution;
-import org.elasticsearch.packaging.util.Shell;
-import org.junit.Before;
+import org.junit.BeforeClass;
 
 import java.nio.file.Files;
 import java.nio.file.Paths;
@ -32,37 +30,29 @@ import static org.elasticsearch.packaging.util.FileUtils.assertPathsExist;
 import static org.elasticsearch.packaging.util.Packages.SYSVINIT_SCRIPT;
 import static org.elasticsearch.packaging.util.Packages.assertInstalled;
 import static org.elasticsearch.packaging.util.Packages.assertRemoved;
-import static org.elasticsearch.packaging.util.Packages.install;
+import static org.elasticsearch.packaging.util.Packages.installPackage;
 import static org.elasticsearch.packaging.util.Packages.packageStatus;
 import static org.elasticsearch.packaging.util.Packages.remove;
 import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation;
-import static org.elasticsearch.packaging.util.Platforms.isDPKG;
-import static org.hamcrest.CoreMatchers.notNullValue;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assume.assumeThat;
 import static org.junit.Assume.assumeTrue;
 
-@TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class)
 public class DebPreservationTests extends PackagingTestCase {
 
-    @Before
-    public void onlyCompatibleDistributions() {
-        assumeTrue("only dpkg platforms", isDPKG());
-        assumeTrue("deb distributions", distribution().packaging == Distribution.Packaging.DEB);
-        assumeTrue("only bundled jdk", distribution().hasJdk);
-        assumeTrue("only compatible distributions", distribution().packaging.compatible);
+    @BeforeClass
+    public static void filterDistros() {
+        assumeTrue("only deb", distribution.packaging == Distribution.Packaging.DEB);
+        assumeTrue("only bundled jdk", distribution.hasJdk);
     }
 
     public void test10Install() throws Exception {
         assertRemoved(distribution());
-        installation = install(distribution());
+        installation = installPackage(distribution());
         assertInstalled(distribution());
         verifyPackageInstallation(installation, distribution(), newShell());
     }
 
     public void test20Remove() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         remove(distribution());
 
         // some config files were not removed
@ -106,9 +96,6 @@ public class DebPreservationTests extends PackagingTestCase {
     }
 
     public void test30Purge() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
-        final Shell sh = new Shell();
         sh.run("dpkg --purge " + distribution().flavor.name);
 
         assertRemoved(distribution());
@ -19,15 +19,12 @@
 
 package org.elasticsearch.packaging.test;
 
-import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering;
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
 import org.apache.http.client.fluent.Request;
-import org.elasticsearch.packaging.util.Distribution;
 import org.elasticsearch.packaging.util.FileUtils;
-import org.elasticsearch.packaging.util.Shell;
 import org.elasticsearch.packaging.util.Shell.Result;
 import org.hamcrest.CoreMatchers;
-import org.junit.Before;
+import org.junit.BeforeClass;
 
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
@ -50,53 +47,39 @@ import static org.elasticsearch.packaging.util.FileUtils.slurp;
 import static org.elasticsearch.packaging.util.Packages.SYSTEMD_SERVICE;
 import static org.elasticsearch.packaging.util.Packages.assertInstalled;
 import static org.elasticsearch.packaging.util.Packages.assertRemoved;
-import static org.elasticsearch.packaging.util.Packages.install;
+import static org.elasticsearch.packaging.util.Packages.installPackage;
 import static org.elasticsearch.packaging.util.Packages.remove;
 import static org.elasticsearch.packaging.util.Packages.restartElasticsearch;
 import static org.elasticsearch.packaging.util.Packages.startElasticsearch;
 import static org.elasticsearch.packaging.util.Packages.stopElasticsearch;
 import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation;
 import static org.elasticsearch.packaging.util.Platforms.getOsRelease;
-import static org.elasticsearch.packaging.util.Platforms.isDPKG;
 import static org.elasticsearch.packaging.util.Platforms.isSystemd;
 import static org.elasticsearch.packaging.util.ServerUtils.makeRequest;
 import static org.elasticsearch.packaging.util.ServerUtils.runElasticsearchTests;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.not;
-import static org.hamcrest.CoreMatchers.notNullValue;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.isEmptyString;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assume.assumeThat;
 import static org.junit.Assume.assumeTrue;
 
-@TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class)
 public class PackageTests extends PackagingTestCase {
-    private Shell sh;
 
-    @Before
-    public void onlyCompatibleDistributions() throws Exception {
-        assumeTrue("only compatible distributions", distribution().packaging.compatible);
-        assumeTrue("rpm or deb",
-            distribution().packaging == Distribution.Packaging.DEB || distribution().packaging == Distribution.Packaging.RPM);
-        sh = newShell();
-    }
-
-    public void test05CheckLintian() throws Exception {
-        assumeTrue(isDPKG());
-        sh.run("lintian --fail-on-warnings " + FileUtils.getDistributionFile(distribution()));
+    @BeforeClass
+    public static void filterDistros() {
+        assumeTrue("rpm or deb", distribution.isPackage());
     }
 
     public void test10InstallPackage() throws Exception {
         assertRemoved(distribution());
-        installation = install(distribution());
+        installation = installPackage(distribution());
         assertInstalled(distribution());
         verifyPackageInstallation(installation, distribution(), sh);
     }
 
     public void test20PluginsCommandWhenNoPlugins() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         assertThat(sh.run(installation.bin("elasticsearch-plugin") + " list").stdout, isEmptyString());
     }
 
@ -109,13 +92,10 @@ public class PackageTests extends PackagingTestCase {
     }
 
     public void test31InstallDoesNotStartServer() {
-        assumeThat(installation, is(notNullValue()));
-
         assertThat(sh.run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch")));
     }
 
     public void assertRunsWithJavaHome() throws Exception {
-        String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim();
         byte[] originalEnvFile = Files.readAllBytes(installation.envFile);
         try {
             Files.write(installation.envFile, ("JAVA_HOME=" + systemJavaHome + "\n").getBytes(StandardCharsets.UTF_8),
@ -132,7 +112,6 @@ public class PackageTests extends PackagingTestCase {
     }
 
     public void test32JavaHomeOverride() throws Exception {
-        assumeThat(installation, is(notNullValue()));
         // we always run with java home when no bundled jdk is included, so this test would be repetitive
         assumeThat(distribution().hasJdk, is(true));
 
@ -159,7 +138,6 @@ public class PackageTests extends PackagingTestCase {
     }
 
     public void test42BundledJdkRemoved() throws Exception {
-        assumeThat(installation, is(notNullValue()));
         assumeThat(distribution().hasJdk, is(true));
 
         Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated");
@ -173,8 +151,6 @@ public class PackageTests extends PackagingTestCase {
 
     public void test40StartServer() throws Exception {
         String start = sh.runIgnoreExitCode("date ").stdout.trim();
-        assumeThat(installation, is(notNullValue()));
-
         startElasticsearch(sh);
 
         String journalEntries = sh.runIgnoreExitCode("journalctl _SYSTEMD_UNIT=elasticsearch.service " +
@ -190,8 +166,6 @@ public class PackageTests extends PackagingTestCase {
     }
 
     public void test50Remove() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         // add fake bin directory as if a plugin was installed
         Files.createDirectories(installation.bin.resolve("myplugin"));
 
@ -243,9 +217,7 @@ public class PackageTests extends PackagingTestCase {
     }
 
     public void test60Reinstall() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
-        installation = install(distribution());
+        installation = installPackage(distribution());
         assertInstalled(distribution());
         verifyPackageInstallation(installation, distribution(), sh);
 
@ -255,7 +227,7 @@ public class PackageTests extends PackagingTestCase {
 
     public void test70RestartServer() throws Exception {
         try {
-            installation = install(distribution());
+            installation = installPackage(distribution());
             assertInstalled(distribution());
 
             startElasticsearch(sh);
@ -270,7 +242,7 @@ public class PackageTests extends PackagingTestCase {
 
     public void test72TestRuntimeDirectory() throws Exception {
         try {
-            installation = install(distribution());
+            installation = installPackage(distribution());
             FileUtils.rm(installation.pidDir);
             startElasticsearch(sh);
             assertPathsExist(installation.pidDir);
@ -281,7 +253,7 @@ public class PackageTests extends PackagingTestCase {
     }
 
     public void test73gcLogsExist() throws Exception {
-        installation = install(distribution());
+        installation = installPackage(distribution());
         startElasticsearch(sh);
         // it can be gc.log or gc.log.0.current
         assertThat(installation.logs, fileWithGlobExist("gc.log*"));
@ -316,7 +288,6 @@ public class PackageTests extends PackagingTestCase {
     public void test81CustomPathConfAndJvmOptions() throws Exception {
         assumeTrue(isSystemd());
 
-        assumeThat(installation, is(notNullValue()));
         assertPathsExist(installation.envFile);
 
         stopElasticsearch(sh);
@ -344,18 +315,17 @@ public class PackageTests extends PackagingTestCase {
 
             sh.runIgnoreExitCode("chown -R elasticsearch:elasticsearch " + tempConf);
 
-            final Shell serverShell = newShell();
             cp(installation.envFile, tempConf.resolve("elasticsearch.bk"));//backup
             append(installation.envFile, "ES_PATH_CONF=" + tempConf + "\n");
             append(installation.envFile, "ES_JAVA_OPTS=-XX:-UseCompressedOops");
 
-            startElasticsearch(serverShell);
+            startElasticsearch(sh);
 
             final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes"));
             assertThat(nodesResponse, CoreMatchers.containsString("\"heap_init_in_bytes\":536870912"));
             assertThat(nodesResponse, CoreMatchers.containsString("\"using_compressed_ordinary_object_pointers\":\"false\""));
 
-            stopElasticsearch(serverShell);
+            stopElasticsearch(sh);
 
         } finally {
             rm(installation.envFile);
@ -371,7 +341,7 @@ public class PackageTests extends PackagingTestCase {
 
             sh.run("systemctl mask systemd-sysctl.service");
 
-            installation = install(distribution());
+            installation = installPackage(distribution());
 
             sh.run("systemctl unmask systemd-sysctl.service");
         } finally {
@ -383,7 +353,7 @@ public class PackageTests extends PackagingTestCase {
         // Limits are changed on systemd platforms only
         assumeTrue(isSystemd());
 
-        installation = install(distribution());
+        installation = installPackage(distribution());
 
         startElasticsearch(sh);
 
@ -32,50 +32,86 @@ import org.elasticsearch.packaging.util.Shell;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.rules.TestName;
+import org.junit.rules.TestWatcher;
+import org.junit.runner.Description;
 import org.junit.runner.RunWith;
 
 import java.nio.file.Paths;
 
 import static org.elasticsearch.packaging.util.Cleanup.cleanEverything;
+import static org.junit.Assume.assumeFalse;
 import static org.junit.Assume.assumeTrue;
 
+/**
+ * Class that all packaging test cases should inherit from
+ */
 @RunWith(RandomizedRunner.class)
 @TestMethodProviders({
     JUnit3MethodProvider.class
 })
 @TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class)
-/**
- * Class that all packaging test cases should inherit from. This makes working with the packaging tests more similar to what we're
- * familiar with from {@link org.elasticsearch.test.ESTestCase} without having to apply its behavior that's not relevant here
- */
 public abstract class PackagingTestCase extends Assert {
 
     protected final Log logger = LogFactory.getLog(getClass());
 
-    private static Distribution distribution;
+    // the distribution being tested
+    protected static final Distribution distribution;
     static {
         distribution = new Distribution(Paths.get(System.getProperty("tests.distribution")));
     }
 
+    // the java installation already installed on the system
+    protected static final String systemJavaHome;
+    static {
+        Shell sh = new Shell();
+        if (Platforms.LINUX) {
+            systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim();
+        } else {
+            assert Platforms.WINDOWS;
+            systemJavaHome = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim();
+        }
+    }
+
+    // the current installation of the distribution being tested
+    protected static Installation installation;
+
+    private static boolean failed;
+
+    @ClassRule
+    public static final TestWatcher testFailureRule = new TestWatcher() {
+        @Override
+        protected void failed(Throwable e, Description description) {
+            failed = true;
+        }
+    };
+
+    // a shell to run system commands with
+    protected Shell sh;
+
     @Rule
     public final TestName testNameRule = new TestName();
 
-    @Before
-    public void setup() {
-        assumeTrue("only compatible distributions", distribution().packaging.compatible);
-        logger.info("[" + testNameRule.getMethodName() + "]: before test");
+    @BeforeClass
+    public static void filterCompatible() {
+        assumeTrue("only compatible distributions", distribution.packaging.compatible);
     }
 
-    protected static Installation installation;
-
     @BeforeClass
     public static void cleanup() throws Exception {
         installation = null;
         cleanEverything();
     }
 
+    @Before
+    public void setup() throws Exception {
+        assumeFalse(failed); // skip rest of tests once one fails
+
+        sh = newShell();
+    }
+
     /** The {@link Distribution} that should be tested in this case */
     protected static Distribution distribution() {
         return distribution;
@ -85,11 +121,9 @@ public abstract class PackagingTestCase extends Assert {
         Shell sh = new Shell();
         if (distribution().hasJdk == false) {
             Platforms.onLinux(() -> {
-                String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim();
                 sh.getEnv().put("JAVA_HOME", systemJavaHome);
             });
             Platforms.onWindows(() -> {
-                final String systemJavaHome = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim();
                 sh.getEnv().put("JAVA_HOME", systemJavaHome);
             });
         }
@ -30,37 +30,14 @@ import java.util.regex.Pattern;
 import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile;
 import static org.junit.Assume.assumeTrue;
 
-public class PackageConflictTests extends PackagingTestCase {
+public class RpmMetadataTests extends PackagingTestCase {
 
-    private Shell sh;
-
     @Before
-    public void onlyCompatibleDistributions() throws Exception {
-        assumeTrue("only compatible distributions", distribution().packaging.compatible);
-        assumeTrue("rpm or deb",
-            distribution().packaging == Distribution.Packaging.DEB || distribution().packaging == Distribution.Packaging.RPM);
-        sh = newShell();
+    public void filterDistros() {
+        assumeTrue("only rpm", distribution.packaging == Distribution.Packaging.RPM);
     }
 
-    public void test11DebDependencies() {
-        // TODO: rewrite this test to not use a real second distro to try and install
-        assumeTrue(Platforms.isDPKG());
-
-        final Shell sh = new Shell();
-
-        final Shell.Result result = sh.run("dpkg -I " + getDistributionFile(distribution()));
-
-        TestCase.assertTrue(Pattern.compile("(?m)^ Depends:.*bash.*").matcher(result.stdout).find());
-
-        String oppositePackageName = "elasticsearch";
-        if (distribution().isDefault()) {
-            oppositePackageName += "-oss";
-        }
-
-        TestCase.assertTrue(Pattern.compile("(?m)^ Conflicts: " + oppositePackageName + "$").matcher(result.stdout).find());
-    }
-
-    public void test11RpmDependencies() {
+    public void test11Dependencies() {
         // TODO: rewrite this test to not use a real second distro to try and install
         assumeTrue(Platforms.isRPM());
@ -19,10 +19,9 @@
 
 package org.elasticsearch.packaging.test;
 
-import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering;
 import org.elasticsearch.packaging.util.Distribution;
 import org.elasticsearch.packaging.util.Shell;
-import org.junit.Before;
+import org.junit.BeforeClass;
 
 import java.nio.file.Files;
 import java.nio.file.Path;
@ -34,37 +33,29 @@ import static org.elasticsearch.packaging.util.Packages.SYSTEMD_SERVICE;
 import static org.elasticsearch.packaging.util.Packages.SYSVINIT_SCRIPT;
 import static org.elasticsearch.packaging.util.Packages.assertInstalled;
 import static org.elasticsearch.packaging.util.Packages.assertRemoved;
-import static org.elasticsearch.packaging.util.Packages.install;
+import static org.elasticsearch.packaging.util.Packages.installPackage;
 import static org.elasticsearch.packaging.util.Packages.remove;
 import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation;
-import static org.elasticsearch.packaging.util.Platforms.isRPM;
 import static org.elasticsearch.packaging.util.Platforms.isSystemd;
-import static org.hamcrest.CoreMatchers.notNullValue;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assume.assumeThat;
 import static org.junit.Assume.assumeTrue;
 
-@TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class)
 public class RpmPreservationTests extends PackagingTestCase {
 
-    @Before
-    public void onlyCompatibleDistributions() {
-        assumeTrue("only rpm platforms", isRPM());
-        assumeTrue("rpm distributions", distribution().packaging == Distribution.Packaging.RPM);
+    @BeforeClass
+    public static void filterDistros() {
+        assumeTrue("only rpm", distribution.packaging == Distribution.Packaging.RPM);
         assumeTrue("only bundled jdk", distribution().hasJdk);
-        assumeTrue("only compatible distributions", distribution().packaging.compatible);
     }
 
     public void test10Install() throws Exception {
         assertRemoved(distribution());
-        installation = install(distribution());
+        installation = installPackage(distribution());
         assertInstalled(distribution());
         verifyPackageInstallation(installation, distribution(), newShell());
     }
 
     public void test20Remove() throws Exception {
-        assumeThat(installation, is(notNullValue()));
-
         remove(distribution());
 
         // config was removed
@ -80,7 +71,7 @@ public class RpmPreservationTests extends PackagingTestCase {
     public void test30PreserveConfig() throws Exception {
         final Shell sh = new Shell();
 
-        installation = install(distribution());
+        installation = installPackage(distribution());
         assertInstalled(distribution());
         verifyPackageInstallation(installation, distribution(), newShell());
 
@ -26,7 +26,6 @@ import org.elasticsearch.packaging.util.ServerUtils;
 import org.elasticsearch.packaging.util.Shell;
 import org.elasticsearch.packaging.util.Shell.Result;
 import org.junit.After;
-import org.junit.Before;
 import org.junit.BeforeClass;
 
 import java.io.IOException;
@ -47,13 +46,6 @@ public class WindowsServiceTests extends PackagingTestCase {
     private static final String DEFAULT_DISPLAY_NAME = "Elasticsearch " + FileUtils.getCurrentVersion() + " (elasticsearch-service-x64)";
     private static String serviceScript;
 
-    private Shell sh;
-
-    @Before
-    public void createShell() {
-        sh = new Shell();
-    }
-
     @BeforeClass
     public static void ensureWindows() {
         assumeTrue(Platforms.WINDOWS);
@ -49,6 +49,14 @@ public class Distribution {
         return flavor.equals(Flavor.OSS);
     }
 
+    public boolean isArchive() {
+        return packaging == Packaging.TAR || packaging == Packaging.ZIP;
+    }
+
+    public boolean isPackage() {
+        return packaging == Packaging.RPM || packaging == Packaging.DEB;
+    }
+
     public enum Packaging {
 
         TAR(".tar.gz", Platforms.LINUX || Platforms.DARWIN),
@ -94,7 +94,7 @@ public class Packages {
         return result;
     }
 
-    public static Installation install(Distribution distribution) throws IOException {
+    public static Installation installPackage(Distribution distribution) throws IOException {
         Shell sh = new Shell();
         String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim();
         if (distribution.hasJdk == false) {
@ -0,0 +1,34 @@
+{
+  "snapshot.cleanup_repository": {
+    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html",
+    "stability": "stable",
+    "url": {
+      "paths": [
+        {
+          "path": "/_snapshot/{repository}/_cleanup",
+          "methods": [
+            "POST"
+          ],
+          "parts": {
+            "repository": {
+              "type": "string",
+              "required" : true,
+              "description": "A repository name"
+            }
+          }
+        }
+      ]
+    },
+    "params": {
+      "master_timeout": {
+        "type" : "time",
+        "description" : "Explicit operation timeout for connection to master node"
+      },
+      "timeout": {
+        "type" : "time",
+        "description" : "Explicit operation timeout"
+      }
+    },
+    "body": {}
+  }
+}
@ -1,9 +1,37 @@
 ---
 "Help":
+  - skip:
+      version: " - 7.3.99"
+      reason: "is_write_index is shown in cat.aliases starting version 7.4.0"
+
   - do:
       cat.aliases:
         help: true
 
+  - match:
+      $body: |
+               /^  alias .+ \n
+                   index .+ \n
+                   filter .+ \n
+                   routing.index .+ \n
+                   routing.search .+ \n
+                   is_write_index .+ \n
+               $/
+
+---
+"Help (pre 7.4.0)":
+  - skip:
+      version: "7.4.0 - "
+      features: node_selector
+      reason: "is_write_index is shown in cat.aliases starting version 7.4.0"
+
+  - do:
+      node_selector:
+        version: " - 7.3.99"
+      cat.aliases:
+        help: true
+
   - match:
       $body: |
                /^  alias .+ \n
@ -26,6 +54,9 @@
 
 ---
 "Simple alias":
+  - skip:
+      version: " - 7.3.99"
+      reason: "is_write_index is shown in cat.aliases starting version 7.4.0"
 
   - do:
       indices.create:
@ -39,6 +70,38 @@
   - do:
       cat.aliases: {}
 
+  - match:
+      $body: |
+            /^
+                test_alias \s+
+                test \s+
+                - \s+
+                - \s+
+                - \s+
+                - \s+
+            $/
+
+---
+"Simple alias (pre 7.4.0)":
+  - skip:
+      version: "7.4.0 - "
+      features: node_selector
+      reason: "is_write_index is shown in cat.aliases starting version 7.4.0"
+
+  - do:
+      indices.create:
+        index: test
+
+  - do:
+      indices.put_alias:
+        index: test
+        name: test_alias
+
+  - do:
+      node_selector:
+        version: " - 7.3.99"
+      cat.aliases: {}
+
   - match:
       $body: |
             /^
@ -51,6 +114,50 @@
 
 ---
 "Complex alias":
+  - skip:
+      version: " - 7.3.99"
+      reason: "is_write_index is shown in cat.aliases starting version 7.4.0"
+
+  - do:
+      indices.create:
+        index: test
+        body:
+          mappings:
+            properties:
+              foo:
+                type: text
+
+  - do:
+      indices.put_alias:
+        index: test
+        name: test_alias
+        body:
+          index_routing: ir
+          search_routing: "sr1,sr2"
+          is_write_index: true
+          filter:
+            term:
+              foo: bar
+  - do:
+      cat.aliases: {}
+
+  - match:
+      $body: |
+            /^
+                test_alias \s+
+                test \s+
+                [*] \s+
+                ir \s+
+                sr1,sr2 \s+
+                true \s+
+            $/
+
+---
+"Complex alias (pre 7.4.0)":
+  - skip:
+      version: "7.4.0 - "
+      features: node_selector
+      reason: "is_write_index is shown in cat.aliases starting version 7.4.0"
 
   - do:
       indices.create:
@ -72,6 +179,8 @@
             term:
               foo: bar
   - do:
+      node_selector:
+        version: " - 7.3.99"
       cat.aliases: {}
 
   - match:
@ -169,6 +278,9 @@
 
 ---
 "Column headers":
+  - skip:
+      version: " - 7.3.99"
+      reason: "is_write_index is shown in cat.aliases starting version 7.4.0"
 
   - do:
       indices.create:
@ -183,6 +295,45 @@
       cat.aliases:
         v: true
 
+  - match:
+      $body: |
+            /^  alias \s+
+                index \s+
+                filter \s+
+                routing.index \s+
+                routing.search \s+
+                is_write_index
+                \n
+                test_1 \s+
+                test \s+
+                - \s+
+                - \s+
+                - \s+
+                - \s+
+            $/
+
+---
+"Column headers (pre 7.4.0)":
+  - skip:
+      version: "7.4.0 - "
+      features: node_selector
+      reason: "is_write_index is shown in cat.aliases starting version 7.4.0"
+
+  - do:
+      indices.create:
+        index: test
+
+  - do:
+      indices.put_alias:
+        index: test
+        name: test_1
+
+  - do:
+      node_selector:
+        version: " - 7.3.99"
+      cat.aliases:
+        v: true
+
   - match:
       $body: |
             /^  alias \s+
@ -198,7 +349,6 @@
                 - \s+
             $/
 
-
 ---
 "Select columns":
 
@ -232,6 +382,9 @@
 
 ---
 "Alias against closed index":
+  - skip:
+      version: " - 7.3.99"
+      reason: "is_write_index is shown in cat.aliases starting version 7.4.0"
 
   - do:
       indices.create:
@ -247,6 +400,40 @@
   - do:
       cat.aliases: {}
 
+  - match:
+      $body: |
+            /^
+                test_alias \s+
+                test_index \s+
+                - \s+
+                - \s+
+                - \s+
+                - \s+
+            $/
+
+---
+"Alias against closed index (pre 7.4.0)":
+  - skip:
+      version: "7.4.0 - "
+      features: node_selector
+      reason: "is_write_index is shown in cat.aliases starting version 7.4.0"
+
+  - do:
+      indices.create:
+        index: test_index
+        body:
+          aliases:
+            test_alias: {}
+
+  - do:
+      indices.close:
+        index: test_index
+
+  - do:
+      node_selector:
+        version: " - 7.3.99"
+      cat.aliases: {}
+
   - match:
       $body: |
             /^
@ -1,14 +1,14 @@
 ---
-setup:
+"Translog retention without soft_deletes":
   - do:
       indices.create:
         index: test
+        body:
+          settings:
+            soft_deletes.enabled: false
   - do:
       cluster.health:
         wait_for_no_initializing_shards: true
 
----
-"Translog retention":
   - do:
       indices.stats:
         metric: [ translog ]
@ -64,6 +64,53 @@ setup:
   - lte: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size }
   - match: { indices.test.primaries.translog.uncommitted_operations: 0 }
 
+---
+"Translog retention with soft_deletes":
+  - skip:
+      version: " - 7.3.99"
+      reason: "start ignoring translog retention policy with soft-deletes enabled in 7.4"
+  - do:
+      indices.create:
+        index: test
+        body:
+          settings:
+            soft_deletes.enabled: true
+  - do:
+      cluster.health:
+        wait_for_no_initializing_shards: true
+  - do:
+      indices.stats:
+        metric: [ translog ]
+  - set: { indices.test.primaries.translog.size_in_bytes: creation_size }
+
+  - do:
+      index:
+        index: test
+        id: 1
+        body: { "foo": "bar" }
+
+  - do:
+      indices.stats:
+        metric: [ translog ]
+  - gt: { indices.test.primaries.translog.size_in_bytes: $creation_size }
+  - match: { indices.test.primaries.translog.operations: 1 }
+  - match: { indices.test.primaries.translog.uncommitted_operations: 1 }
+  # call flush twice to sync the global checkpoint after the last operation so that we can have the safe commit
+  - do:
+      indices.flush:
+        index: test
+  - do:
+      indices.flush:
+        index: test
+  - do:
+      indices.stats:
+        metric: [ translog ]
+  # after flushing we have one empty translog file while an empty index before flushing has two empty translog files.
+  - lt: { indices.test.primaries.translog.size_in_bytes: $creation_size }
+  - match: { indices.test.primaries.translog.operations: 0 }
+  - lt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size }
+  - match: { indices.test.primaries.translog.uncommitted_operations: 0 }
+
 ---
 "Translog last modified age stats":
   - skip:
@ -81,11 +128,20 @@ setup:
   - gte: { indices.test.primaries.translog.earliest_last_modified_age: 0 }
 
 ---
-"Translog stats on closed indices":
+"Translog stats on closed indices without soft-deletes":
   - skip:
       version: " - 7.2.99"
       reason: "closed indices have translog stats starting version 7.3.0"
 
+  - do:
+      indices.create:
+        index: test
+        body:
+          settings:
+            soft_deletes.enabled: false
+  - do:
+      cluster.health:
+        wait_for_no_initializing_shards: true
   - do:
       index:
         index: test
@ -123,3 +179,40 @@ setup:
         forbid_closed_indices: false
   - match: { indices.test.primaries.translog.operations: 3 }
   - match: { indices.test.primaries.translog.uncommitted_operations: 0 }
+
+---
+"Translog stats on closed indices with soft-deletes":
+  - skip:
+      version: " - 7.3.99"
+      reason: "start ignoring translog retention policy with soft-deletes enabled in 7.4"
+  - do:
+      indices.create:
+        index: test
+        body:
+          settings:
+            soft_deletes.enabled: true
+  - do:
+      cluster.health:
+        wait_for_no_initializing_shards: true
+  - do:
+      index:
+        index: test
+        id: 1
+        body: { "foo": "bar" }
+  - do:
+      indices.stats:
+        metric: [ translog ]
+  - match: { indices.test.primaries.translog.operations: 1 }
+  - match: { indices.test.primaries.translog.uncommitted_operations: 1 }
+  - do:
+      indices.close:
+        index: test
+        wait_for_active_shards: 1
+  - is_true: acknowledged
+  - do:
+      indices.stats:
+        metric: [ translog ]
+        expand_wildcards: all
+        forbid_closed_indices: false
+  - match: { indices.test.primaries.translog.operations: 0 }
+  - match: { indices.test.primaries.translog.uncommitted_operations: 0 }
@ -38,6 +38,51 @@ setup:
 
   - match: { acknowledged: true }
 
+---
+"Create a snapshot and clean up repository":
+  - skip:
+      version: " - 7.99.99"
+      reason: cleanup introduced in 8.0
+
+  - do:
+      snapshot.cleanup_repository:
+        repository: test_repo_create_1
+
+  - match: { results.deleted_bytes: 0 }
+  - match: { results.deleted_blobs: 0 }
+
+  - do:
+      snapshot.create:
+        repository: test_repo_create_1
+        snapshot: test_snapshot
+        wait_for_completion: true
+
+  - match: { snapshot.snapshot: test_snapshot }
+  - match: { snapshot.state : SUCCESS }
+  - match: { snapshot.shards.successful: 1 }
+  - match: { snapshot.shards.failed : 0 }
+
+  - do:
+      snapshot.cleanup_repository:
+        repository: test_repo_create_1
+
+  - match: { results.deleted_bytes: 0 }
+  - match: { results.deleted_blobs: 0 }
+
+  - do:
+      snapshot.delete:
+        repository: test_repo_create_1
+        snapshot: test_snapshot
+
+  - match: { acknowledged: true }
+
+  - do:
+      snapshot.cleanup_repository:
+        repository: test_repo_create_1
+
+  - match: { results.deleted_bytes: 0 }
+  - match: { results.deleted_blobs: 0 }
+
 ---
 "Create a snapshot for missing index":
   - skip:
@ -116,6 +116,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_7_2_1 = new Version(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0);
     public static final Version V_7_3_0 = new Version(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0);
     public static final Version V_7_3_1 = new Version(7030199, org.apache.lucene.util.Version.LUCENE_8_1_0);
+    public static final Version V_7_3_2 = new Version(7030299, org.apache.lucene.util.Version.LUCENE_8_1_0);
     public static final Version V_7_4_0 = new Version(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0);
     public static final Version CURRENT = V_7_4_0;
@@ -48,6 +48,8 @@ import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageAction;
 import org.elasticsearch.action.admin.cluster.node.usage.TransportNodesUsageAction;
 import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction;
 import org.elasticsearch.action.admin.cluster.remote.TransportRemoteInfoAction;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.TransportCleanupRepositoryAction;
 import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
 import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction;

@@ -226,6 +228,7 @@ import org.elasticsearch.rest.action.RestFieldCapabilitiesAction;
 import org.elasticsearch.rest.action.RestMainAction;
 import org.elasticsearch.rest.action.admin.cluster.RestAddVotingConfigExclusionAction;
 import org.elasticsearch.rest.action.admin.cluster.RestCancelTasksAction;
+import org.elasticsearch.rest.action.admin.cluster.RestCleanupRepositoryAction;
 import org.elasticsearch.rest.action.admin.cluster.RestClearVotingConfigExclusionsAction;
 import org.elasticsearch.rest.action.admin.cluster.RestClusterAllocationExplainAction;
 import org.elasticsearch.rest.action.admin.cluster.RestClusterGetSettingsAction;

@@ -455,6 +458,7 @@ public class ActionModule extends AbstractModule {
         actions.register(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class);
         actions.register(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class);
         actions.register(VerifyRepositoryAction.INSTANCE, TransportVerifyRepositoryAction.class);
+        actions.register(CleanupRepositoryAction.INSTANCE, TransportCleanupRepositoryAction.class);
         actions.register(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class);
         actions.register(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class);
         actions.register(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class);

@@ -577,6 +581,7 @@ public class ActionModule extends AbstractModule {
         registerHandler.accept(new RestGetRepositoriesAction(restController, settingsFilter));
         registerHandler.accept(new RestDeleteRepositoryAction(restController));
         registerHandler.accept(new RestVerifyRepositoryAction(restController));
+        registerHandler.accept(new RestCleanupRepositoryAction(restController));
         registerHandler.accept(new RestGetSnapshotsAction(restController));
         registerHandler.accept(new RestCreateSnapshotAction(restController));
         registerHandler.accept(new RestRestoreSnapshotAction(restController));

@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.cluster.repositories.cleanup;
+
+import org.elasticsearch.action.ActionType;
+
+public final class CleanupRepositoryAction extends ActionType<CleanupRepositoryResponse> {
+
+    public static final CleanupRepositoryAction INSTANCE = new CleanupRepositoryAction();
+    public static final String NAME = "cluster:admin/repository/_cleanup";
+
+    private CleanupRepositoryAction() {
+        super(NAME, CleanupRepositoryResponse::new);
+    }
+}

@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.cluster.repositories.cleanup;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+public class CleanupRepositoryRequest extends AcknowledgedRequest<CleanupRepositoryRequest> {
+
+    private String repository;
+
+    public CleanupRepositoryRequest(String repository) {
+        this.repository = repository;
+    }
+
+    public CleanupRepositoryRequest(StreamInput in) throws IOException {
+        repository = in.readString();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(repository);
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        ActionRequestValidationException validationException = null;
+        if (repository == null) {
+            validationException = addValidationError("repository is null", null);
+        }
+        return validationException;
+    }
+
+    public String name() {
+        return repository;
+    }
+
+    public void name(String repository) {
+        this.repository = repository;
+    }
+}

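A short illustration (not part of this commit) of the validation contract above: the only constraint is a non-null repository name. The repository string used here is a hypothetical example.

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest;

public class CleanupRequestValidationExample {
    public static void main(String[] args) {
        CleanupRepositoryRequest request = new CleanupRepositoryRequest("my_repo"); // hypothetical name
        ActionRequestValidationException ok = request.validate();  // null: the request is valid
        request.name(null);
        ActionRequestValidationException err = request.validate(); // non-null: "repository is null"
        System.out.println((ok == null) + " / " + (err != null));
    }
}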
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.cluster.repositories.cleanup;
+
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
+import org.elasticsearch.client.ElasticsearchClient;
+
+public class CleanupRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder<CleanupRepositoryRequest,
+    CleanupRepositoryResponse,
+    CleanupRepositoryRequestBuilder> {
+
+    public CleanupRepositoryRequestBuilder(ElasticsearchClient client, ActionType<CleanupRepositoryResponse> action,
+                                           String repository) {
+        super(client, action, new CleanupRepositoryRequest(repository));
+    }
+
+    public CleanupRepositoryRequestBuilder setName(String repository) {
+        request.name(repository);
+        return this;
+    }
+}

@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.cluster.repositories.cleanup;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.repositories.RepositoryCleanupResult;
+
+import java.io.IOException;
+
+public final class CleanupRepositoryResponse extends ActionResponse implements ToXContentObject {
+
+    private static final ObjectParser<CleanupRepositoryResponse, Void> PARSER =
+        new ObjectParser<>(CleanupRepositoryResponse.class.getName(), true, CleanupRepositoryResponse::new);
+
+    static {
+        PARSER.declareObject((response, cleanupResult) -> response.result = cleanupResult,
+            RepositoryCleanupResult.PARSER, new ParseField("results"));
+    }
+
+    private RepositoryCleanupResult result;
+
+    public CleanupRepositoryResponse() {
+    }
+
+    public CleanupRepositoryResponse(RepositoryCleanupResult result) {
+        this.result = result;
+    }
+
+    public CleanupRepositoryResponse(StreamInput in) throws IOException {
+        result = new RepositoryCleanupResult(in);
+    }
+
+    public RepositoryCleanupResult result() {
+        return result;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        result.writeTo(out);
+    }
+
+    public static CleanupRepositoryResponse fromXContent(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject().field("results");
+        result.toXContent(builder, params);
+        builder.endObject();
+        return builder;
+    }
+}

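A minimal sketch (not part of this commit) of parsing the response body produced by toXContent above. The field names deleted_bytes and deleted_blobs come from the YAML test assertions earlier in this diff; the exact keys inside "results" are written by RepositoryCleanupResult, which is not shown in this diff, so treat them as assumed here.

import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

public class CleanupResponseParsingExample {
    public static CleanupRepositoryResponse parse() throws Exception {
        // Wire/REST shape: {"results":{"deleted_bytes":...,"deleted_blobs":...}}
        String json = "{\"results\":{\"deleted_bytes\":0,\"deleted_blobs\":0}}";
        try (XContentParser parser = XContentType.JSON.xContent().createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            return CleanupRepositoryResponse.fromXContent(parser);
        }
    }
}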
@@ -0,0 +1,248 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.cluster.repositories.cleanup;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRunnable;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.master.TransportMasterNodeAction;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.RepositoryCleanupInProgress;
+import org.elasticsearch.cluster.SnapshotDeletionsInProgress;
+import org.elasticsearch.cluster.SnapshotsInProgress;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.repositories.Repository;
+import org.elasticsearch.repositories.RepositoryCleanupResult;
+import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+
+/**
+ * Repository cleanup action for repository implementations based on {@link BlobStoreRepository}.
+ *
+ * The steps taken by the repository cleanup operation are as follows:
+ * <ol>
+ *     <li>Check that there are no running repository cleanup, snapshot create, or snapshot delete actions
+ *     and add an entry for the repository that is to be cleaned up to {@link RepositoryCleanupInProgress}</li>
+ *     <li>Run cleanup actions on the repository. Note, these are executed exclusively on the master node.
+ *     For the precise operations execute see {@link BlobStoreRepository#cleanup}</li>
+ *     <li>Remove the entry in {@link RepositoryCleanupInProgress} in the first step.</li>
+ * </ol>
+ *
+ * On master failover during the cleanup operation it is simply removed from the cluster state. This is safe because the logic in
+ * {@link BlobStoreRepository#cleanup} ensures that the repository state id has not changed between creation of the cluster state entry
+ * and any delete/write operations. TODO: This will not work if we also want to clean up at the shard level as those will involve writes
+ * as well as deletes.
+ */
+public final class TransportCleanupRepositoryAction extends TransportMasterNodeAction<CleanupRepositoryRequest,
+    CleanupRepositoryResponse> {
+
+    private static final Logger logger = LogManager.getLogger(TransportCleanupRepositoryAction.class);
+
+    private static final Version MIN_VERSION = Version.V_7_4_0;
+
+    private final RepositoriesService repositoriesService;
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.GENERIC;
+    }
+
+    @Inject
+    public TransportCleanupRepositoryAction(TransportService transportService, ClusterService clusterService,
+                                            RepositoriesService repositoriesService, ThreadPool threadPool, ActionFilters actionFilters,
+                                            IndexNameExpressionResolver indexNameExpressionResolver) {
+        super(CleanupRepositoryAction.NAME, transportService, clusterService, threadPool, actionFilters,
+            CleanupRepositoryRequest::new, indexNameExpressionResolver);
+        this.repositoriesService = repositoriesService;
+        // We add a state applier that will remove any dangling repository cleanup actions on master failover.
+        // This is safe to do since cleanups will increment the repository state id before executing any operations to prevent concurrent
+        // operations from corrupting the repository. This is the same safety mechanism used by snapshot deletes.
+        clusterService.addStateApplier(event -> {
+            if (event.localNodeMaster() && event.previousState().nodes().isLocalNodeElectedMaster() == false) {
+                final RepositoryCleanupInProgress repositoryCleanupInProgress = event.state().custom(RepositoryCleanupInProgress.TYPE);
+                if (repositoryCleanupInProgress == null || repositoryCleanupInProgress.cleanupInProgress() == false) {
+                    return;
+                }
+                clusterService.submitStateUpdateTask("clean up repository cleanup task after master failover",
+                    new ClusterStateUpdateTask() {
+                        @Override
+                        public ClusterState execute(ClusterState currentState) {
+                            return removeInProgressCleanup(currentState);
+                        }
+
+                        @Override
+                        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                            logger.debug("Removed repository cleanup task [{}] from cluster state", repositoryCleanupInProgress);
+                        }
+
+                        @Override
+                        public void onFailure(String source, Exception e) {
+                            logger.warn(
+                                "Failed to remove repository cleanup task [{}] from cluster state", repositoryCleanupInProgress);
+                        }
+                    });
+            }
+        });
+    }
+
+    private static ClusterState removeInProgressCleanup(final ClusterState currentState) {
+        RepositoryCleanupInProgress cleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE);
+        if (cleanupInProgress != null) {
+            boolean changed = false;
+            if (cleanupInProgress.cleanupInProgress() == false) {
+                cleanupInProgress = new RepositoryCleanupInProgress();
+                changed = true;
+            }
+            if (changed) {
+                return ClusterState.builder(currentState).putCustom(
+                    RepositoryCleanupInProgress.TYPE, cleanupInProgress).build();
+            }
+        }
+        return currentState;
+    }
+
+    @Override
+    protected CleanupRepositoryResponse read(StreamInput in) throws IOException {
+        return new CleanupRepositoryResponse(in);
+    }
+
+    @Override
+    protected void masterOperation(CleanupRepositoryRequest request, ClusterState state,
+                                   ActionListener<CleanupRepositoryResponse> listener) {
+        if (state.nodes().getMinNodeVersion().onOrAfter(MIN_VERSION)) {
+            cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new));
+        } else {
+            throw new IllegalArgumentException("Repository cleanup is only supported from version [" + MIN_VERSION
+                + "] but the oldest node version in the cluster is [" + state.nodes().getMinNodeVersion() + ']');
+        }
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(CleanupRepositoryRequest request, ClusterState state) {
+        // Cluster is not affected but we look up repositories in metadata
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
+    }
+
+    /**
+     * Runs cleanup operations on the given repository.
+     * @param repositoryName Repository to clean up
+     * @param listener Listener for cleanup result
+     */
+    private void cleanupRepo(String repositoryName, ActionListener<RepositoryCleanupResult> listener) {
+        final Repository repository = repositoriesService.repository(repositoryName);
+        if (repository instanceof BlobStoreRepository == false) {
+            listener.onFailure(new IllegalArgumentException("Repository [" + repositoryName + "] does not support repository cleanup"));
+            return;
+        }
+        final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository;
+        final long repositoryStateId = repository.getRepositoryData().getGenId();
+        logger.info("Running cleanup operations on repository [{}][{}]", repositoryName, repositoryStateId);
+        clusterService.submitStateUpdateTask("cleanup repository [" + repositoryName + "][" + repositoryStateId + ']',
+            new ClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE);
+                    if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.cleanupInProgress() == false) {
+                        throw new IllegalStateException(
+                            "Cannot cleanup [" + repositoryName + "] - a repository cleanup is already in-progress");
+                    }
+                    SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE);
+                    if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) {
+                        throw new IllegalStateException("Cannot cleanup [" + repositoryName + "] - a snapshot is currently being deleted");
+                    }
+                    SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
+                    if (snapshots != null && !snapshots.entries().isEmpty()) {
+                        throw new IllegalStateException("Cannot cleanup [" + repositoryName + "] - a snapshot is currently running");
+                    }
+                    return ClusterState.builder(currentState).putCustom(RepositoryCleanupInProgress.TYPE,
+                        new RepositoryCleanupInProgress(
+                            RepositoryCleanupInProgress.startedEntry(repositoryName, repositoryStateId))).build();
+                }
+
+                @Override
+                public void onFailure(String source, Exception e) {
+                    after(e, null);
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    logger.debug("Initialized repository cleanup in cluster state for [{}][{}]", repositoryName, repositoryStateId);
+                    threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener,
+                        l -> blobStoreRepository.cleanup(
+                            repositoryStateId, ActionListener.wrap(result -> after(null, result), e -> after(e, null)))));
+                }
+
+                private void after(@Nullable Exception failure, @Nullable RepositoryCleanupResult result) {
+                    if (failure == null) {
+                        logger.debug("Finished repository cleanup operations on [{}][{}]", repositoryName, repositoryStateId);
+                    } else {
+                        logger.debug(() -> new ParameterizedMessage(
+                            "Failed to finish repository cleanup operations on [{}][{}]", repositoryName, repositoryStateId), failure);
+                    }
+                    assert failure != null || result != null;
+                    clusterService.submitStateUpdateTask(
+                        "remove repository cleanup task [" + repositoryName + "][" + repositoryStateId + ']',
+                        new ClusterStateUpdateTask() {
+                            @Override
+                            public ClusterState execute(ClusterState currentState) {
+                                return removeInProgressCleanup(currentState);
+                            }
+
+                            @Override
+                            public void onFailure(String source, Exception e) {
+                                if (failure != null) {
+                                    e.addSuppressed(failure);
+                                }
+                                logger.warn(() ->
+                                    new ParameterizedMessage("[{}] failed to remove repository cleanup task", repositoryName), e);
+                                listener.onFailure(e);
+                            }
+
+                            @Override
+                            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                                if (failure == null) {
+                                    logger.info("Done with repository cleanup on [{}][{}] with result [{}]",
+                                        repositoryName, repositoryStateId, result);
+                                    listener.onResponse(result);
+                                } else {
+                                    logger.warn(() -> new ParameterizedMessage("Failed to run repository cleanup operations on [{}][{}]",
+                                        repositoryName, repositoryStateId), failure);
+                                    listener.onFailure(failure);
+                                }
+                            }
+                        });
+                }
+            });
+    }
+}

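A brief illustration (not part of this commit) of what the concurrency guards in the cluster state update task above mean for callers: if another cleanup, a snapshot create, or a snapshot delete is in flight, the task throws IllegalStateException and the caller's failure path is taken. The Client instance and the repository name "my_repo" are assumptions for the sketch.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse;
import org.elasticsearch.client.Client;

public class ConcurrentCleanupExample {
    public static void tryCleanup(Client client) {
        client.admin().cluster().cleanupRepository(new CleanupRepositoryRequest("my_repo"),
            ActionListener.wrap(
                (CleanupRepositoryResponse response) -> System.out.println("cleanup result: " + response.result()),
                // Reached, for example, when a snapshot is currently running or being deleted.
                e -> System.out.println("cleanup rejected or failed: " + e.getMessage())));
    }
}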
@@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.status;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRunnable;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
 import org.elasticsearch.cluster.ClusterState;

@@ -115,15 +116,13 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
             for (int i = 0; i < currentSnapshots.size(); i++) {
                 snapshots[i] = currentSnapshots.get(i).snapshot();
             }
-            TransportNodesSnapshotsStatus.Request nodesRequest =
-                new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(new String[nodesIds.size()]))
-                    .snapshots(snapshots).timeout(request.masterNodeTimeout());
-            transportNodesSnapshotsStatus.execute(nodesRequest,
-                ActionListener.map(
-                    listener, nodeSnapshotStatuses ->
-                        buildResponse(request, snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots())),
-                            nodeSnapshotStatuses)));
+            transportNodesSnapshotsStatus.execute(
+                new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(Strings.EMPTY_ARRAY))
+                    .snapshots(snapshots).timeout(request.masterNodeTimeout()),
+                ActionListener.wrap(
+                    nodeSnapshotStatuses -> threadPool.executor(ThreadPool.Names.GENERIC).execute(
+                        ActionRunnable.wrap(listener, l -> l.onResponse(buildResponse(request, snapshotsService.currentSnapshots(
+                            request.repository(), Arrays.asList(request.snapshots())), nodeSnapshotStatuses)))), listener::onFailure));
         } else {
             // We don't have any in-progress shards, just return current stats
             listener.onResponse(buildResponse(request, currentSnapshots, null));

@@ -49,6 +49,9 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
 import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest;
 import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder;
 import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageResponse;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse;
 import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;

@@ -453,6 +456,21 @@ public interface ClusterAdminClient extends ElasticsearchClient {
      */
     GetRepositoriesRequestBuilder prepareGetRepositories(String... name);

+    /**
+     * Cleans up repository.
+     */
+    CleanupRepositoryRequestBuilder prepareCleanupRepository(String repository);
+
+    /**
+     * Cleans up repository.
+     */
+    ActionFuture<CleanupRepositoryResponse> cleanupRepository(CleanupRepositoryRequest repository);
+
+    /**
+     * Cleans up repository.
+     */
+    void cleanupRepository(CleanupRepositoryRequest repository, ActionListener<CleanupRepositoryResponse> listener);
+
     /**
      * Verifies a repository.
      */

@@ -26,6 +26,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksReque
 import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
 import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;

@@ -471,6 +472,16 @@ public class Requests {
         return new DeleteRepositoryRequest(name);
     }

+    /**
+     * Cleanup repository
+     *
+     * @param name repository name
+     * @return cleanup repository request
+     */
+    public static CleanupRepositoryRequest cleanupRepositoryRequest(String name) {
+        return new CleanupRepositoryRequest(name);
+    }
+
     /**
      * Verifies snapshot repository
      *

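A short usage sketch (not part of this commit) combining the static factory added above with the ActionFuture-returning ClusterAdminClient#cleanupRepository overload from the previous hunk. The Client instance and repository name are assumed for the example.

import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;

public class RequestsFactoryExample {
    public static CleanupRepositoryResponse run(Client client) {
        return client.admin().cluster()
            .cleanupRepository(Requests.cleanupRepositoryRequest("my_repo")) // hypothetical name
            .actionGet(); // blocks until the master has finished or failed the cleanup
    }
}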
@@ -64,6 +64,10 @@ import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageAction;
 import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest;
 import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder;
 import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageResponse;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder;
+import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse;
 import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
 import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder;

@@ -1019,6 +1023,21 @@ public abstract class AbstractClient implements Client {
         return new GetRepositoriesRequestBuilder(this, GetRepositoriesAction.INSTANCE, name);
     }

+    @Override
+    public CleanupRepositoryRequestBuilder prepareCleanupRepository(String repository) {
+        return new CleanupRepositoryRequestBuilder(this, CleanupRepositoryAction.INSTANCE, repository);
+    }
+
+    @Override
+    public ActionFuture<CleanupRepositoryResponse> cleanupRepository(CleanupRepositoryRequest request) {
+        return execute(CleanupRepositoryAction.INSTANCE, request);
+    }
+
+    @Override
+    public void cleanupRepository(CleanupRepositoryRequest request, ActionListener<CleanupRepositoryResponse> listener) {
+        execute(CleanupRepositoryAction.INSTANCE, request, listener);
+    }
+
     @Override
     public ActionFuture<RestoreSnapshotResponse> restoreSnapshot(RestoreSnapshotRequest request) {
         return execute(RestoreSnapshotAction.INSTANCE, request);

@@ -121,6 +121,8 @@ public class ClusterModule extends AbstractModule {
         registerClusterCustom(entries, RestoreInProgress.TYPE, RestoreInProgress::new, RestoreInProgress::readDiffFrom);
         registerClusterCustom(entries, SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress::new,
             SnapshotDeletionsInProgress::readDiffFrom);
+        registerClusterCustom(entries, RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress::new,
+            RepositoryCleanupInProgress::readDiffFrom);
         // Metadata
         registerMetaDataCustom(entries, RepositoriesMetaData.TYPE, RepositoriesMetaData::new, RepositoriesMetaData::readDiffFrom);
         registerMetaDataCustom(entries, IngestMetadata.TYPE, IngestMetadata::new, IngestMetadata::readDiffFrom);

@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+public final class RepositoryCleanupInProgress extends AbstractNamedDiffable<ClusterState.Custom> implements ClusterState.Custom {
+
+    public static final String TYPE = "repository_cleanup";
+
+    private final List<Entry> entries;
+
+    public RepositoryCleanupInProgress(Entry... entries) {
+        this.entries = Arrays.asList(entries);
+    }
+
+    RepositoryCleanupInProgress(StreamInput in) throws IOException {
+        this.entries = in.readList(Entry::new);
+    }
+
+    public static NamedDiff<ClusterState.Custom> readDiffFrom(StreamInput in) throws IOException {
+        return readDiffFrom(ClusterState.Custom.class, TYPE, in);
+    }
+
+    public static Entry startedEntry(String repository, long repositoryStateId) {
+        return new Entry(repository, repositoryStateId);
+    }
+
+    public boolean cleanupInProgress() {
+        // TODO: Should we allow parallelism across repositories here maybe?
+        return entries.isEmpty();
+    }
+
+    @Override
+    public String getWriteableName() {
+        return TYPE;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeList(entries);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startArray(TYPE);
+        for (Entry entry : entries) {
+            builder.startObject();
+            {
+                builder.field("repository", entry.repository);
+            }
+            builder.endObject();
+        }
+        builder.endArray();
+        return builder;
+    }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this);
+    }
+
+    @Override
+    public Version getMinimalSupportedVersion() {
+        return Version.V_7_4_0;
+    }
+
+    public static final class Entry implements Writeable {
+
+        private final String repository;
+
+        private final long repositoryStateId;
+
+        private Entry(StreamInput in) throws IOException {
+            repository = in.readString();
+            repositoryStateId = in.readLong();
+        }
+
+        private Entry(String repository, long repositoryStateId) {
+            this.repository = repository;
+            this.repositoryStateId = repositoryStateId;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeString(repository);
+            out.writeLong(repositoryStateId);
+        }
+
+        @Override
+        public String toString() {
+            return "{" + repository + '}' + '{' + repositoryStateId + '}';
+        }
+    }
+}

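A minimal sketch (not part of this commit) of constructing the cluster-state entry the transport action installs before running a cleanup; the repository name and state id are hypothetical. The Entry constructors are private, so startedEntry is the only way to build one, and toXContent above renders the custom roughly as "repository_cleanup" : [ { "repository" : "my_repo" } ] in the cluster state.

import org.elasticsearch.cluster.RepositoryCleanupInProgress;

public class RepositoryCleanupInProgressExample {
    public static void main(String[] args) {
        RepositoryCleanupInProgress inProgress = new RepositoryCleanupInProgress(
            RepositoryCleanupInProgress.startedEntry("my_repo", 42L)); // hypothetical values
        // toString() delegates to Strings.toString(this), i.e. the XContent representation.
        System.out.println(inProgress);
    }
}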
@@ -102,9 +102,11 @@ public interface BlobContainer {

     /**
      * Deletes this container and all its contents from the repository.
+     *
+     * @return delete result
      * @throws IOException on failure
      */
-    void delete() throws IOException;
+    DeleteResult delete() throws IOException;

     /**
      * Deletes the blobs with given names. Unlike {@link #deleteBlob(String)} this method will not throw an exception

@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.blobstore;
+
+/**
+ * The result of deleting multiple blobs from a {@link BlobStore}.
+ */
+public final class DeleteResult {
+
+    public static final DeleteResult ZERO = new DeleteResult(0, 0);
+
+    private final long blobsDeleted;
+    private final long bytesDeleted;
+
+    public DeleteResult(long blobsDeleted, long bytesDeleted) {
+        this.blobsDeleted = blobsDeleted;
+        this.bytesDeleted = bytesDeleted;
+    }
+
+    public long blobsDeleted() {
+        return blobsDeleted;
+    }
+
+    public long bytesDeleted() {
+        return bytesDeleted;
+    }
+
+    public DeleteResult add(DeleteResult other) {
+        return new DeleteResult(blobsDeleted + other.blobsDeleted(), bytesDeleted + other.bytesDeleted());
+    }
+
+    public DeleteResult add(long blobs, long bytes) {
+        return new DeleteResult(blobsDeleted + blobs, bytesDeleted + bytes);
+    }
+}

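A tiny illustration (not part of this commit) of how DeleteResult is meant to be accumulated while a container deletes blobs; the counts used are made up for the example.

import org.elasticsearch.common.blobstore.DeleteResult;

public class DeleteResultExample {
    public static void main(String[] args) {
        DeleteResult total = DeleteResult.ZERO
            .add(new DeleteResult(3, 1024)) // e.g. one child container: 3 blobs, 1 KiB
            .add(2, 512);                   // plus 2 more blobs, 512 bytes
        System.out.println(total.blobsDeleted() + " blobs, " + total.bytesDeleted() + " bytes");
    }
}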
@@ -23,6 +23,7 @@ import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
 import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
 import org.elasticsearch.core.internal.io.IOUtils;

@@ -45,6 +46,7 @@ import java.nio.file.StandardOpenOption;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;

 import static java.util.Collections.unmodifiableMap;

@@ -123,8 +125,26 @@ public class FsBlobContainer extends AbstractBlobContainer {
     }

     @Override
-    public void delete() throws IOException {
-        IOUtils.rm(path);
+    public DeleteResult delete() throws IOException {
+        final AtomicLong filesDeleted = new AtomicLong(0L);
+        final AtomicLong bytesDeleted = new AtomicLong(0L);
+        Files.walkFileTree(path, new SimpleFileVisitor<Path>() {
+            @Override
+            public FileVisitResult postVisitDirectory(Path dir, IOException impossible) throws IOException {
+                assert impossible == null;
+                Files.delete(dir);
+                return FileVisitResult.CONTINUE;
+            }
+
+            @Override
+            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+                Files.delete(file);
+                filesDeleted.incrementAndGet();
+                bytesDeleted.addAndGet(attrs.size());
+                return FileVisitResult.CONTINUE;
+            }
+        });
+        return new DeleteResult(filesDeleted.get(), bytesDeleted.get());
     }

     @Override

@@ -447,6 +447,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
             Client.CLIENT_TYPE_SETTING_S,
             ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
             EsExecutors.PROCESSORS_SETTING,
+            EsExecutors.NODE_PROCESSORS_SETTING,
             ThreadContext.DEFAULT_HEADERS_SETTING,
             Loggers.LOG_DEFAULT_LEVEL_SETTING,
             Loggers.LOG_LEVEL_SETTING,

@@ -44,6 +44,7 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Function;
 import java.util.stream.Collectors;

 public class EsExecutors {

@@ -56,19 +57,33 @@ public class EsExecutors {
     public static final Setting<Integer> PROCESSORS_SETTING = new Setting<>(
             "processors",
             s -> Integer.toString(Runtime.getRuntime().availableProcessors()),
-            s -> {
-                final int value = Setting.parseInt(s, 1, "processors");
+            processorsParser("processors"),
+            Property.Deprecated,
+            Property.NodeScope);
+
+    /**
+     * Setting to manually set the number of available processors. This setting is used to adjust thread pool sizes per node.
+     */
+    public static final Setting<Integer> NODE_PROCESSORS_SETTING = new Setting<>(
+            "node.processors",
+            PROCESSORS_SETTING,
+            processorsParser("node.processors"),
+            Property.NodeScope);
+
+    private static Function<String, Integer> processorsParser(final String name) {
+        return s -> {
+            final int value = Setting.parseInt(s, 1, name);
             final int availableProcessors = Runtime.getRuntime().availableProcessors();
             if (value > availableProcessors) {
                 deprecationLogger.deprecatedAndMaybeLog(
                     "processors",
-                    "setting processors to value [{}] which is more than available processors [{}] is deprecated",
+                    "setting [" + name + "] to value [{}] which is more than available processors [{}] is deprecated",
                     value,
                     availableProcessors);
             }
             return value;
-            },
-            Property.NodeScope);
+        };
+    }

     /**
      * Returns the number of available processors. Defaults to

@@ -79,7 +94,7 @@ public class EsExecutors {
      * @return the number of available processors
      */
     public static int numberOfProcessors(final Settings settings) {
-        return PROCESSORS_SETTING.get(settings);
+        return NODE_PROCESSORS_SETTING.get(settings);
     }

     public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory,

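A small sketch (not part of this commit) of the new node.processors setting in action: numberOfProcessors now reads NODE_PROCESSORS_SETTING, which is declared above with the deprecated processors setting as its fallback, so existing configurations keep working while emitting a deprecation warning.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;

public class NodeProcessorsExample {
    public static void main(String[] args) {
        // Equivalent to setting "node.processors: 2" in elasticsearch.yml.
        Settings settings = Settings.builder().put("node.processors", 2).build();
        System.out.println(EsExecutors.numberOfProcessors(settings)); // prints 2
    }
}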
Some files were not shown because too many files have changed in this diff.