Remove /_optimize REST API endpoint
The `/_optimize` endpoint was deprecated in 2.1.0 and can now be removed entirely.
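
Clients that still target the removed endpoint should move to the force merge API. A minimal illustration (the `twitter` index name is a placeholder taken from the removed documentation):

    # before (no longer accepted after this change; index name is an example only):
    curl -XPOST 'http://localhost:9200/twitter/_optimize?max_num_segments=1'

    # after:
    curl -XPOST 'http://localhost:9200/twitter/_forcemerge?max_num_segments=1'

The `max_num_segments`, `only_expunge_deletes` and `flush` query parameters are also accepted by the force merge endpoint.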
parent 935a8fc3d4
commit 3a458af0b7
@@ -26,7 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import java.io.IOException;
 
 /**
- * A request to upgrade one or more indices. In order to optimize on all the indices, pass an empty array or
+ * A request to upgrade one or more indices. In order to update all indices, pass an empty array or
  * <tt>null</tt> for the indices.
  * @see org.elasticsearch.client.Requests#upgradeRequest(String...)
  * @see org.elasticsearch.client.IndicesAdminClient#upgrade(UpgradeRequest)
@@ -43,7 +43,7 @@ public class UpgradeRequest extends BroadcastRequest<UpgradeRequest> {
     /**
      * Constructs an optimization request over one or more indices.
      *
-     * @param indices The indices to optimize, no indices passed means all indices will be optimized.
+     * @param indices The indices to upgrade, no indices passed means all indices will be optimized.
      */
     public UpgradeRequest(String... indices) {
         super(indices);
@@ -32,7 +32,7 @@ import java.util.List;
 import java.util.Map;
 
 /**
- * A response for optimize action.
+ * A response for the upgrade action.
  *
  *
  */
@@ -80,4 +80,4 @@ public class UpgradeResponse extends BroadcastResponse {
     public Map<String, Tuple<Version, String>> versions() {
         return versions;
     }
-}
+}
@@ -488,7 +488,7 @@ public abstract class Engine implements Closeable {
     public abstract CommitId flush() throws EngineException;
 
     /**
-     * Optimizes to 1 segment
+     * Force merges to 1 segment
      */
    public void forceMerge(boolean flush) throws IOException {
        forceMerge(flush, 1, false, false, false);
@@ -665,7 +665,7 @@ public class IndexShard extends AbstractIndexShardComponent {
            logger.trace("upgrade with {}", upgrade);
        }
        org.apache.lucene.util.Version previousVersion = minimumCompatibleVersion();
-       // we just want to upgrade the segments, not actually optimize to a single segment
+       // we just want to upgrade the segments, not actually forge merge to a single segment
        getEngine().forceMerge(true, // we need to flush at the end to make sure the upgrade is durable
            Integer.MAX_VALUE, // we just want to upgrade the segments, not actually optimize to a single segment
            false, true, upgrade.upgradeOnlyAncientSegments());
@@ -64,15 +64,15 @@ import org.elasticsearch.common.unit.ByteSizeValue;
 *
 * <li><code>index.merge.policy.max_merge_at_once_explicit</code>:
 *
- *     Maximum number of segments to be merged at a time, during optimize or
+ *     Maximum number of segments to be merged at a time, during force merge or
 *     expungeDeletes. Default is <code>30</code>.
 *
 * <li><code>index.merge.policy.max_merged_segment</code>:
 *
 *     Maximum sized segment to produce during normal merging (not explicit
- *     optimize). This setting is approximate: the estimate of the merged segment
- *     size is made by summing sizes of to-be-merged segments (compensating for
- *     percent deleted docs). Default is <code>5gb</code>.
+ *     force merge). This setting is approximate: the estimate of the merged
+ *     segment size is made by summing sizes of to-be-merged segments
+ *     (compensating for percent deleted docs). Default is <code>5gb</code>.
 *
 * <li><code>index.merge.policy.segments_per_tier</code>:
 *
@@ -48,13 +48,6 @@ public class RestForceMergeAction extends BaseRestHandler {
 
         controller.registerHandler(GET, "/_forcemerge", this);
         controller.registerHandler(GET, "/{index}/_forcemerge", this);
-
-        // TODO: Remove for 3.0
-        controller.registerHandler(POST, "/_optimize", this);
-        controller.registerHandler(POST, "/{index}/_optimize", this);
-
-        controller.registerHandler(GET, "/_optimize", this);
-        controller.registerHandler(GET, "/{index}/_optimize", this);
     }
 
     @Override
@@ -62,12 +62,12 @@ public class FileUtilsTests extends ESTestCase {
         assertThat(yamlSuites.get("index").size(), greaterThan(1));
 
         //multiple paths, which can be both directories or yaml test suites (with optional file extension)
-        yamlSuites = FileUtils.findYamlSuites(null, "/rest-api-spec/test", "indices.optimize/10_basic", "index");
+        yamlSuites = FileUtils.findYamlSuites(null, "/rest-api-spec/test", "indices.forcemerge/10_basic", "index");
         assertThat(yamlSuites, notNullValue());
         assertThat(yamlSuites.size(), equalTo(2));
-        assertThat(yamlSuites.containsKey("indices.optimize"), equalTo(true));
-        assertThat(yamlSuites.get("indices.optimize").size(), equalTo(1));
-        assertSingleFile(yamlSuites.get("indices.optimize"), "indices.optimize", "10_basic.yaml");
+        assertThat(yamlSuites.containsKey("indices.forcemerge"), equalTo(true));
+        assertThat(yamlSuites.get("indices.forcemerge").size(), equalTo(1));
+        assertSingleFile(yamlSuites.get("indices.forcemerge"), "indices.forcemerge", "10_basic.yaml");
        assertThat(yamlSuites.containsKey("index"), equalTo(true));
        assertThat(yamlSuites.get("index").size(), greaterThan(1));
 
@@ -112,7 +112,5 @@ include::indices/refresh.asciidoc[]
 
 include::indices/forcemerge.asciidoc[]
 
-include::indices/optimize.asciidoc[]
-
 include::indices/upgrade.asciidoc[]
 
@@ -1,54 +0,0 @@
-[[indices-optimize]]
-== Optimize
-
-deprecated[2.1.0,Optimize API has been renamed to the force merge API]
-
-The optimize API allows to optimize one or more indices through an API.
-The optimize process basically optimizes the index for faster search
-operations (and relates to the number of segments a Lucene index holds
-within each shard). The optimize operation allows to reduce the number
-of segments by merging them.
-
-This call will block until the optimize is complete. If the http connection
-is lost, the request will continue in the background, and
-any new requests will block until the previous optimize is complete.
-
-[source,js]
---------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/twitter/_optimize'
---------------------------------------------------
-
-[float]
-[[optimize-parameters]]
-=== Request Parameters
-
-The optimize API accepts the following request parameters as query arguments:
-
-[horizontal]
-`max_num_segments`:: The number of segments to optimize to. To fully
-optimize the index, set it to `1`. Defaults to simply checking if a
-merge needs to execute, and if so, executes it.
-
-`only_expunge_deletes`:: Should the optimize process only expunge segments with
-deletes in it. In Lucene, a document is not deleted from a segment, just marked
-as deleted. During a merge process of segments, a new segment is created that
-does not have those deletes. This flag allows to only merge segments that have
-deletes. Defaults to `false`. Note that this won't override the
-`index.merge.policy.expunge_deletes_allowed` threshold.
-
-`flush`:: Should a flush be performed after the optimize. Defaults to
-`true`.
-
-[float]
-[[optimize-multi-index]]
-=== Multi Index
-
-The optimize API can be applied to more than one index with a single
-call, or even on `_all` the indices.
-
-[source,js]
---------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_optimize'
-
-$ curl -XPOST 'http://localhost:9200/_optimize?only_expunge_deletes=true'
---------------------------------------------------
@@ -63,11 +63,18 @@ Scroll requests sorted by `_doc` have been optimized to more efficiently resume
 from where the previous request stopped, so this will have the same performance
 characteristics as the former `scan` search type.
 
+=== REST API changes
+
 ==== search exists api removed
 
 The search exists api has been removed in favour of using the search api with
 `size` set to `0` and `terminate_after` set to `1`.
 
+==== `/_optimize` endpoint removed
+
+The deprecated `/_optimize` endpoint has been removed. The `/_forcemerge`
+endpoint should be used in lieu of optimize.
+
 === Parent/Child changes
 
 The `children` aggregation, parent child inner hits and `has_child` and `has_parent` queries will not work on indices
@@ -135,10 +135,10 @@ payloads or weights. This form does still work inside of multi fields.
 }
 --------------------------------------------------
 
-NOTE: The suggest data structure might not reflect deletes on
-documents immediately. You may need to do an <<indices-optimize>> for that.
-You can call optimize with the `only_expunge_deletes=true` to only target
-deletions for merging.
+NOTE: The suggest data structure might not reflect deletes on documents
+immediately. You may need to do an <<indices-forcemerge>> for that. You can call
+force merge with the `only_expunge_deletes=true` to only target deletions for
+merging.
 
 [[querying]]
 ==== Querying
@@ -1,52 +0,0 @@
-{
-  "indices.optimize": {
-    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-optimize.html",
-    "methods": ["POST", "GET"],
-    "url": {
-      "path": "/_optimize",
-      "paths": ["/_optimize", "/{index}/_optimize"],
-      "parts": {
-        "index": {
-          "type" : "list",
-          "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
-        }
-      },
-      "params": {
-        "flush": {
-          "type" : "boolean",
-          "description" : "Specify whether the index should be flushed after performing the operation (default: true)"
-        },
-        "ignore_unavailable": {
-          "type" : "boolean",
-          "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
-        },
-        "allow_no_indices": {
-          "type" : "boolean",
-          "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
-        },
-        "expand_wildcards": {
-          "type" : "enum",
-          "options" : ["open","closed","none","all"],
-          "default" : "open",
-          "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
-        },
-        "max_num_segments": {
-          "type" : "number",
-          "description" : "The number of segments the index should be merged into (default: dynamic)"
-        },
-        "only_expunge_deletes": {
-          "type" : "boolean",
-          "description" : "Specify whether the operation should only expunge deleted documents"
-        },
-        "operation_threading": {
-          "description" : "TODO: ?"
-        },
-        "wait_for_merge": {
-          "type" : "boolean",
-          "description" : "Specify whether the request should block until the merge process is finished (default: true)"
-        }
-      }
-    },
-    "body": null
-  }
-}
@@ -1,10 +0,0 @@
----
-"Optimize index tests":
-  - do:
-      indices.create:
-        index: testing
-
-  - do:
-      indices.optimize:
-        index: testing
-        max_num_segments: 1