mirror of
https://github.com/honeymoose/OpenSearch.git
synced 2025-03-25 17:38:44 +00:00
Add Clone Index API (#44267)
Adds an API to clone an index. This is similar to the index split and shrink APIs, just with the difference that the number of primary shards is kept the same. In case where the filesystem provides hard-linking capabilities, this is a very cheap operation. Indexing cloning can be done by running `POST my_source_index/_clone/my_target_index` and it supports the same options as the split and shrink APIs. Closes #44128
This commit is contained in:
parent
a89860160b
commit
0ce841915c
@ -909,6 +909,33 @@ public final class IndicesClient {
|
||||
ResizeResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Clones an index using the Clone Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clone-index.html">
|
||||
* Clone Index API on elastic.co</a>
|
||||
* @param resizeRequest the request
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @return the response
|
||||
* @throws IOException in case there is a problem sending the request or parsing back the response
|
||||
*/
|
||||
public ResizeResponse clone(ResizeRequest resizeRequest, RequestOptions options) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(resizeRequest, IndicesRequestConverters::clone, options,
|
||||
ResizeResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously clones an index using the Clone Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clone-index.html">
|
||||
* Clone Index API on elastic.co</a>
|
||||
* @param resizeRequest the request
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @param listener the listener to be notified upon request completion
|
||||
*/
|
||||
public void cloneAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener<ResizeResponse> listener) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, IndicesRequestConverters::clone, options,
|
||||
ResizeResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Rolls over an index using the Rollover Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html">
|
||||
|
@ -337,6 +337,13 @@ final class IndicesRequestConverters {
|
||||
return resize(resizeRequest);
|
||||
}
|
||||
|
||||
static Request clone(ResizeRequest resizeRequest) throws IOException {
|
||||
if (resizeRequest.getResizeType() != ResizeType.CLONE) {
|
||||
throw new IllegalArgumentException("Wrong resize type [" + resizeRequest.getResizeType() + "] for indices clone request");
|
||||
}
|
||||
return resize(resizeRequest);
|
||||
}
|
||||
|
||||
private static Request resize(ResizeRequest resizeRequest) throws IOException {
|
||||
String endpoint = new RequestConverters.EndpointBuilder().addPathPart(resizeRequest.getSourceIndex())
|
||||
.addPathPartAsIs("_" + resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT))
|
||||
|
@ -1128,6 +1128,30 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
assertNotNull(aliasData);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testClone() throws IOException {
|
||||
createIndex("source", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0)
|
||||
.put("index.number_of_routing_shards", 4).build());
|
||||
updateIndexSettings("source", Settings.builder().put("index.blocks.write", true));
|
||||
|
||||
ResizeRequest resizeRequest = new ResizeRequest("target", "source");
|
||||
resizeRequest.setResizeType(ResizeType.CLONE);
|
||||
Settings targetSettings = Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build();
|
||||
resizeRequest.setTargetIndex(new org.elasticsearch.action.admin.indices.create.CreateIndexRequest("target")
|
||||
.settings(targetSettings)
|
||||
.alias(new Alias("alias")));
|
||||
ResizeResponse resizeResponse = execute(resizeRequest, highLevelClient().indices()::clone, highLevelClient().indices()::cloneAsync);
|
||||
assertTrue(resizeResponse.isAcknowledged());
|
||||
assertTrue(resizeResponse.isShardsAcknowledged());
|
||||
Map<String, Object> getIndexResponse = getAsMap("target");
|
||||
Map<String, Object> indexSettings = (Map<String, Object>)XContentMapValues.extractValue("target.settings.index", getIndexResponse);
|
||||
assertNotNull(indexSettings);
|
||||
assertEquals("2", indexSettings.get("number_of_shards"));
|
||||
assertEquals("0", indexSettings.get("number_of_replicas"));
|
||||
Map<String, Object> aliasData = (Map<String, Object>)XContentMapValues.extractValue("target.aliases.alias", getIndexResponse);
|
||||
assertNotNull(aliasData);
|
||||
}
|
||||
|
||||
public void testRollover() throws IOException {
|
||||
highLevelClient().indices().create(new CreateIndexRequest("test").alias(new Alias("alias")), RequestOptions.DEFAULT);
|
||||
RolloverRequest rolloverRequest = new RolloverRequest("alias", "test_new");
|
||||
|
@ -830,18 +830,33 @@ public class IndicesRequestConvertersTests extends ESTestCase {
|
||||
|
||||
public void testSplitWrongResizeType() {
|
||||
ResizeRequest resizeRequest = new ResizeRequest("target", "source");
|
||||
resizeRequest.setResizeType(ResizeType.SHRINK);
|
||||
ResizeType wrongType = randomFrom(ResizeType.SHRINK, ResizeType.CLONE);
|
||||
resizeRequest.setResizeType(wrongType);
|
||||
IllegalArgumentException iae = LuceneTestCase.expectThrows(IllegalArgumentException.class, ()
|
||||
-> IndicesRequestConverters.split(resizeRequest));
|
||||
Assert.assertEquals("Wrong resize type [SHRINK] for indices split request", iae.getMessage());
|
||||
Assert.assertEquals("Wrong resize type [" + wrongType.name() + "] for indices split request", iae.getMessage());
|
||||
}
|
||||
|
||||
public void testClone() throws IOException {
|
||||
resizeTest(ResizeType.CLONE, IndicesRequestConverters::clone);
|
||||
}
|
||||
|
||||
public void testCloneWrongResizeType() {
|
||||
ResizeRequest resizeRequest = new ResizeRequest("target", "source");
|
||||
ResizeType wrongType = randomFrom(ResizeType.SHRINK, ResizeType.SPLIT);
|
||||
resizeRequest.setResizeType(wrongType);
|
||||
IllegalArgumentException iae = LuceneTestCase.expectThrows(IllegalArgumentException.class, ()
|
||||
-> IndicesRequestConverters.clone(resizeRequest));
|
||||
Assert.assertEquals("Wrong resize type [" + wrongType.name() + "] for indices clone request", iae.getMessage());
|
||||
}
|
||||
|
||||
public void testShrinkWrongResizeType() {
|
||||
ResizeRequest resizeRequest = new ResizeRequest("target", "source");
|
||||
resizeRequest.setResizeType(ResizeType.SPLIT);
|
||||
ResizeType wrongType = randomFrom(ResizeType.SPLIT, ResizeType.CLONE);
|
||||
resizeRequest.setResizeType(wrongType);
|
||||
IllegalArgumentException iae = LuceneTestCase.expectThrows(IllegalArgumentException.class, ()
|
||||
-> IndicesRequestConverters.shrink(resizeRequest));
|
||||
Assert.assertEquals("Wrong resize type [SPLIT] for indices shrink request", iae.getMessage());
|
||||
Assert.assertEquals("Wrong resize type [" + wrongType.name() + "] for indices shrink request", iae.getMessage());
|
||||
}
|
||||
|
||||
public void testShrink() throws IOException {
|
||||
|
@ -1808,6 +1808,75 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
public void testCloneIndex() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
createIndex("source_index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build());
|
||||
updateIndexSettings("source_index", Settings.builder().put("index.blocks.write", true));
|
||||
}
|
||||
|
||||
// tag::clone-index-request
|
||||
ResizeRequest request = new ResizeRequest("target_index","source_index"); // <1>
|
||||
request.setResizeType(ResizeType.CLONE); // <2>
|
||||
// end::clone-index-request
|
||||
|
||||
// tag::clone-index-request-timeout
|
||||
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
|
||||
request.timeout("2m"); // <2>
|
||||
// end::clone-index-request-timeout
|
||||
// tag::clone-index-request-masterTimeout
|
||||
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
|
||||
request.masterNodeTimeout("1m"); // <2>
|
||||
// end::clone-index-request-masterTimeout
|
||||
// tag::clone-index-request-waitForActiveShards
|
||||
request.setWaitForActiveShards(2); // <1>
|
||||
request.setWaitForActiveShards(ActiveShardCount.DEFAULT); // <2>
|
||||
// end::clone-index-request-waitForActiveShards
|
||||
// tag::clone-index-request-settings
|
||||
request.getTargetIndexRequest().settings(Settings.builder()
|
||||
.put("index.number_of_shards", 2)); // <1>
|
||||
// end::clone-index-request-settings
|
||||
// tag::clone-index-request-aliases
|
||||
request.getTargetIndexRequest().alias(new Alias("target_alias")); // <1>
|
||||
// end::clone-index-request-aliases
|
||||
|
||||
// tag::clone-index-execute
|
||||
ResizeResponse resizeResponse = client.indices().clone(request, RequestOptions.DEFAULT);
|
||||
// end::clone-index-execute
|
||||
|
||||
// tag::clone-index-response
|
||||
boolean acknowledged = resizeResponse.isAcknowledged(); // <1>
|
||||
boolean shardsAcked = resizeResponse.isShardsAcknowledged(); // <2>
|
||||
// end::clone-index-response
|
||||
assertTrue(acknowledged);
|
||||
assertTrue(shardsAcked);
|
||||
|
||||
// tag::clone-index-execute-listener
|
||||
ActionListener<ResizeResponse> listener = new ActionListener<ResizeResponse>() {
|
||||
@Override
|
||||
public void onResponse(ResizeResponse resizeResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::clone-index-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::clone-index-execute-async
|
||||
client.indices().cloneAsync(request, RequestOptions.DEFAULT,listener); // <1>
|
||||
// end::clone-index-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
public void testRolloverIndex() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
|
80
docs/java-rest/high-level/indices/clone_index.asciidoc
Normal file
80
docs/java-rest/high-level/indices/clone_index.asciidoc
Normal file
@ -0,0 +1,80 @@
|
||||
--
|
||||
:api: clone-index
|
||||
:request: ResizeRequest
|
||||
:response: ResizeResponse
|
||||
--
|
||||
|
||||
[id="{upid}-{api}"]
|
||||
=== Clone Index API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Resize Request
|
||||
|
||||
The Clone Index API requires a +{request}+ instance.
|
||||
A +{request}+ requires two string arguments:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> The target index (first argument) to clone the source index (second argument) into
|
||||
<2> The resize type needs to be set to `CLONE`
|
||||
|
||||
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-timeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to wait for the all the nodes to acknowledge the index is opened
|
||||
as a `TimeValue`
|
||||
<2> Timeout to wait for the all the nodes to acknowledge the index is opened
|
||||
as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-masterTimeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to connect to the master node as a `TimeValue`
|
||||
<2> Timeout to connect to the master node as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-waitForActiveShards]
|
||||
--------------------------------------------------
|
||||
<1> The number of active shard copies to wait for before the clone index API
|
||||
returns a response, as an `int`
|
||||
<2> The number of active shard copies to wait for before the clone index API
|
||||
returns a response, as an `ActiveShardCount`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-settings]
|
||||
--------------------------------------------------
|
||||
<1> The settings to apply to the target index, which optionally include the
|
||||
number of shards to create for it
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-aliases]
|
||||
--------------------------------------------------
|
||||
<1> The aliases to associate the target index with
|
||||
|
||||
include::../execution.asciidoc[]
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Clone Index Response
|
||||
|
||||
The returned +{response}+ allows to retrieve information about the
|
||||
executed operation as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> Indicates whether all of the nodes have acknowledged the request
|
||||
<2> Indicates whether the requisite number of shard copies were started for
|
||||
each shard in the index before timing out
|
||||
|
||||
|
@ -99,6 +99,7 @@ Index Management::
|
||||
* <<{upid}-close-index>>
|
||||
* <<{upid}-shrink-index>>
|
||||
* <<{upid}-split-index>>
|
||||
* <<{upid}-clone-index>>
|
||||
* <<{upid}-refresh>>
|
||||
* <<{upid}-flush>>
|
||||
* <<{upid}-flush-synced>>
|
||||
@ -133,6 +134,7 @@ include::indices/open_index.asciidoc[]
|
||||
include::indices/close_index.asciidoc[]
|
||||
include::indices/shrink_index.asciidoc[]
|
||||
include::indices/split_index.asciidoc[]
|
||||
include::indices/clone_index.asciidoc[]
|
||||
include::indices/refresh.asciidoc[]
|
||||
include::indices/flush.asciidoc[]
|
||||
include::indices/flush_synced.asciidoc[]
|
||||
|
@ -15,6 +15,7 @@ index settings, aliases, mappings, and index templates.
|
||||
* <<indices-open-close>>
|
||||
* <<indices-shrink-index>>
|
||||
* <<indices-split-index>>
|
||||
* <<indices-clone-index>>
|
||||
* <<indices-rollover-index>>
|
||||
* <<freeze-index-api>>
|
||||
* <<unfreeze-index-api>>
|
||||
@ -72,6 +73,8 @@ include::indices/shrink-index.asciidoc[]
|
||||
|
||||
include::indices/split-index.asciidoc[]
|
||||
|
||||
include::indices/clone-index.asciidoc[]
|
||||
|
||||
include::indices/rollover-index.asciidoc[]
|
||||
|
||||
include::indices/apis/freeze.asciidoc[]
|
||||
|
138
docs/reference/indices/clone-index.asciidoc
Normal file
138
docs/reference/indices/clone-index.asciidoc
Normal file
@ -0,0 +1,138 @@
|
||||
[[indices-clone-index]]
|
||||
== Clone Index
|
||||
|
||||
The clone index API allows you to clone an existing index into a new index,
|
||||
where each original primary shard is cloned into a new primary shard in
|
||||
the new index.
|
||||
|
||||
[float]
|
||||
=== How does cloning work?
|
||||
|
||||
Cloning works as follows:
|
||||
|
||||
* First, it creates a new target index with the same definition as the source
|
||||
index.
|
||||
|
||||
* Then it hard-links segments from the source index into the target index. (If
|
||||
the file system doesn't support hard-linking, then all segments are copied
|
||||
into the new index, which is a much more time consuming process.)
|
||||
|
||||
* Finally, it recovers the target index as though it were a closed index which
|
||||
had just been re-opened.
|
||||
|
||||
[float]
|
||||
=== Preparing an index for cloning
|
||||
|
||||
Create a new index:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT my_source_index
|
||||
{
|
||||
"settings": {
|
||||
"index.number_of_shards" : 5
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
In order to clone an index, the index must be marked as read-only,
|
||||
and have <<cluster-health,health>> `green`.
|
||||
|
||||
This can be achieved with the following request:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /my_source_index/_settings
|
||||
{
|
||||
"settings": {
|
||||
"index.blocks.write": true <1>
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
<1> Prevents write operations to this index while still allowing metadata
|
||||
changes like deleting the index.
|
||||
|
||||
[float]
|
||||
=== Cloning an index
|
||||
|
||||
To clone `my_source_index` into a new index called `my_target_index`, issue
|
||||
the following request:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST my_source_index/_clone/my_target_index
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
The above request returns immediately once the target index has been added to
|
||||
the cluster state -- it doesn't wait for the clone operation to start.
|
||||
|
||||
[IMPORTANT]
|
||||
=====================================
|
||||
|
||||
Indices can only be cloned if they satisfy the following requirements:
|
||||
|
||||
* the target index must not exist
|
||||
|
||||
* The source index must have the same number of primary shards as the target index.
|
||||
|
||||
* The node handling the clone process must have sufficient free disk space to
|
||||
accommodate a second copy of the existing index.
|
||||
|
||||
=====================================
|
||||
|
||||
The `_clone` API is similar to the <<indices-create-index, `create index` API>>
|
||||
and accepts `settings` and `aliases` parameters for the target index:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST my_source_index/_clone/my_target_index
|
||||
{
|
||||
"settings": {
|
||||
"index.number_of_shards": 5 <1>
|
||||
},
|
||||
"aliases": {
|
||||
"my_search_indices": {}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[s/^/PUT my_source_index\n{"settings": {"index.blocks.write": true, "index.number_of_shards": "5"}}\n/]
|
||||
|
||||
<1> The number of shards in the target index. This must be equal to the
|
||||
number of shards in the source index.
|
||||
|
||||
|
||||
NOTE: Mappings may not be specified in the `_clone` request. The mappings of
|
||||
the source index will be used for the target index.
|
||||
|
||||
[float]
|
||||
=== Monitoring the clone process
|
||||
|
||||
The clone process can be monitored with the <<cat-recovery,`_cat recovery`
|
||||
API>>, or the <<cluster-health, `cluster health` API>> can be used to wait
|
||||
until all primary shards have been allocated by setting the `wait_for_status`
|
||||
parameter to `yellow`.
|
||||
|
||||
The `_clone` API returns as soon as the target index has been added to the
|
||||
cluster state, before any shards have been allocated. At this point, all
|
||||
shards are in the state `unassigned`. If, for any reason, the target index
|
||||
can't be allocated, its primary shard will remain `unassigned` until it
|
||||
can be allocated on that node.
|
||||
|
||||
Once the primary shard is allocated, it moves to state `initializing`, and the
|
||||
clone process begins. When the clone operation completes, the shard will
|
||||
become `active`. At that point, Elasticsearch will try to allocate any
|
||||
replicas and may decide to relocate the primary shard to another node.
|
||||
|
||||
[float]
|
||||
=== Wait For Active Shards
|
||||
|
||||
Because the clone operation creates a new index to clone the shards to,
|
||||
the <<create-index-wait-for-active-shards,wait for active shards>> setting
|
||||
on index creation applies to the clone index action as well.
|
@ -0,0 +1,39 @@
|
||||
{
|
||||
"indices.clone": {
|
||||
"documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html",
|
||||
"stability": "stable",
|
||||
"methods": ["PUT", "POST"],
|
||||
"url": {
|
||||
"paths": ["/{index}/_clone/{target}"],
|
||||
"parts": {
|
||||
"index": {
|
||||
"type" : "string",
|
||||
"required" : true,
|
||||
"description" : "The name of the source index to clone"
|
||||
},
|
||||
"target": {
|
||||
"type" : "string",
|
||||
"required" : true,
|
||||
"description" : "The name of the target index to clone into"
|
||||
}
|
||||
},
|
||||
"params": {
|
||||
"timeout": {
|
||||
"type" : "time",
|
||||
"description" : "Explicit operation timeout"
|
||||
},
|
||||
"master_timeout": {
|
||||
"type" : "time",
|
||||
"description" : "Specify timeout for connection to master"
|
||||
},
|
||||
"wait_for_active_shards": {
|
||||
"type" : "string",
|
||||
"description" : "Set the number of active shards to wait for on the cloned index before the operation returns."
|
||||
}
|
||||
}
|
||||
},
|
||||
"body": {
|
||||
"description" : "The configuration for the target index (`settings` and `aliases`)"
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,111 @@
|
||||
---
|
||||
setup:
|
||||
- do:
|
||||
indices.create:
|
||||
index: source
|
||||
wait_for_active_shards: 1
|
||||
body:
|
||||
settings:
|
||||
index.number_of_shards: 2
|
||||
index.number_of_replicas: 0
|
||||
- do:
|
||||
index:
|
||||
index: source
|
||||
id: "1"
|
||||
body: { "foo": "hello world" }
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: source
|
||||
id: "2"
|
||||
body: { "foo": "hello world 2" }
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: source
|
||||
id: "3"
|
||||
body: { "foo": "hello world 3" }
|
||||
|
||||
---
|
||||
"Clone index via API":
|
||||
- skip:
|
||||
version: " - 7.3.99"
|
||||
reason: index cloning was added in 7.4.0
|
||||
# make it read-only
|
||||
- do:
|
||||
indices.put_settings:
|
||||
index: source
|
||||
body:
|
||||
index.blocks.write: true
|
||||
index.number_of_replicas: 0
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: green
|
||||
index: source
|
||||
|
||||
# now we do the actual clone
|
||||
- do:
|
||||
indices.clone:
|
||||
index: "source"
|
||||
target: "target"
|
||||
wait_for_active_shards: 1
|
||||
master_timeout: 10s
|
||||
body:
|
||||
settings:
|
||||
index.number_of_replicas: 0
|
||||
index.number_of_shards: 2
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: green
|
||||
|
||||
- do:
|
||||
get:
|
||||
index: target
|
||||
id: "1"
|
||||
|
||||
- match: { _index: target }
|
||||
- match: { _type: _doc }
|
||||
- match: { _id: "1" }
|
||||
- match: { _source: { foo: "hello world" } }
|
||||
|
||||
|
||||
- do:
|
||||
get:
|
||||
index: target
|
||||
id: "2"
|
||||
|
||||
- match: { _index: target }
|
||||
- match: { _type: _doc }
|
||||
- match: { _id: "2" }
|
||||
- match: { _source: { foo: "hello world 2" } }
|
||||
|
||||
|
||||
- do:
|
||||
get:
|
||||
index: target
|
||||
id: "3"
|
||||
|
||||
- match: { _index: target }
|
||||
- match: { _type: _doc }
|
||||
- match: { _id: "3" }
|
||||
- match: { _source: { foo: "hello world 3" } }
|
||||
|
||||
---
|
||||
"Create illegal clone indices":
|
||||
- skip:
|
||||
version: " - 7.3.99"
|
||||
reason: index cloning was added in 7.4.0
|
||||
# try to do an illegal clone with illegal number_of_shards
|
||||
- do:
|
||||
catch: /illegal_argument_exception/
|
||||
indices.clone:
|
||||
index: "source"
|
||||
target: "target"
|
||||
wait_for_active_shards: 1
|
||||
master_timeout: 10s
|
||||
body:
|
||||
settings:
|
||||
index.number_of_replicas: 0
|
||||
index.number_of_shards: 6
|
@ -0,0 +1,65 @@
|
||||
---
|
||||
"Clone index ignores target template mapping":
|
||||
- skip:
|
||||
version: " - 7.3.99"
|
||||
reason: index cloning was added in 7.4.0
|
||||
# create index
|
||||
- do:
|
||||
indices.create:
|
||||
index: source
|
||||
wait_for_active_shards: 1
|
||||
body:
|
||||
settings:
|
||||
number_of_shards: 1
|
||||
number_of_replicas: 0
|
||||
mappings:
|
||||
properties:
|
||||
count:
|
||||
type: text
|
||||
|
||||
# index document
|
||||
- do:
|
||||
index:
|
||||
index: source
|
||||
id: "1"
|
||||
body: { "count": "1" }
|
||||
|
||||
# create template matching shrink target
|
||||
- do:
|
||||
indices.put_template:
|
||||
name: tpl1
|
||||
body:
|
||||
index_patterns: targ*
|
||||
mappings:
|
||||
properties:
|
||||
count:
|
||||
type: integer
|
||||
|
||||
# make it read-only
|
||||
- do:
|
||||
indices.put_settings:
|
||||
index: source
|
||||
body:
|
||||
index.blocks.write: true
|
||||
index.number_of_replicas: 0
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: green
|
||||
index: source
|
||||
|
||||
# now we do the actual clone
|
||||
- do:
|
||||
indices.clone:
|
||||
index: "source"
|
||||
target: "target"
|
||||
wait_for_active_shards: 1
|
||||
master_timeout: 10s
|
||||
body:
|
||||
settings:
|
||||
index.number_of_shards: 1
|
||||
index.number_of_replicas: 0
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: green
|
@ -0,0 +1,61 @@
|
||||
---
|
||||
"Copy settings during clone index":
|
||||
- skip:
|
||||
version: " - 7.3.99"
|
||||
reason: index cloning was added in 7.4.0
|
||||
features: [arbitrary_key]
|
||||
|
||||
- do:
|
||||
nodes.info:
|
||||
node_id: data:true
|
||||
- set:
|
||||
nodes._arbitrary_key_: node_id
|
||||
|
||||
- do:
|
||||
indices.create:
|
||||
index: source
|
||||
wait_for_active_shards: 1
|
||||
body:
|
||||
settings:
|
||||
index.number_of_replicas: 0
|
||||
index.number_of_shards: 1
|
||||
index.merge.scheduler.max_merge_count: 4
|
||||
|
||||
# make it read-only
|
||||
- do:
|
||||
indices.put_settings:
|
||||
index: source
|
||||
body:
|
||||
index.blocks.write: true
|
||||
index.number_of_replicas: 0
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: green
|
||||
index: source
|
||||
|
||||
# now we do an actual clone and copy settings
|
||||
- do:
|
||||
indices.clone:
|
||||
index: "source"
|
||||
target: "copy-settings-target"
|
||||
wait_for_active_shards: 1
|
||||
master_timeout: 10s
|
||||
body:
|
||||
settings:
|
||||
index.number_of_replicas: 0
|
||||
index.number_of_shards: 1
|
||||
index.merge.scheduler.max_thread_count: 2
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: green
|
||||
|
||||
- do:
|
||||
indices.get_settings:
|
||||
index: "copy-settings-target"
|
||||
|
||||
# settings should be copied
|
||||
- match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
|
||||
- match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
||||
- match: { copy-settings-target.settings.index.blocks.write: "true" }
|
@ -593,6 +593,7 @@ public class ActionModule extends AbstractModule {
|
||||
registerHandler.accept(new RestCreateIndexAction(settings, restController));
|
||||
registerHandler.accept(new RestResizeHandler.RestShrinkIndexAction(settings, restController));
|
||||
registerHandler.accept(new RestResizeHandler.RestSplitIndexAction(settings, restController));
|
||||
registerHandler.accept(new RestResizeHandler.RestCloneIndexAction(settings, restController));
|
||||
registerHandler.accept(new RestRolloverIndexAction(settings, restController));
|
||||
registerHandler.accept(new RestDeleteIndexAction(settings, restController));
|
||||
registerHandler.accept(new RestCloseIndexAction(settings, restController));
|
||||
|
@ -110,6 +110,9 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
|
||||
targetIndexRequest.writeTo(out);
|
||||
out.writeString(sourceIndex);
|
||||
if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) {
|
||||
if (type == ResizeType.CLONE && out.getVersion().before(Version.V_7_4_0)) {
|
||||
throw new IllegalArgumentException("can't send clone request to a node that's older than " + Version.V_7_4_0);
|
||||
}
|
||||
out.writeEnum(type);
|
||||
}
|
||||
// noinspection StatementWithEmptyBody
|
||||
|
@ -23,5 +23,5 @@ package org.elasticsearch.action.admin.indices.shrink;
|
||||
* The type of the resize operation
|
||||
*/
|
||||
public enum ResizeType {
|
||||
SHRINK, SPLIT;
|
||||
SHRINK, SPLIT, CLONE;
|
||||
}
|
||||
|
@ -127,8 +127,13 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeReque
|
||||
if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
|
||||
numShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings);
|
||||
} else {
|
||||
assert resizeRequest.getResizeType() == ResizeType.SHRINK : "split must specify the number of shards explicitly";
|
||||
numShards = 1;
|
||||
assert resizeRequest.getResizeType() != ResizeType.SPLIT : "split must specify the number of shards explicitly";
|
||||
if (resizeRequest.getResizeType() == ResizeType.SHRINK) {
|
||||
numShards = 1;
|
||||
} else {
|
||||
assert resizeRequest.getResizeType() == ResizeType.CLONE;
|
||||
numShards = metaData.getNumberOfShards();
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < numShards; i++) {
|
||||
@ -145,15 +150,17 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeReque
|
||||
+ "] docs - too many documents in shards " + shardIds);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
} else if (resizeRequest.getResizeType() == ResizeType.SPLIT) {
|
||||
Objects.requireNonNull(IndexMetaData.selectSplitShard(i, metaData, numShards));
|
||||
// we just execute this to ensure we get the right exceptions if the number of shards is wrong or less then etc.
|
||||
} else {
|
||||
Objects.requireNonNull(IndexMetaData.selectCloneShard(i, metaData, numShards));
|
||||
// we just execute this to ensure we get the right exceptions if the number of shards is wrong etc.
|
||||
}
|
||||
}
|
||||
|
||||
if (IndexMetaData.INDEX_ROUTING_PARTITION_SIZE_SETTING.exists(targetIndexSettings)) {
|
||||
throw new IllegalArgumentException("cannot provide a routing partition size value when resizing an index");
|
||||
|
||||
}
|
||||
if (IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(targetIndexSettings)) {
|
||||
// if we have a source index with 1 shards it's legal to set this
|
||||
|
@ -1548,6 +1548,22 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
|
||||
return new ShardId(sourceIndexMetadata.getIndex(), shardId/routingFactor);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the source shard ID to clone the given target shard off
|
||||
* @param shardId the id of the target shard to clone into
|
||||
* @param sourceIndexMetadata the source index metadata
|
||||
* @param numTargetShards the total number of shards in the target index
|
||||
* @return a the source shard ID to clone from
|
||||
*/
|
||||
public static ShardId selectCloneShard(int shardId, IndexMetaData sourceIndexMetadata, int numTargetShards) {
|
||||
int numSourceShards = sourceIndexMetadata.getNumberOfShards();
|
||||
if (numSourceShards != numTargetShards) {
|
||||
throw new IllegalArgumentException("the number of target shards (" + numTargetShards + ") must be the same as the number of "
|
||||
+ " source shards ( " + numSourceShards + ")");
|
||||
}
|
||||
return new ShardId(sourceIndexMetadata.getIndex(), shardId);
|
||||
}
|
||||
|
||||
private static void assertSplitMetadata(int numSourceShards, int numTargetShards, IndexMetaData sourceIndexMetadata) {
|
||||
if (numSourceShards > numTargetShards) {
|
||||
throw new IllegalArgumentException("the number of source shards [" + numSourceShards
|
||||
@ -1578,8 +1594,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
|
||||
return selectShrinkShards(shardId, sourceIndexMetadata, numTargetShards);
|
||||
} else if (sourceIndexMetadata.getNumberOfShards() < numTargetShards) {
|
||||
return Collections.singleton(selectSplitShard(shardId, sourceIndexMetadata, numTargetShards));
|
||||
} else {
|
||||
return Collections.singleton(selectCloneShard(shardId, sourceIndexMetadata, numTargetShards));
|
||||
}
|
||||
throw new IllegalArgumentException("can't select recover from shards if both indices have the same number of shards");
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -727,9 +727,16 @@ public class MetaDataCreateIndexService {
|
||||
|
||||
}
|
||||
|
||||
private static IndexMetaData validateResize(ClusterState state, String sourceIndex,
|
||||
Set<String> targetIndexMappingsTypes, String targetIndexName,
|
||||
Settings targetIndexSettings) {
|
||||
static void validateCloneIndex(ClusterState state, String sourceIndex,
|
||||
Set<String> targetIndexMappingsTypes, String targetIndexName,
|
||||
Settings targetIndexSettings) {
|
||||
IndexMetaData sourceMetaData = validateResize(state, sourceIndex, targetIndexMappingsTypes, targetIndexName, targetIndexSettings);
|
||||
IndexMetaData.selectCloneShard(0, sourceMetaData, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings));
|
||||
}
|
||||
|
||||
static IndexMetaData validateResize(ClusterState state, String sourceIndex,
|
||||
Set<String> targetIndexMappingsTypes, String targetIndexName,
|
||||
Settings targetIndexSettings) {
|
||||
if (state.metaData().hasIndex(targetIndexName)) {
|
||||
throw new ResourceAlreadyExistsException(state.metaData().index(targetIndexName).getIndex());
|
||||
}
|
||||
@ -782,6 +789,9 @@ public class MetaDataCreateIndexService {
|
||||
} else if (type == ResizeType.SPLIT) {
|
||||
validateSplitIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build());
|
||||
indexSettingsBuilder.putNull(initialRecoveryIdFilter);
|
||||
} else if (type == ResizeType.CLONE) {
|
||||
validateCloneIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build());
|
||||
indexSettingsBuilder.putNull(initialRecoveryIdFilter);
|
||||
} else {
|
||||
throw new IllegalStateException("unknown resize type is " + type);
|
||||
}
|
||||
|
@ -54,11 +54,13 @@ public class ResizeAllocationDecider extends AllocationDecider {
|
||||
}
|
||||
IndexMetaData sourceIndexMetaData = allocation.metaData().getIndexSafe(resizeSourceIndex);
|
||||
if (indexMetaData.getNumberOfShards() < sourceIndexMetaData.getNumberOfShards()) {
|
||||
// this only handles splits so far.
|
||||
// this only handles splits and clone so far.
|
||||
return Decision.ALWAYS;
|
||||
}
|
||||
|
||||
ShardId shardId = IndexMetaData.selectSplitShard(shardRouting.id(), sourceIndexMetaData, indexMetaData.getNumberOfShards());
|
||||
ShardId shardId = indexMetaData.getNumberOfShards() == sourceIndexMetaData.getNumberOfShards() ?
|
||||
IndexMetaData.selectCloneShard(shardRouting.id(), sourceIndexMetaData, indexMetaData.getNumberOfShards()) :
|
||||
IndexMetaData.selectSplitShard(shardRouting.id(), sourceIndexMetaData, indexMetaData.getNumberOfShards());
|
||||
ShardRouting sourceShardRouting = allocation.routingNodes().activePrimary(shardId);
|
||||
if (sourceShardRouting == null) {
|
||||
return allocation.decision(Decision.NO, NAME, "source primary shard [%s] is not active", shardId);
|
||||
|
@ -118,4 +118,24 @@ public abstract class RestResizeHandler extends BaseRestHandler {
|
||||
|
||||
}
|
||||
|
||||
public static class RestCloneIndexAction extends RestResizeHandler {
|
||||
|
||||
public RestCloneIndexAction(final Settings settings, final RestController controller) {
|
||||
super(settings);
|
||||
controller.registerHandler(RestRequest.Method.PUT, "/{index}/_clone/{target}", this);
|
||||
controller.registerHandler(RestRequest.Method.POST, "/{index}/_clone/{target}", this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return "clone_index_action";
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ResizeType getResizeType() {
|
||||
return ResizeType.CLONE;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -0,0 +1,126 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.action.admin.indices.create;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.query.TermsQueryBuilder;
|
||||
import org.elasticsearch.index.seqno.SeqNoStats;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.VersionUtils;
|
||||
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
/**
 * Integration test for the Clone Index API: clones a source index into a target index
 * with the same number of primary shards and verifies sequence numbers, doc counts,
 * replica bumping, and that the created-version setting is carried over.
 */
public class CloneIndexIT extends ESIntegTestCase {

    @Override
    protected boolean forbidPrivateIndexSettings() {
        // Allows setting index.version.created below, which is normally a private setting.
        return false;
    }

    public void testCreateCloneIndex() {
        Version version = VersionUtils.randomIndexCompatibleVersion(random());
        int numPrimaryShards = randomIntBetween(1, 5);
        prepareCreate("source").setSettings(Settings.builder().put(indexSettings())
            .put("number_of_shards", numPrimaryShards)
            .put("index.version.created", version)
        ).get();
        final int docs = randomIntBetween(0, 128);
        for (int i = 0; i < docs; i++) {
            client().prepareIndex("source", "type")
                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
        }
        internalCluster().ensureAtLeastNumDataNodes(2);
        // Wait for all shards to be allocated before flipping settings, so the ensureGreen
        // after the write block can succeed.
        // NOTE(review): the original comments here mentioned a "merge node" and "require._name",
        // which do not appear in this test — only a write block is set below; the wording looks
        // inherited from the shrink test.
        ensureGreen();
        // Make the source read-only: resize operations require a write block on the source index.
        client().admin().indices().prepareUpdateSettings("source")
            .setSettings(Settings.builder()
                .put("index.blocks.write", true)).get();
        ensureGreen();

        final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get();

        // disable rebalancing to be able to capture the right stats. balancing can move the target primary
        // making it hard to pin point the source shards.
        client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
            EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none"
        )).get();
        try {

            final boolean createWithReplicas = randomBoolean();
            assertAcked(client().admin().indices().prepareResizeIndex("source", "target")
                .setResizeType(ResizeType.CLONE)
                .setSettings(Settings.builder()
                    .put("index.number_of_replicas", createWithReplicas ? 1 : 0)
                    .putNull("index.blocks.write")
                    .build()).get());
            ensureGreen();

            final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get();
            // A clone must keep the primary shard count of the source.
            assertThat(targetStats.getIndex("target").getIndexShards().keySet().size(), equalTo(numPrimaryShards));

            // Each target shard is a 1:1 copy of the source shard with the same id, so their
            // sequence-number stats must line up, and the target must be fully caught up.
            for (int i = 0; i < numPrimaryShards; i++) {
                final SeqNoStats sourceSeqNoStats = sourceStats.getIndex("source").getIndexShards().get(i).getAt(0).getSeqNoStats();
                final SeqNoStats targetSeqNoStats = targetStats.getIndex("target").getIndexShards().get(i).getAt(0).getSeqNoStats();
                assertEquals(sourceSeqNoStats.getMaxSeqNo(), targetSeqNoStats.getMaxSeqNo());
                assertEquals(targetSeqNoStats.getMaxSeqNo(), targetSeqNoStats.getLocalCheckpoint());
            }

            // size must be >= 1 for the search request even when docs == 0.
            final int size = docs > 0 ? 2 * docs : 1;
            assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs);

            if (createWithReplicas == false) {
                // bump replicas
                client().admin().indices().prepareUpdateSettings("target")
                    .setSettings(Settings.builder()
                        .put("index.number_of_replicas", 1)).get();
                ensureGreen();
                assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs);
            }

            // The clone removed the write block (putNull above), so the target must accept new docs.
            for (int i = docs; i < 2 * docs; i++) {
                client().prepareIndex("target", "type")
                    .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
            }
            flushAndRefresh();
            assertHitCount(client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(),
                2 * docs);
            // The source index must be unaffected by writes to the clone.
            assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs);
            GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get();
            // The created-version of the source must be carried over to the clone.
            assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null));
        } finally {
            // clean up
            client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
                EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String)null
            )).get();
        }

    }

}
|
@ -203,8 +203,7 @@ public class IndexMetaDataTests extends ESTestCase {
|
||||
assertEquals(IndexMetaData.selectShrinkShards(shard, shrink, numTargetShards),
|
||||
IndexMetaData.selectRecoverFromShards(shard, shrink, numTargetShards));
|
||||
|
||||
assertEquals("can't select recover from shards if both indices have the same number of shards",
|
||||
expectThrows(IllegalArgumentException.class, () -> IndexMetaData.selectRecoverFromShards(0, shrink, 32)).getMessage());
|
||||
IndexMetaData.selectRecoverFromShards(0, shrink, 32);
|
||||
}
|
||||
|
||||
public void testSelectSplitShard() {
|
||||
|
@ -252,4 +252,11 @@ public class VersionUtils {
|
||||
assert compatible.size() > 0;
|
||||
return compatible.get(compatible.size() - 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a random version index compatible with the current version.
|
||||
*/
|
||||
public static Version randomIndexCompatibleVersion(Random random) {
|
||||
return randomVersionBetween(random, Version.CURRENT.minimumIndexCompatibilityVersion(), Version.CURRENT);
|
||||
}
|
||||
}
|
||||
|
Loading…
x
Reference in New Issue
Block a user